mirror of https://github.com/minio/minio.git
Merge pull request #1211 from harshavardhana/vendorize
vendor: Add minio-go vendor updates.
Commit 9352cb87c6
@@ -52,7 +52,7 @@ type webAPI struct {
 // Once true log all incoming request.
 AccessLog bool
 // Minio client instance.
-Client minio.CloudStorageClient
+Client *minio.Client

 // private params.
 apiAddress string // api destination address.
@@ -14,7 +14,9 @@
 - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
 - Run `go fmt`
 - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
-- Make sure `go test -short -race ./...` and `go build` completes.
+- Make sure `go test -race ./...` and `go build` completes.
+NOTE: go test runs functional tests and requires you to have a AWS S3 account. Set them as environment variables
+``ACCESS_KEY`` and ``SECRET_KEY``. To run shorter version of the tests please use ``go test -short -race ./...``

 * Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project
 - `minio-go` project is strictly conformant with Golang style
@@ -47,7 +47,7 @@ type ErrorResponse struct {

 // Region where the bucket is located. This header is returned
 // only in HEAD bucket and ListObjects response.
-AmzBucketRegion string
+Region string
 }

 // ToErrorResponse - Returns parsed ErrorResponse struct from body and
@@ -98,65 +98,54 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 case http.StatusNotFound:
 if objectName == "" {
 errResp = ErrorResponse{
 Code: "NoSuchBucket",
 Message: "The specified bucket does not exist.",
 BucketName: bucketName,
 RequestID: resp.Header.Get("x-amz-request-id"),
 HostID: resp.Header.Get("x-amz-id-2"),
-AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+Region: resp.Header.Get("x-amz-bucket-region"),
 }
 } else {
 errResp = ErrorResponse{
 Code: "NoSuchKey",
 Message: "The specified key does not exist.",
 BucketName: bucketName,
 Key: objectName,
 RequestID: resp.Header.Get("x-amz-request-id"),
 HostID: resp.Header.Get("x-amz-id-2"),
-AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+Region: resp.Header.Get("x-amz-bucket-region"),
 }
 }
 case http.StatusForbidden:
 errResp = ErrorResponse{
 Code: "AccessDenied",
 Message: "Access Denied.",
 BucketName: bucketName,
 Key: objectName,
 RequestID: resp.Header.Get("x-amz-request-id"),
 HostID: resp.Header.Get("x-amz-id-2"),
-AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+Region: resp.Header.Get("x-amz-bucket-region"),
 }
 case http.StatusConflict:
 errResp = ErrorResponse{
 Code: "Conflict",
 Message: "Bucket not empty.",
 BucketName: bucketName,
 RequestID: resp.Header.Get("x-amz-request-id"),
 HostID: resp.Header.Get("x-amz-id-2"),
-AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+Region: resp.Header.Get("x-amz-bucket-region"),
 }
 default:
 errResp = ErrorResponse{
 Code: resp.Status,
 Message: resp.Status,
 BucketName: bucketName,
 RequestID: resp.Header.Get("x-amz-request-id"),
 HostID: resp.Header.Get("x-amz-id-2"),
-AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+Region: resp.Header.Get("x-amz-bucket-region"),
 }
 }
 }
-
-// AccessDenied without a signature mismatch code, usually means
-// that the bucket policy has certain restrictions where some API
-// operations are not allowed. Handle this case so that top level
-// callers can interpret this easily and fall back if needed to a
-// lower functionality call. Read each individual API specific
-// code for such fallbacks.
-if errResp.Code == "AccessDenied" && errResp.Message == "Access Denied" {
-errResp.Code = "NotImplemented"
-errResp.Message = "Operation is not allowed according to your bucket policy."
-}
 return errResp
 }
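The two error-type changes above, renaming AmzBucketRegion to Region and dropping the automatic AccessDenied-to-NotImplemented rewrite, are what callers of ToErrorResponse observe. A minimal caller-side sketch, not part of the commit; the endpoint, credentials and bucket name are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// New returns *minio.Client after this commit (see the api.go hunks below).
	client, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", false)
	if err != nil {
		log.Fatalln(err)
	}
	if err := client.BucketExists("my-bucket"); err != nil {
		errResp := minio.ToErrorResponse(err)
		// The raw S3 code is no longer rewritten to "NotImplemented" here;
		// the bucket region, when the server returns one, is errResp.Region.
		fmt.Println(errResp.Code, errResp.Message, errResp.Region)
	}
}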
@@ -1,5 +1,5 @@
 /*
-* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -20,7 +20,6 @@ import (
 "errors"
 "fmt"
 "io"
-"math"
 "net/http"
 "net/url"
 "strings"
@@ -48,17 +47,11 @@ func (c Client) GetBucketACL(bucketName string) (BucketACL, error) {
 urlValues := make(url.Values)
 urlValues.Set("acl", "")

-// Instantiate a new request.
-req, err := c.newRequest("GET", requestMetadata{
+// Execute GET acl on bucketName.
+resp, err := c.executeMethod("GET", requestMetadata{
 bucketName: bucketName,
 queryValues: urlValues,
 })
-if err != nil {
-return "", err
-}
-
-// Initiate the request.
-resp, err := c.do(req)
 defer closeResponse(resp)
 if err != nil {
 return "", err
@@ -83,12 +76,12 @@ func (c Client) GetBucketACL(bucketName string) (BucketACL, error) {
 if !isGoogleEndpoint(c.endpointURL) {
 if policy.AccessControlList.Grant == nil {
 errorResponse := ErrorResponse{
 Code: "InternalError",
 Message: "Access control Grant list is empty. " + reportIssue,
 BucketName: bucketName,
 RequestID: resp.Header.Get("x-amz-request-id"),
 HostID: resp.Header.Get("x-amz-id-2"),
-AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+Region: resp.Header.Get("x-amz-bucket-region"),
 }
 return "", errorResponse
 }
@@ -177,7 +170,11 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
 // Get shortest length.
 // NOTE: Last remaining bytes are usually smaller than
 // req.Buffer size. Use that as the final length.
-length := math.Min(float64(len(req.Buffer)), float64(objectInfo.Size-req.Offset))
+// Don't use Math.min() here to avoid converting int64 to float64
+length := int64(len(req.Buffer))
+if objectInfo.Size-req.Offset < length {
+length = objectInfo.Size - req.Offset
+}
 httpReader, _, err := c.getObject(bucketName, objectName, req.Offset, int64(length))
 if err != nil {
 resCh <- readResponse{
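Replacing math.Min with an explicit int64 comparison avoids a round trip through float64, which cannot represent every int64 exactly. A small standalone illustration of the difference, not part of the commit:

package main

import (
	"fmt"
	"math"
)

func main() {
	// float64 has a 53-bit mantissa, so sizes/offsets beyond 2^53 lose precision.
	size := int64(1<<53 + 1)
	viaFloat := int64(math.Min(float64(size), float64(size)))
	fmt.Println(viaFloat == size) // false: the value was rounded during conversion

	// The integer form used by the new code is exact.
	length := int64(1 << 20)
	if size < length {
		length = size
	}
	fmt.Println(length) // 1048576
}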
@@ -484,8 +481,8 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) (
 customHeader.Set("Range", fmt.Sprintf("bytes=%d", length))
 }

-// Instantiate a new request.
-req, err := c.newRequest("GET", requestMetadata{
+// Execute GET on objectName.
+resp, err := c.executeMethod("GET", requestMetadata{
 bucketName: bucketName,
 objectName: objectName,
 customHeader: customHeader,
@@ -493,11 +490,6 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) (
 if err != nil {
 return nil, ObjectInfo{}, err
 }
-// Execute the request.
-resp, err := c.do(req)
-if err != nil {
-return nil, ObjectInfo{}, err
-}
 if resp != nil {
 if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
 return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
@@ -513,11 +505,11 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) (
 if err != nil {
 msg := "Last-Modified time format not recognized. " + reportIssue
 return nil, ObjectInfo{}, ErrorResponse{
 Code: "InternalError",
 Message: msg,
 RequestID: resp.Header.Get("x-amz-request-id"),
 HostID: resp.Header.Get("x-amz-id-2"),
-AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+Region: resp.Header.Get("x-amz-bucket-region"),
 }
 }
 // Get content-type.
@@ -1,5 +1,5 @@
 /*
-* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -34,13 +34,8 @@ import (
 // }
 //
 func (c Client) ListBuckets() ([]BucketInfo, error) {
-// Instantiate a new request.
-req, err := c.newRequest("GET", requestMetadata{})
+// Execute GET on service.
+resp, err := c.executeMethod("GET", requestMetadata{})
-if err != nil {
-return nil, err
-}
-// Initiate the request.
-resp, err := c.do(req)
 defer closeResponse(resp)
 if err != nil {
 return nil, err
@@ -206,16 +201,11 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit
 // Set max keys.
 urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))

-// Initialize a new request.
-req, err := c.newRequest("GET", requestMetadata{
+// Execute GET on bucket to list objects.
+resp, err := c.executeMethod("GET", requestMetadata{
 bucketName: bucketName,
 queryValues: urlValues,
 })
-if err != nil {
-return listBucketResult{}, err
-}
-// Execute list buckets.
-resp, err := c.do(req)
 defer closeResponse(resp)
 if err != nil {
 return listBucketResult{}, err
@@ -388,16 +378,11 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker,
 // Set max-uploads.
 urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))

-// Instantiate a new request.
-req, err := c.newRequest("GET", requestMetadata{
+// Execute GET on bucketName to list multipart uploads.
+resp, err := c.executeMethod("GET", requestMetadata{
 bucketName: bucketName,
 queryValues: urlValues,
 })
-if err != nil {
-return listMultipartUploadsResult{}, err
-}
-// Execute list multipart uploads request.
-resp, err := c.do(req)
 defer closeResponse(resp)
 if err != nil {
 return listMultipartUploadsResult{}, err
@@ -510,16 +495,12 @@ func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, pa
 // Set max parts.
 urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))

-req, err := c.newRequest("GET", requestMetadata{
+// Execute GET on objectName to get list of parts.
+resp, err := c.executeMethod("GET", requestMetadata{
 bucketName: bucketName,
 objectName: objectName,
 queryValues: urlValues,
 })
-if err != nil {
-return listObjectPartsResult{}, err
-}
-// Exectue list object parts.
-resp, err := c.do(req)
 defer closeResponse(resp)
 if err != nil {
 return listObjectPartsResult{}, err
@@ -1,5 +1,5 @@
 /*
-* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
 /*
-* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -74,7 +74,7 @@ func (c Client) MakeBucket(bucketName string, acl BucketACL, location string) er
 }
 }

-// Save the location into cache on a successfull makeBucket response.
+// Save the location into cache on a successful makeBucket response.
 c.bucketLocCache.Set(bucketName, location)

 // Return.
@@ -180,19 +180,16 @@ func (c Client) SetBucketACL(bucketName string, acl BucketACL) error {
 customHeader.Set("x-amz-acl", "private")
 }

-// Instantiate a new request.
-req, err := c.newRequest("PUT", requestMetadata{
+// Execute PUT bucket.
+resp, err := c.executeMethod("PUT", requestMetadata{
 bucketName: bucketName,
 queryValues: urlValues,
 customHeader: customHeader,
 })
+defer closeResponse(resp)
 if err != nil {
 return err
 }

-// Initiate the request.
-resp, err := c.do(req)
-defer closeResponse(resp)
-if err != nil {
-return err
-}

@@ -1,5 +1,5 @@
 /*
-* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -187,7 +187,7 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
 }, partsInfo) {
 // Proceed to upload the part.
 var objPart objectPart
-objPart, err = c.uploadPart(bucketName, objectName, uploadID, ioutil.NopCloser(reader), partNumber,
+objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber,
 md5Sum, sha256Sum, prtSize)
 if err != nil {
 return totalUploadedSize, err
@@ -1,5 +1,5 @@
 /*
-* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -124,7 +124,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
 var reader io.Reader
 // Update progress reader appropriately to the latest offset
 // as we read from the source.
-reader = newHook(tmpBuffer, progress)
+reader = newHook(bytes.NewReader(tmpBuffer.Bytes()), progress)

 // Verify if part should be uploaded.
 if shouldUploadPart(objectPart{
@@ -134,8 +134,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
 }, partsInfo) {
 // Proceed to upload the part.
 var objPart objectPart
-objPart, err = c.uploadPart(bucketName, objectName, uploadID, ioutil.NopCloser(reader), partNumber,
-md5Sum, sha256Sum, prtSize)
+objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, md5Sum, sha256Sum, prtSize)
 if err != nil {
 // Reset the temporary buffer upon any error.
 tmpBuffer.Reset()
@@ -230,14 +229,8 @@ func (c Client) initiateMultipartUpload(bucketName, objectName, contentType stri
 customHeader: customHeader,
 }

-// Instantiate the request.
-req, err := c.newRequest("POST", reqMetadata)
+// Execute POST on an objectName to initiate multipart upload.
+resp, err := c.executeMethod("POST", reqMetadata)
-if err != nil {
-return initiateMultipartUploadResult{}, err
-}
-
-// Execute the request.
-resp, err := c.do(req)
 defer closeResponse(resp)
 if err != nil {
 return initiateMultipartUploadResult{}, err
@@ -257,7 +250,7 @@ func (c Client) initiateMultipartUpload(bucketName, objectName, contentType stri
 }

 // uploadPart - Uploads a part in a multipart upload.
-func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.ReadCloser, partNumber int, md5Sum, sha256Sum []byte, size int64) (objectPart, error) {
+func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (objectPart, error) {
 // Input validation.
 if err := isValidBucketName(bucketName); err != nil {
 return objectPart{}, err
@@ -295,13 +288,8 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
 contentSHA256Bytes: sha256Sum,
 }

-// Instantiate a request.
-req, err := c.newRequest("PUT", reqMetadata)
+// Execute PUT on each part.
+resp, err := c.executeMethod("PUT", reqMetadata)
-if err != nil {
-return objectPart{}, err
-}
-// Execute the request.
-resp, err := c.do(req)
 defer closeResponse(resp)
 if err != nil {
 return objectPart{}, err
@@ -342,24 +330,18 @@ func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string,
 }

 // Instantiate all the complete multipart buffer.
-completeMultipartUploadBuffer := bytes.NewBuffer(completeMultipartUploadBytes)
+completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
 reqMetadata := requestMetadata{
 bucketName: bucketName,
 objectName: objectName,
 queryValues: urlValues,
-contentBody: ioutil.NopCloser(completeMultipartUploadBuffer),
-contentLength: int64(completeMultipartUploadBuffer.Len()),
-contentSHA256Bytes: sum256(completeMultipartUploadBuffer.Bytes()),
+contentBody: completeMultipartUploadBuffer,
+contentLength: int64(len(completeMultipartUploadBytes)),
+contentSHA256Bytes: sum256(completeMultipartUploadBytes),
 }

-// Instantiate the request.
-req, err := c.newRequest("POST", reqMetadata)
+// Execute POST to complete multipart upload for an objectName.
+resp, err := c.executeMethod("POST", reqMetadata)
-if err != nil {
-return completeMultipartUploadResult{}, err
-}
-
-// Execute the request.
-resp, err := c.do(req)
 defer closeResponse(resp)
 if err != nil {
 return completeMultipartUploadResult{}, err
@@ -91,7 +91,7 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R
 errResp := ToErrorResponse(err)
 // Verify if multipart functionality is not available, if not
 // fall back to single PutObject operation.
-if errResp.Code == "NotImplemented" {
+if errResp.Code == "AccessDenied" && errResp.Message == "Access Denied." {
 // Verify if size of reader is greater than '5GiB'.
 if size > maxSinglePutObjectSize {
 return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
@@ -1,5 +1,5 @@
 /*
-* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -155,12 +155,11 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
 var reader io.Reader
 // Update progress reader appropriately to the latest offset
 // as we read from the source.
-reader = newHook(tmpBuffer, progress)
+reader = newHook(bytes.NewReader(tmpBuffer.Bytes()), progress)

 // Proceed to upload the part.
 var objPart objectPart
-objPart, err = c.uploadPart(bucketName, objectName, uploadID, ioutil.NopCloser(reader),
-partNumber, md5Sum, sha256Sum, prtSize)
+objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, md5Sum, sha256Sum, prtSize)
 if err != nil {
 // Reset the buffer upon any error.
 tmpBuffer.Reset()
@@ -1,5 +1,5 @@
 /*
-* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -146,11 +146,11 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea

 // Update progress reader appropriately to the latest offset as we
 // read from the source.
-reader = newHook(reader, progress)
+readSeeker := newHook(reader, progress)

 // This function does not calculate sha256 and md5sum for payload.
 // Execute put object.
-st, err := c.putObjectDo(bucketName, objectName, ioutil.NopCloser(reader), nil, nil, size, contentType)
+st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, contentType)
 if err != nil {
 return 0, err
 }
@@ -178,12 +178,12 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
 size = maxSinglePutObjectSize
 }
 var md5Sum, sha256Sum []byte
-var readCloser io.ReadCloser
 if size <= minPartSize {
 // Initialize a new temporary buffer.
 tmpBuffer := new(bytes.Buffer)
 md5Sum, sha256Sum, size, err = c.hashCopyN(tmpBuffer, reader, size)
-readCloser = ioutil.NopCloser(tmpBuffer)
+reader = bytes.NewReader(tmpBuffer.Bytes())
+tmpBuffer.Reset()
 } else {
 // Initialize a new temporary file.
 var tmpFile *tempFile
@@ -191,12 +191,13 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
 if err != nil {
 return 0, err
 }
+defer tmpFile.Close()
 md5Sum, sha256Sum, size, err = c.hashCopyN(tmpFile, reader, size)
 // Seek back to beginning of the temporary file.
 if _, err = tmpFile.Seek(0, 0); err != nil {
 return 0, err
 }
-readCloser = tmpFile
+reader = tmpFile
 }
 // Return error if its not io.EOF.
 if err != nil {
@@ -204,26 +205,26 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
 return 0, err
 }
 }
-// Progress the reader to the size.
-if progress != nil {
-if _, err = io.CopyN(ioutil.Discard, progress, size); err != nil {
-return size, err
-}
-}
 // Execute put object.
-st, err := c.putObjectDo(bucketName, objectName, readCloser, md5Sum, sha256Sum, size, contentType)
+st, err := c.putObjectDo(bucketName, objectName, reader, md5Sum, sha256Sum, size, contentType)
 if err != nil {
 return 0, err
 }
 if st.Size != size {
 return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
 }
+// Progress the reader to the size if putObjectDo is successful.
+if progress != nil {
+if _, err = io.CopyN(ioutil.Discard, progress, size); err != nil {
+return size, err
+}
+}
 return size, nil
 }

 // putObjectDo - executes the put object http operation.
 // NOTE: You must have WRITE permissions on a bucket to add an object to it.
-func (c Client) putObjectDo(bucketName, objectName string, reader io.ReadCloser, md5Sum []byte, sha256Sum []byte, size int64, contentType string) (ObjectInfo, error) {
+func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, contentType string) (ObjectInfo, error) {
 // Input validation.
 if err := isValidBucketName(bucketName); err != nil {
 return ObjectInfo{}, err
|
||||||
contentMD5Bytes: md5Sum,
|
contentMD5Bytes: md5Sum,
|
||||||
contentSHA256Bytes: sha256Sum,
|
contentSHA256Bytes: sha256Sum,
|
||||||
}
|
}
|
||||||
// Initiate new request.
|
|
||||||
req, err := c.newRequest("PUT", reqMetadata)
|
// Execute PUT an objectName.
|
||||||
if err != nil {
|
resp, err := c.executeMethod("PUT", reqMetadata)
|
||||||
return ObjectInfo{}, err
|
|
||||||
}
|
|
||||||
// Execute the request.
|
|
||||||
resp, err := c.do(req)
|
|
||||||
defer closeResponse(resp)
|
defer closeResponse(resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ObjectInfo{}, err
|
return ObjectInfo{}, err
|
||||||
|
|
|
@@ -1,5 +1,5 @@
 /*
-* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -30,15 +30,10 @@ func (c Client) RemoveBucket(bucketName string) error {
 if err := isValidBucketName(bucketName); err != nil {
 return err
 }
-// Instantiate a new request.
-req, err := c.newRequest("DELETE", requestMetadata{
+// Execute DELETE on bucket.
+resp, err := c.executeMethod("DELETE", requestMetadata{
 bucketName: bucketName,
 })
-if err != nil {
-return err
-}
-// Initiate the request.
-resp, err := c.do(req)
 defer closeResponse(resp)
 if err != nil {
 return err
@@ -64,16 +59,11 @@ func (c Client) RemoveObject(bucketName, objectName string) error {
 if err := isValidObjectName(objectName); err != nil {
 return err
 }
-// Instantiate the request.
-req, err := c.newRequest("DELETE", requestMetadata{
+// Execute DELETE on objectName.
+resp, err := c.executeMethod("DELETE", requestMetadata{
 bucketName: bucketName,
 objectName: objectName,
 })
-if err != nil {
-return err
-}
-// Initiate the request.
-resp, err := c.do(req)
 defer closeResponse(resp)
 if err != nil {
 return err
@@ -124,18 +114,12 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er
 urlValues := make(url.Values)
 urlValues.Set("uploadId", uploadID)

-// Instantiate a new DELETE request.
-req, err := c.newRequest("DELETE", requestMetadata{
+// Execute DELETE on multipart upload.
+resp, err := c.executeMethod("DELETE", requestMetadata{
 bucketName: bucketName,
 objectName: objectName,
 queryValues: urlValues,
 })
-if err != nil {
-return err
-}
-
-// Initiate the request.
-resp, err := c.do(req)
 defer closeResponse(resp)
 if err != nil {
 return err
@@ -149,13 +133,13 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er
 // This is needed specifically for abort and it cannot
 // be converged into default case.
 errorResponse = ErrorResponse{
 Code: "NoSuchUpload",
 Message: "The specified multipart upload does not exist.",
 BucketName: bucketName,
 Key: objectName,
 RequestID: resp.Header.Get("x-amz-request-id"),
 HostID: resp.Header.Get("x-amz-id-2"),
-AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+Region: resp.Header.Get("x-amz-bucket-region"),
 }
 default:
 return httpRespToErrorResponse(resp, bucketName, objectName)
@@ -1,5 +1,5 @@
 /*
-* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -29,15 +29,11 @@ func (c Client) BucketExists(bucketName string) error {
 if err := isValidBucketName(bucketName); err != nil {
 return err
 }
-// Instantiate a new request.
-req, err := c.newRequest("HEAD", requestMetadata{
+// Execute HEAD on bucketName.
+resp, err := c.executeMethod("HEAD", requestMetadata{
 bucketName: bucketName,
 })
-if err != nil {
-return err
-}
-// Initiate the request.
-resp, err := c.do(req)
 defer closeResponse(resp)
 if err != nil {
 return err
@@ -59,16 +55,12 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
 if err := isValidObjectName(objectName); err != nil {
 return ObjectInfo{}, err
 }
-// Instantiate a new request.
-req, err := c.newRequest("HEAD", requestMetadata{
+// Execute HEAD on objectName.
+resp, err := c.executeMethod("HEAD", requestMetadata{
 bucketName: bucketName,
 objectName: objectName,
 })
-if err != nil {
-return ObjectInfo{}, err
-}
-// Initiate the request.
-resp, err := c.do(req)
 defer closeResponse(resp)
 if err != nil {
 return ObjectInfo{}, err
@@ -87,26 +79,26 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
 size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
 if err != nil {
 return ObjectInfo{}, ErrorResponse{
 Code: "InternalError",
 Message: "Content-Length is invalid. " + reportIssue,
 BucketName: bucketName,
 Key: objectName,
 RequestID: resp.Header.Get("x-amz-request-id"),
 HostID: resp.Header.Get("x-amz-id-2"),
-AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+Region: resp.Header.Get("x-amz-bucket-region"),
 }
 }
 // Parse Last-Modified has http time format.
 date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
 if err != nil {
 return ObjectInfo{}, ErrorResponse{
 Code: "InternalError",
 Message: "Last-Modified time format is invalid. " + reportIssue,
 BucketName: bucketName,
 Key: objectName,
 RequestID: resp.Header.Get("x-amz-request-id"),
 HostID: resp.Header.Get("x-amz-id-2"),
-AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+Region: resp.Header.Get("x-amz-bucket-region"),
 }
 }
 // Fetch content type if any present.
@@ -1,5 +1,5 @@
 /*
-* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -22,6 +22,7 @@ import (
 "encoding/hex"
 "fmt"
 "io"
+"io/ioutil"
 "net/http"
 "net/http/httputil"
 "net/url"
@@ -64,7 +65,7 @@ type Client struct {
 // Global constants.
 const (
 libraryName = "minio-go"
-libraryVersion = "0.2.5"
+libraryVersion = "1.0.0"
 )

 // User Agent should always following the below style.
|
||||||
|
|
||||||
// NewV2 - instantiate minio client with Amazon S3 signature version
|
// NewV2 - instantiate minio client with Amazon S3 signature version
|
||||||
// '2' compatibility.
|
// '2' compatibility.
|
||||||
func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
|
func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
|
||||||
clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
|
clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@@ -90,7 +91,7 @@ func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool)

 // NewV4 - instantiate minio client with Amazon S3 signature version
 // '4' compatibility.
-func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
+func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
 clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
 if err != nil {
 return nil, err
@@ -102,7 +103,7 @@ func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool)

 // New - instantiate minio client Client, adds automatic verification
 // of signature.
-func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
+func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
 clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
 if err != nil {
 return nil, err
@@ -112,7 +113,7 @@ func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (C
 if isGoogleEndpoint(clnt.endpointURL) {
 clnt.signature = SignatureV2
 }
-// If Amazon S3 set to signature v2.
+// If Amazon S3 set to signature v2.n
 if isAmazonEndpoint(clnt.endpointURL) {
 clnt.signature = SignatureV4
 }
@@ -138,7 +139,14 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (*
 clnt.endpointURL = endpointURL

 // Instantiate http client and bucket location cache.
-clnt.httpClient = &http.Client{}
+clnt.httpClient = &http.Client{
+// Setting a sensible time out of 2minutes to wait for response
+// headers. Request is pro-actively cancelled after 2minutes
+// if no response was received from server.
+Timeout: 2 * time.Minute,
+Transport: http.DefaultTransport,
+}

 clnt.bucketLocCache = newBucketLocationCache()

 // Return.
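The vendored client now ships with a two-minute http.Client timeout by default. A consumer that needs different transport behaviour can still install its own http.RoundTripper through SetCustomTransport, which remains a method on the client; a sketch under that assumption, with placeholder endpoint, credentials and tuning values:

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	client, err := minio.NewV4("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", false)
	if err != nil {
		log.Fatalln(err)
	}
	// Swap in a custom RoundTripper with per-phase timeouts; the client-level
	// two-minute timeout configured in privateNew still applies on top.
	client.SetCustomTransport(&http.Transport{
		TLSHandshakeTimeout:   10 * time.Second,
		ResponseHeaderTimeout: 30 * time.Second,
	})
}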
@@ -214,7 +222,7 @@ type requestMetadata struct {

 // Generated by our internal code.
 bucketLocation string
-contentBody io.ReadCloser
+contentBody io.Reader
 contentLength int64
 contentSHA256Bytes []byte
 contentMD5Bytes []byte
@@ -292,7 +300,7 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
 // to zero. Keep this workaround until the above bug is fixed.
 if resp.ContentLength == 0 {
 var buffer bytes.Buffer
-if err := resp.Header.Write(&buffer); err != nil {
+if err = resp.Header.Write(&buffer); err != nil {
 return err
 }
 respTrace = buffer.Bytes()
@@ -322,17 +330,28 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {

 // do - execute http request.
 func (c Client) do(req *http.Request) (*http.Response, error) {
-// execute the request.
+// do the request.
 resp, err := c.httpClient.Do(req)
 if err != nil {
 // Handle this specifically for now until future Golang
 // versions fix this issue properly.
 urlErr, ok := err.(*url.Error)
 if ok && strings.Contains(urlErr.Err.Error(), "EOF") {
-return nil, fmt.Errorf("Connection closed by foreign host %s. Retry again.", urlErr.URL)
+return nil, &url.Error{
+Op: urlErr.Op,
+URL: urlErr.URL,
+Err: fmt.Errorf("Connection closed by foreign host %s. Retry again.", urlErr.URL),
+}
 }
-return resp, err
+return nil, err
 }

+// Response cannot be non-nil, report if its the case.
+if resp == nil {
+msg := "Response is empty. " + reportIssue
+return nil, ErrInvalidArgument(msg)
+}
+
 // If trace is enabled, dump http request and response.
 if c.isTraceEnabled {
 err = c.dumpHTTP(req, resp)
@@ -343,6 +362,85 @@ func (c Client) do(req *http.Request) (*http.Response, error) {
 return resp, nil
 }

+// List of success status.
+var successStatus = []int{
+http.StatusOK,
+http.StatusNoContent,
+http.StatusPartialContent,
+}
+
+// executeMethod - instantiates a given method, and retries the
+// request upon any error up to maxRetries attempts in a binomially
+// delayed manner using a standard back off algorithm.
+func (c Client) executeMethod(method string, metadata requestMetadata) (res *http.Response, err error) {
+var isRetryable bool // Indicates if request can be retried.
+var bodySeeker io.Seeker // Extracted seeker from io.Reader.
+if metadata.contentBody != nil {
+// Check if body is seekable then it is retryable.
+bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
+}
+
+// Retry executes the following function body if request has an
+// error until maxRetries have been exhausted, retry attempts are
+// performed after waiting for a given period of time in a
+// binomial fashion.
+for range newRetryTimer(MaxRetry, time.Second, time.Second*30, MaxJitter) {
+if isRetryable {
+// Seek back to beginning for each attempt.
+if _, err = bodySeeker.Seek(0, 0); err != nil {
+// If seek failed, no need to retry.
+return nil, err
+}
+}
+
+// Instantiate a new request.
+var req *http.Request
+req, err = c.newRequest(method, metadata)
+if err != nil {
+errResponse := ToErrorResponse(err)
+if isS3CodeRetryable(errResponse.Code) {
+continue // Retry.
+}
+return nil, err
+}
+
+// Initiate the request.
+res, err = c.do(req)
+if err != nil {
+// For supported network errors verify.
+if isNetErrorRetryable(err) {
+continue // Retry.
+}
+// For other errors, return here no need to retry.
+return nil, err
+}
+
+// For any known successful http status, return quickly.
+for _, httpStatus := range successStatus {
+if httpStatus == res.StatusCode {
+return res, nil
+}
+}
+
+// For errors verify if its retryable otherwise fail quickly.
+errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
+// Bucket region if set in error response, we can retry the
+// request with the new region.
+if errResponse.Region != "" {
+c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
+continue // Retry.
+}
+
+// Verify if error response code is retryable.
+if isS3CodeRetryable(errResponse.Code) {
+continue // Retry.
+}
+// For all other cases break out of the retry loop.
+break
+}
+return res, err
+}
+
 // newRequest - instantiate a new HTTP request for a given method.
 func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) {
 // If no method is supplied default to 'POST'.
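executeMethod only rewinds and re-sends a request body that implements io.Seeker, which is why the call sites in this commit now hand it bytes.Reader or file-backed readers rather than ioutil.NopCloser wrappers. A standalone sketch of that type assertion, not part of the commit; the helper name is hypothetical:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// retryable mirrors the io.Seeker check executeMethod performs on
// metadata.contentBody before deciding whether a request may be re-sent.
func retryable(body io.Reader) bool {
	_, ok := body.(io.Seeker)
	return ok
}

func main() {
	fmt.Println(retryable(bytes.NewReader([]byte("part payload"))))        // true
	fmt.Println(retryable(io.LimitReader(strings.NewReader("stream"), 3))) // false
}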
@@ -391,10 +489,13 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R

 // Set content body if available.
 if metadata.contentBody != nil {
-req.Body = metadata.contentBody
+req.Body = ioutil.NopCloser(metadata.contentBody)
 }

-// set UserAgent for the request.
+// set 'Expect' header for the request.
+req.Header.Set("Expect", "100-continue")
+
+// set 'User-Agent' header for the request.
 c.setUserAgent(req)

 // Set all headers.
@ -493,46 +594,3 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que

	return u, nil
}

-// CloudStorageClient - Cloud Storage Client interface.
-type CloudStorageClient interface {
-	// Bucket Read/Write/Stat operations.
-	MakeBucket(bucketName string, cannedACL BucketACL, location string) error
-	BucketExists(bucketName string) error
-	RemoveBucket(bucketName string) error
-	SetBucketACL(bucketName string, cannedACL BucketACL) error
-	GetBucketACL(bucketName string) (BucketACL, error)
-
-	ListBuckets() ([]BucketInfo, error)
-	ListObjects(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo
-	ListIncompleteUploads(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo
-
-	// Object Read/Write/Stat operations.
-	GetObject(bucketName, objectName string) (reader *Object, err error)
-	PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error)
-	StatObject(bucketName, objectName string) (ObjectInfo, error)
-	RemoveObject(bucketName, objectName string) error
-	RemoveIncompleteUpload(bucketName, objectName string) error
-
-	// File to Object API.
-	FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error)
-	FGetObject(bucketName, objectName, filePath string) error
-
-	// PutObjectWithProgress for progress.
-	PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error)
-
-	// Presigned operations.
-	PresignedGetObject(bucketName, objectName string, expires time.Duration, reqParams url.Values) (presignedURL string, err error)
-	PresignedPutObject(bucketName, objectName string, expires time.Duration) (presignedURL string, err error)
-	PresignedPostPolicy(*PostPolicy) (formData map[string]string, err error)
-
-	// Application info.
-	SetAppInfo(appName, appVersion string)
-
-	// Set custom transport.
-	SetCustomTransport(customTransport http.RoundTripper)
-
-	// HTTP tracing methods.
-	TraceOn(traceOutput io.Writer)
-	TraceOff()
-}
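With the CloudStorageClient interface removed, callers hold the concrete *minio.Client that minio.New now returns. A minimal hedged sketch of what a caller looks like after this change; the endpoint, keys, and the final TLS/insecure flag are placeholders, and the exact signature of minio.New should be checked against the vendored minio-go revision:

```go
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// minio.New returns *minio.Client directly in this revision,
	// rather than the removed CloudStorageClient interface.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", false)
	if err != nil {
		log.Fatalln(err)
	}

	buckets, err := client.ListBuckets()
	if err != nil {
		log.Fatalln(err)
	}
	for _, bucket := range buckets {
		fmt.Println(bucket.Name, bucket.CreationDate)
	}
}
```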
File diff suppressed because it is too large
@ -1,599 +0,0 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"strings"
	"testing"
)

type customReader struct{}

func (c *customReader) Read(p []byte) (n int, err error) {
	return 0, nil
}

func (c *customReader) Size() (n int64) {
	return 10
}

// Tests getReaderSize() for various Reader types.
func TestGetReaderSize(t *testing.T) {
	var reader io.Reader
	size, err := getReaderSize(reader)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if size != -1 {
		t.Fatal("Reader shouldn't have any length.")
	}

	bytesReader := bytes.NewReader([]byte("Hello World"))
	size, err = getReaderSize(bytesReader)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if size != int64(len("Hello World")) {
		t.Fatalf("Reader length doesn't match got: %v, want: %v", size, len("Hello World"))
	}

	size, err = getReaderSize(new(customReader))
	if err != nil {
		t.Fatal("Error:", err)
	}
	if size != int64(10) {
		t.Fatalf("Reader length doesn't match got: %v, want: %v", size, 10)
	}

	stringsReader := strings.NewReader("Hello World")
	size, err = getReaderSize(stringsReader)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if size != int64(len("Hello World")) {
		t.Fatalf("Reader length doesn't match got: %v, want: %v", size, len("Hello World"))
	}

	// Create request channel.
	reqCh := make(chan readRequest)
	// Create response channel.
	resCh := make(chan readResponse)
	// Create done channel.
	doneCh := make(chan struct{})
	// objectInfo.
	objectInfo := ObjectInfo{Size: 10}
	objectReader := newObject(reqCh, resCh, doneCh, objectInfo)
	defer objectReader.Close()

	size, err = getReaderSize(objectReader)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if size != int64(10) {
		t.Fatalf("Reader length doesn't match got: %v, want: %v", size, 10)
	}

	fileReader, err := ioutil.TempFile(os.TempDir(), "prefix")
	if err != nil {
		t.Fatal("Error:", err)
	}
	defer fileReader.Close()
	defer os.RemoveAll(fileReader.Name())

	size, err = getReaderSize(fileReader)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if size == -1 {
		t.Fatal("Reader length for file cannot be -1.")
	}

	// Verify for standard input, output and error file descriptors.
	size, err = getReaderSize(os.Stdin)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if size != -1 {
		t.Fatal("Stdin should have length of -1.")
	}
	size, err = getReaderSize(os.Stdout)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if size != -1 {
		t.Fatal("Stdout should have length of -1.")
	}
	size, err = getReaderSize(os.Stderr)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if size != -1 {
		t.Fatal("Stderr should have length of -1.")
	}
	file, err := os.Open(os.TempDir())
	if err != nil {
		t.Fatal("Error:", err)
	}
	defer file.Close()
	_, err = getReaderSize(file)
	if err == nil {
		t.Fatal("Input file as directory should throw an error.")
	}
}

// Tests valid hosts for location.
func TestValidBucketLocation(t *testing.T) {
	s3Hosts := []struct {
		bucketLocation string
		endpoint       string
	}{
		{"us-east-1", "s3.amazonaws.com"},
		{"unknown", "s3.amazonaws.com"},
		{"ap-southeast-1", "s3-ap-southeast-1.amazonaws.com"},
	}
	for _, s3Host := range s3Hosts {
		endpoint := getS3Endpoint(s3Host.bucketLocation)
		if endpoint != s3Host.endpoint {
			t.Fatal("Error: invalid bucket location", endpoint)
		}
	}
}

// Tests valid bucket names.
func TestBucketNames(t *testing.T) {
	buckets := []struct {
		name  string
		valid error
	}{
		{".mybucket", ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")},
		{"mybucket.", ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")},
		{"mybucket-", ErrInvalidBucketName("Bucket name contains invalid characters.")},
		{"my", ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters.")},
		{"", ErrInvalidBucketName("Bucket name cannot be empty.")},
		{"my..bucket", ErrInvalidBucketName("Bucket name cannot have successive periods.")},
		{"my.bucket.com", nil},
		{"my-bucket", nil},
		{"123my-bucket", nil},
	}

	for _, b := range buckets {
		err := isValidBucketName(b.name)
		if err != b.valid {
			t.Fatal("Error:", err)
		}
	}
}

// Tests temp file.
func TestTempFile(t *testing.T) {
	tmpFile, err := newTempFile("testing")
	if err != nil {
		t.Fatal("Error:", err)
	}
	fileName := tmpFile.Name()
	// Closing temporary file purges the file.
	err = tmpFile.Close()
	if err != nil {
		t.Fatal("Error:", err)
	}
	st, err := os.Stat(fileName)
	if err != nil && !os.IsNotExist(err) {
		t.Fatal("Error:", err)
	}
	if err == nil && st != nil {
		t.Fatal("Error: file should be deleted and should not exist.")
	}
}

// Tests url encoding.
func TestEncodeURL2Path(t *testing.T) {
	type urlStrings struct {
		objName        string
		encodedObjName string
	}

	bucketName := "bucketName"
	want := []urlStrings{
		{
			objName:        "本語",
			encodedObjName: "%E6%9C%AC%E8%AA%9E",
		},
		{
			objName:        "本語.1",
			encodedObjName: "%E6%9C%AC%E8%AA%9E.1",
		},
		{
			objName:        ">123>3123123",
			encodedObjName: "%3E123%3E3123123",
		},
		{
			objName:        "test 1 2.txt",
			encodedObjName: "test%201%202.txt",
		},
		{
			objName:        "test++ 1.txt",
			encodedObjName: "test%2B%2B%201.txt",
		},
	}

	for _, o := range want {
		u, err := url.Parse(fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucketName, o.objName))
		if err != nil {
			t.Fatal("Error:", err)
		}
		urlPath := "/" + bucketName + "/" + o.encodedObjName
		if urlPath != encodeURL2Path(u) {
			t.Fatal("Error")
		}
	}
}

// Tests error response structure.
func TestErrorResponse(t *testing.T) {
	var err error
	err = ErrorResponse{
		Code: "Testing",
	}
	errResp := ToErrorResponse(err)
	if errResp.Code != "Testing" {
		t.Fatal("Type conversion failed, we have an empty struct.")
	}

	// Test http response decoding.
	var httpResponse *http.Response
	// Set empty variables
	httpResponse = nil
	var bucketName, objectName string

	// Should fail with invalid argument.
	err = httpRespToErrorResponse(httpResponse, bucketName, objectName)
	errResp = ToErrorResponse(err)
	if errResp.Code != "InvalidArgument" {
		t.Fatal("Empty response input should return invalid argument.")
	}
}

// Tests signature calculation.
func TestSignatureCalculation(t *testing.T) {
	req, err := http.NewRequest("GET", "https://s3.amazonaws.com", nil)
	if err != nil {
		t.Fatal("Error:", err)
	}
	req = signV4(*req, "", "", "us-east-1")
	if req.Header.Get("Authorization") != "" {
		t.Fatal("Error: anonymous credentials should not have Authorization header.")
	}

	req = preSignV4(*req, "", "", "us-east-1", 0)
	if strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
		t.Fatal("Error: anonymous credentials should not have Signature query resource.")
	}

	req = signV2(*req, "", "")
	if req.Header.Get("Authorization") != "" {
		t.Fatal("Error: anonymous credentials should not have Authorization header.")
	}

	req = preSignV2(*req, "", "", 0)
	if strings.Contains(req.URL.RawQuery, "Signature") {
		t.Fatal("Error: anonymous credentials should not have Signature query resource.")
	}

	req = signV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1")
	if req.Header.Get("Authorization") == "" {
		t.Fatal("Error: normal credentials should have Authorization header.")
	}

	req = preSignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1", 0)
	if !strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
		t.Fatal("Error: normal credentials should have Signature query resource.")
	}

	req = signV2(*req, "ACCESS-KEY", "SECRET-KEY")
	if req.Header.Get("Authorization") == "" {
		t.Fatal("Error: normal credentials should have Authorization header.")
	}

	req = preSignV2(*req, "ACCESS-KEY", "SECRET-KEY", 0)
	if !strings.Contains(req.URL.RawQuery, "Signature") {
		t.Fatal("Error: normal credentials should not have Signature query resource.")
	}
}

// Tests signature type.
func TestSignatureType(t *testing.T) {
	clnt := Client{}
	if !clnt.signature.isV4() {
		t.Fatal("Error")
	}
	clnt.signature = SignatureV2
	if !clnt.signature.isV2() {
		t.Fatal("Error")
	}
	if clnt.signature.isV4() {
		t.Fatal("Error")
	}
	clnt.signature = SignatureV4
	if !clnt.signature.isV4() {
		t.Fatal("Error")
	}
}

// Tests bucket acl types.
func TestBucketACLTypes(t *testing.T) {
	want := map[string]bool{
		"private":            true,
		"public-read":        true,
		"public-read-write":  true,
		"authenticated-read": true,
		"invalid":            false,
	}
	for acl, ok := range want {
		if BucketACL(acl).isValidBucketACL() != ok {
			t.Fatal("Error")
		}
	}
}

// Tests optimal part size.
func TestPartSize(t *testing.T) {
	totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(5000000000000000000)
	if err == nil {
		t.Fatal("Error: should fail")
	}
	totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(5497558138880)
	if err != nil {
		t.Fatal("Error: ", err)
	}
	if totalPartsCount != 9987 {
		t.Fatalf("Error: expecting total parts count of 9987: got %v instead", totalPartsCount)
	}
	if partSize != 550502400 {
		t.Fatalf("Error: expecting part size of 550502400: got %v instead", partSize)
	}
	if lastPartSize != 241172480 {
		t.Fatalf("Error: expecting last part size of 241172480: got %v instead", lastPartSize)
	}
	totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(5000000000)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if partSize != minPartSize {
		t.Fatalf("Error: expecting part size of %v: got %v instead", minPartSize, partSize)
	}
	totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(-1)
	if err != nil {
		t.Fatal("Error:", err)
	}
	if totalPartsCount != 9987 {
		t.Fatalf("Error: expecting total parts count of 9987: got %v instead", totalPartsCount)
	}
	if partSize != 550502400 {
		t.Fatalf("Error: expecting part size of 550502400: got %v instead", partSize)
	}
	if lastPartSize != 241172480 {
		t.Fatalf("Error: expecting last part size of 241172480: got %v instead", lastPartSize)
	}
}
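For a sense of the arithmetic in the removed TestPartSize: splitting the 5 TiB maximum object size (5497558138880 bytes) into 550502400-byte parts yields 9986 full parts plus a final 241172480-byte part, i.e. 9987 parts in total. A tiny standalone check of those numbers, using only the constants quoted in the test above:

```go
package main

import "fmt"

func main() {
	const maxObjectSize = int64(5497558138880) // 5 TiB
	const partSize = int64(550502400)

	fullParts := maxObjectSize / partSize // 9986
	lastPart := maxObjectSize % partSize  // 241172480

	fmt.Println("total parts:", fullParts+1) // 9987
	fmt.Println("part size:", partSize)      // 550502400
	fmt.Println("last part:", lastPart)      // 241172480
}
```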
// Tests query values to URL encoding.
func TestQueryURLEncoding(t *testing.T) {
	urlValues := make(url.Values)
	urlValues.Set("prefix", "test@1123")
	urlValues.Set("delimiter", "/")
	urlValues.Set("marker", "%%%@$$$")

	queryStr := queryEncode(urlValues)
	if !strings.Contains(queryStr, "test%401123") {
		t.Fatalf("Error: @ should be encoded as %s, invalid query string %s", "test%401123", queryStr)
	}
	if !strings.Contains(queryStr, "%25%25%25%40%24%24%24") {
		t.Fatalf("Error: %s should be encoded as %s, invalid query string %s", "%%%@$$$", "%25%25%25%40%24%24%24", queryStr)
	}
}

// Tests url encoding.
func TestURLEncoding(t *testing.T) {
	type urlStrings struct {
		name        string
		encodedName string
	}

	want := []urlStrings{
		{
			name:        "bigfile-1._%",
			encodedName: "bigfile-1._%25",
		},
		{
			name:        "本語",
			encodedName: "%E6%9C%AC%E8%AA%9E",
		},
		{
			name:        "本語.1",
			encodedName: "%E6%9C%AC%E8%AA%9E.1",
		},
		{
			name:        ">123>3123123",
			encodedName: "%3E123%3E3123123",
		},
		{
			name:        "test 1 2.txt",
			encodedName: "test%201%202.txt",
		},
		{
			name:        "test++ 1.txt",
			encodedName: "test%2B%2B%201.txt",
		},
	}

	for _, u := range want {
		if u.encodedName != urlEncodePath(u.name) {
			t.Fatal("Error")
		}
	}
}

// Tests constructing valid endpoint url.
func TestGetEndpointURL(t *testing.T) {
	if _, err := getEndpointURL("s3.amazonaws.com", false); err != nil {
		t.Fatal("Error:", err)
	}
	if _, err := getEndpointURL("192.168.1.1", false); err != nil {
		t.Fatal("Error:", err)
	}
	if _, err := getEndpointURL("13333.123123.-", false); err == nil {
		t.Fatal("Error")
	}
	if _, err := getEndpointURL("s3.aamzza.-", false); err == nil {
		t.Fatal("Error")
	}
	if _, err := getEndpointURL("s3.amazonaws.com:443", false); err == nil {
		t.Fatal("Error")
	}
}

// Tests valid ip address.
func TestValidIPAddr(t *testing.T) {
	type validIP struct {
		ip    string
		valid bool
	}

	want := []validIP{
		{
			ip:    "192.168.1.1",
			valid: true,
		},
		{
			ip:    "192.1.8",
			valid: false,
		},
		{
			ip:    "..192.",
			valid: false,
		},
		{
			ip:    "192.168.1.1.1",
			valid: false,
		},
	}
	for _, w := range want {
		valid := isValidIP(w.ip)
		if valid != w.valid {
			t.Fatal("Error")
		}
	}
}

// Tests valid endpoint domain.
func TestValidEndpointDomain(t *testing.T) {
	type validEndpoint struct {
		endpointDomain string
		valid          bool
	}

	want := []validEndpoint{
		{
			endpointDomain: "s3.amazonaws.com",
			valid:          true,
		},
		{
			endpointDomain: "s3.amazonaws.com_",
			valid:          false,
		},
		{
			endpointDomain: "%$$$",
			valid:          false,
		},
		{
			endpointDomain: "s3.amz.test.com",
			valid:          true,
		},
		{
			endpointDomain: "s3.%%",
			valid:          false,
		},
		{
			endpointDomain: "localhost",
			valid:          true,
		},
		{
			endpointDomain: "-localhost",
			valid:          false,
		},
		{
			endpointDomain: "",
			valid:          false,
		},
		{
			endpointDomain: "\n \t",
			valid:          false,
		},
		{
			endpointDomain: " ",
			valid:          false,
		},
	}
	for _, w := range want {
		valid := isValidDomain(w.endpointDomain)
		if valid != w.valid {
			t.Fatal("Error:", w.endpointDomain)
		}
	}
}

// Tests valid endpoint url.
func TestValidEndpointURL(t *testing.T) {
	type validURL struct {
		url   string
		valid bool
	}
	want := []validURL{
		{
			url:   "https://s3.amazonaws.com",
			valid: true,
		},
		{
			url:   "https://s3.amazonaws.com/bucket/object",
			valid: false,
		},
		{
			url:   "192.168.1.1",
			valid: false,
		},
	}
	for _, w := range want {
		u, err := url.Parse(w.url)
		if err != nil {
			t.Fatal("Error:", err)
		}
		valid := false
		if err := isValidEndpointURL(u); err == nil {
			valid = true
		}
		if valid != w.valid {
			t.Fatal("Error")
		}
	}
}
@ -20,7 +20,7 @@ import (
	"encoding/hex"
	"net/http"
	"net/url"
-	"path/filepath"
+	"path"
	"sync"
)
@ -90,7 +90,20 @@ func (c Client) getBucketLocation(bucketName string) (string, error) {
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
-			return "", httpRespToErrorResponse(resp, bucketName, "")
+			err = httpRespToErrorResponse(resp, bucketName, "")
+			errResp := ToErrorResponse(err)
+			// AccessDenied without a signature mismatch code,
+			// usually means that the bucket policy has certain
+			// restrictions where some API operations are not
+			// allowed. Handle this case so that top level callers can
+			// interpret this easily and fall back if needed to a
+			// lower functionality call. Read each individual API
+			// specific code for such fallbacks.
+			if errResp.Code == "AccessDenied" && errResp.Message == "Access Denied" {
+				// In this case return as "us-east-1" and let the call fail.
+				return "us-east-1", nil
+			}
+			return "", err
		}
	}
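The hunk above describes a fallback: when the bucket-location lookup is rejected by a restrictive bucket policy, the client assumes "us-east-1" and lets the actual API call surface any real error. A small standalone sketch of that pattern; lookupRegion and regionOrDefault are hypothetical helpers, not minio-go functions:

```go
package main

import (
	"errors"
	"fmt"
)

var errAccessDenied = errors.New("AccessDenied: Access Denied")

// lookupRegion stands in for a GetBucketLocation call that a restrictive
// bucket policy may reject.
func lookupRegion(bucket string) (string, error) {
	return "", errAccessDenied
}

// regionOrDefault mirrors the fallback: on AccessDenied, assume "us-east-1"
// and let the real request report the actual failure, if any.
func regionOrDefault(bucket string) (string, error) {
	region, err := lookupRegion(bucket)
	if err != nil {
		if errors.Is(err, errAccessDenied) {
			return "us-east-1", nil
		}
		return "", err
	}
	return region, nil
}

func main() {
	region, err := regionOrDefault("mybucket")
	fmt.Println(region, err) // "us-east-1 <nil>"
}
```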
@ -127,7 +140,7 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro

	// Set get bucket location always as path style.
	targetURL := c.endpointURL
-	targetURL.Path = filepath.Join(bucketName, "") + "/"
+	targetURL.Path = path.Join(bucketName, "") + "/"
	targetURL.RawQuery = urlValues.Encode()

	// Get a new HTTP request for the method.
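The switch from filepath.Join to path.Join matters because URL paths always use forward slashes, while filepath.Join uses the operating-system separator (a backslash on Windows) and would corrupt the request path there. A small illustrative comparison:

```go
package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	// path.Join always uses '/', which is what a URL path needs.
	fmt.Println(path.Join("mybucket", "") + "/") // mybucket/

	// filepath.Join uses the platform separator; on Windows it would
	// produce backslash-style paths, breaking URL construction.
	fmt.Println(filepath.Join("mybucket", "") + "/")
}
```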
@ -1,5 +1,5 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -27,6 +27,22 @@ type hookReader struct {
	hook io.Reader
}

+// Seek implements io.Seeker. Seeks source first, and if necessary
+// seeks hook if Seek method is appropriately found.
+func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
+	// Verify for source has embedded Seeker, use it.
+	sourceSeeker, ok := hr.source.(io.Seeker)
+	if ok {
+		return sourceSeeker.Seek(offset, whence)
+	}
+	// Verify if hook has embedded Seeker, use it.
+	hookSeeker, ok := hr.hook.(io.Seeker)
+	if ok {
+		return hookSeeker.Seek(offset, whence)
+	}
+	return n, nil
+}
+
// Read implements io.Reader. Always reads from the source, the return
// value 'n' number of bytes are reported through the hook. Returns
// error for all non io.EOF conditions.

@ -44,7 +60,7 @@ func (hr *hookReader) Read(b []byte) (n int, err error) {
	return n, err
}

-// newHook returns a io.Reader which implements hookReader that
+// newHook returns a io.ReadSeeker which implements hookReader that
// reports the data read from the source to the hook.
func newHook(source, hook io.Reader) io.Reader {
	if hook == nil {
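The hook-reader pattern wraps a source reader and reports every read through a side channel, typically a progress meter. A minimal standalone sketch of the same idea (this is not the minio-go type itself, just an illustration of the pattern):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// progressReader mirrors the hook-reader idea: it reads from source and
// reports how many bytes were consumed to a hook (here, a simple counter).
type progressReader struct {
	source io.Reader
	total  int64
}

func (pr *progressReader) Read(p []byte) (n int, err error) {
	n, err = pr.source.Read(p)
	pr.total += int64(n) // the "hook": observe bytes as they flow past
	return n, err
}

func main() {
	pr := &progressReader{source: strings.NewReader("hello hook reader")}
	if _, err := io.Copy(io.Discard, pr); err != nil {
		fmt.Println("copy error:", err)
		return
	}
	fmt.Println("bytes read:", pr.total)
}
```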
@ -0,0 +1,95 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package minio

import (
	"math/rand"
	"net"
	"time"
)

// MaxRetry is the maximum number of retries before stopping.
var MaxRetry = 5

// MaxJitter will randomize over the full exponential backoff time
const MaxJitter = 1.0

// NoJitter disables the use of jitter for randomizing the exponential backoff time
const NoJitter = 0.0

// newRetryTimer creates a timer with exponentially increasing delays
// until the maximum retry attempts are reached.
func newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64) <-chan int {
	attemptCh := make(chan int)

	// Seed random function with current unix nano time.
	rand.Seed(time.Now().UTC().UnixNano())

	go func() {
		defer close(attemptCh)
		for i := 0; i < maxRetry; i++ {
			attemptCh <- i + 1 // Attempts start from 1.
			// Grow the interval at an exponential rate,
			// starting at unit and capping at cap
			time.Sleep(exponentialBackoffWait(unit, i, cap, jitter))
		}
	}()
	return attemptCh
}

// isNetErrorRetryable - is network error retryable.
func isNetErrorRetryable(err error) bool {
	switch err.(type) {
	case *net.DNSError, *net.OpError, net.UnknownNetworkError:
		return true
	}
	return false
}

// computes the exponential backoff duration according to
// https://www.awsarchitectureblog.com/2015/03/backoff.html
func exponentialBackoffWait(base time.Duration, attempt int, cap time.Duration, jitter float64) time.Duration {
	// normalize jitter to the range [0, 1.0]
	if jitter < NoJitter {
		jitter = NoJitter
	}
	if jitter > MaxJitter {
		jitter = MaxJitter
	}
	//sleep = random_between(0, min(cap, base * 2 ** attempt))
	sleep := base * time.Duration(1<<uint(attempt))
	if sleep > cap {
		sleep = cap
	}
	if jitter != NoJitter {
		sleep -= time.Duration(rand.Float64() * float64(sleep) * jitter)
	}
	return sleep
}

// isS3CodeRetryable - is s3 error code retryable.
func isS3CodeRetryable(s3Code string) bool {
	switch s3Code {
	case "RequestError", "RequestTimeout", "Throttling", "ThrottlingException":
		fallthrough
	case "RequestLimitExceeded", "RequestThrottled", "InternalError":
		fallthrough
	case "ExpiredToken", "ExpiredTokenException":
		return true
	}
	return false
}
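The attempt-channel idea in retry.go is that a goroutine emits attempt numbers, sleeping with capped exponential backoff between them, while the caller simply ranges over the channel and breaks on success. Since newRetryTimer is unexported, the sketch below replicates the pattern standalone; names and durations are illustrative only:

```go
package main

import (
	"fmt"
	"time"
)

// backoff grows the delay exponentially from base, capped at cap.
func backoff(base time.Duration, attempt int, cap time.Duration) time.Duration {
	sleep := base * time.Duration(1<<uint(attempt))
	if sleep > cap {
		sleep = cap
	}
	return sleep
}

// retryTimer emits attempt numbers on a channel, pausing between them.
func retryTimer(maxRetry int, unit, cap time.Duration) <-chan int {
	attemptCh := make(chan int)
	go func() {
		defer close(attemptCh)
		for i := 0; i < maxRetry; i++ {
			attemptCh <- i + 1 // Attempts start from 1.
			time.Sleep(backoff(unit, i, cap))
		}
	}()
	return attemptCh
}

func main() {
	for attempt := range retryTimer(3, 10*time.Millisecond, 100*time.Millisecond) {
		fmt.Println("attempt", attempt)
		// A real caller would issue the request here and break on success.
	}
}
```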
@ -89,8 +89,8 @@
		},
		{
			"path": "github.com/minio/minio-go",
-			"revision": "280f16a52008d3ebba1bd64398b9b082e6738386",
-			"revisionTime": "2016-02-07T03:45:25-08:00"
+			"revision": "a4c6c439feb53e1aad083f0a3f0083a047092c17",
+			"revisionTime": "2016-03-10T10:12:11-08:00"
		},
		{
			"path": "github.com/minio/miniobrowser",