Update minio-go dependency (#4551)

This updates the minio-go dependency for:

 - the AWS S3 backend
 - pkg/madmin

```
- Relax isValidBucketName to allow reading existing buckets. (#708) (3 minutes ago) <Harshavardhana>
- For GCS the size limit of S3 is not useful. (#711) (3 days ago) <Harshavardhana>
- s3utils: Support AWS S3 US GovCloud endpoint. (#701) (3 days ago) <Harshavardhana>
- api: Always strip 80/443 port from host (#709) (3 days ago) <Anis Elleuch>
- Redact signature strings properly. (#706) (4 days ago) <Harshavardhana>
- api: Single putObject can use temporary file always. (#703) (6 days ago) <Harshavardhana>
- Spelling fix (#704) (7 days ago) <Jacob Taylor>
- api/encrypt: Get() on encrypted object should be a reader. (#699) (2 weeks ago) <Harshavardhana>
- get: Fix reading an object if its size is unknown (#694) (3 weeks ago) <Anis Elleuch>
- fixes #696 by updating the examples for put-encrypted-object and get-encrypted-object (#697) (3 weeks ago) <Tejay Cardon>
- fix InvalidAccessKeyId error according to amazon documentation (#692) (4 weeks ago) <samkevich>
- Add AWS S3 SSE-C example. (#689) (4 weeks ago) <Harshavardhana>
- According to RFC7232 Etag should be in quotes for If-Match. (#688) (5 weeks ago) <Harshavardhana>
- api: getReaderSize() should honor seeked file descriptors. (#681) (5 weeks ago) <Harshavardhana>
- tests: Use bytes.Repeat() when generating big data (#683) (5 weeks ago) <Anis Elleuch>
- api: Failed call retry with region only when http.StatusBadRequest. (#678) (5 weeks ago) <Harshavardhana>
- api: Add NewWithCredentials() (#646) (5 weeks ago) <Harshavardhana>
```
Commit f5b4b0765a (parent 94241cd153), authored by Harshavardhana on 2017-06-19 16:02:35 -07:00 and committed via GitHub.
55 changed files with 5643 additions and 523 deletions.
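
Among the pulled-in changes, NewWithCredentials() (#646) is the most visible new API: the client now takes a pluggable credentials provider instead of raw access/secret key pairs. A minimal sketch, assuming static credentials (endpoint, keys and region are placeholders):

```go
package main

import (
    "log"

    minio "github.com/minio/minio-go"
    "github.com/minio/minio-go/pkg/credentials"
)

func main() {
    // Static provider shown here; IAM, env or file based providers plug in
    // the same way through the credentials package.
    creds := credentials.NewStaticV4("YOUR-ACCESSKEY", "YOUR-SECRETKEY", "")

    client, err := minio.NewWithCredentials("s3.amazonaws.com", creds, true, "us-east-1")
    if err != nil {
        log.Fatalln(err)
    }

    buckets, err := client.ListBuckets()
    if err != nil {
        log.Fatalln(err)
    }
    for _, bucket := range buckets {
        log.Println(bucket.Name, bucket.CreationDate)
    }
}
```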

View File

@@ -161,6 +161,9 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
if errResp.Region == "" {
errResp.Region = resp.Header.Get("x-amz-bucket-region")
}
if errResp.Code == "InvalidRegion" && errResp.Region != "" {
errResp.Message = fmt.Sprintf("Region does not match, expecting region '%s'.", errResp.Region)
}
// Save headers returned in the API XML error
errResp.Headers = resp.Header

View File

@@ -20,15 +20,17 @@ import (
"io"
"os"
"path/filepath"
"github.com/minio/minio-go/pkg/s3utils"
)
// FGetObject - download contents of an object to a local file.
func (c Client) FGetObject(bucketName, objectName, filePath string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return err
}

View File

@@ -26,11 +26,12 @@ import (
"time"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/pkg/s3utils"
)
// GetEncryptedObject deciphers and streams data stored in the server after applying a specifed encryption materiels
func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.Reader, error) {
// GetEncryptedObject deciphers and streams data stored in the server after applying a specifed encryption materials,
// returned stream should be closed by the caller.
func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.ReadCloser, error) {
if encryptMaterials == nil {
return nil, ErrInvalidArgument("Unable to recognize empty encryption properties")
}
@@ -57,10 +58,10 @@ func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMateria
// GetObject - returns an seekable, readable object.
func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return nil, err
}
@@ -328,14 +329,14 @@ func (o *Object) setOffset(bytesRead int64) error {
// Update the currentOffset.
o.currOffset += bytesRead
if o.currOffset >= o.objectInfo.Size {
if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size {
return io.EOF
}
return nil
}
// Read reads up to len(b) bytes into b. It returns the number of
// bytes read (0 <= n <= len(p)) and any error encountered. Returns
// bytes read (0 <= n <= len(b)) and any error encountered. Returns
// io.EOF upon end of file.
func (o *Object) Read(b []byte) (n int, err error) {
if o == nil {
@@ -442,7 +443,7 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
if o.objectInfoSet {
// If offset is negative than we return io.EOF.
// If offset is greater than or equal to object size we return io.EOF.
if offset >= o.objectInfo.Size || offset < 0 {
if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 {
return 0, io.EOF
}
}
@@ -542,16 +543,20 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
default:
return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
case 0:
if offset > o.objectInfo.Size {
if o.objectInfo.Size > -1 && offset > o.objectInfo.Size {
return 0, io.EOF
}
o.currOffset = offset
case 1:
if o.currOffset+offset > o.objectInfo.Size {
if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size {
return 0, io.EOF
}
o.currOffset += offset
case 2:
// If we don't know the object size return an error for io.SeekEnd
if o.objectInfo.Size < 0 {
return 0, ErrInvalidArgument("Whence END is not supported when the object size is unknown")
}
// Seeking to positive offset is valid for whence '2', but
// since we are backing a Reader we have reached 'EOF' if
// offset is positive.
@@ -623,10 +628,10 @@ func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<-
// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) {
// Validate input arguments.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, ObjectInfo{}, err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return nil, ObjectInfo{}, err
}
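
GetEncryptedObject now returns an io.ReadCloser that the caller must close, and the Object reader tolerates an unknown size (-1). A rough sketch of the new contract, assuming the encrypt package's symmetric-key CBC materials (endpoint, names and the 32-byte key are placeholders):

```go
package main

import (
    "io"
    "log"
    "os"

    minio "github.com/minio/minio-go"
    "github.com/minio/minio-go/pkg/encrypt"
)

func main() {
    client, err := minio.New("play.minio.io:9000", "ACCESSKEY", "SECRETKEY", true)
    if err != nil {
        log.Fatalln(err)
    }

    // Symmetric key and CBC materials; the key is a placeholder and must be
    // kept secret in real use.
    key := encrypt.NewSymmetricKey([]byte("32byteslongsecretkeymustbegiven1"))
    materials, err := encrypt.NewCBCSecureMaterials(key)
    if err != nil {
        log.Fatalln(err)
    }

    // The returned stream is an io.ReadCloser now; close it when done.
    object, err := client.GetEncryptedObject("mybucket", "myobject", materials)
    if err != nil {
        log.Fatalln(err)
    }
    defer object.Close()

    if _, err := io.Copy(os.Stdout, object); err != nil {
        log.Fatalln(err)
    }
}
```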

View File

@@ -23,15 +23,16 @@ import (
"net/url"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/s3utils"
)
// GetBucketPolicy - get bucket policy at a given path.
func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy policy.BucketPolicy, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return policy.BucketPolicyNone, err
}
if err := isValidObjectPrefix(objectPrefix); err != nil {
if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
return policy.BucketPolicyNone, err
}
policyInfo, err := c.getBucketPolicy(bucketName)
@@ -48,10 +49,10 @@ func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy p
// ListBucketPolicies - list all policies for a given prefix and all its children.
func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolicies map[string]policy.BucketPolicy, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return map[string]policy.BucketPolicy{}, err
}
if err := isValidObjectPrefix(objectPrefix); err != nil {
if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
return map[string]policy.BucketPolicy{}, err
}
policyInfo, err := c.getBucketPolicy(bucketName)
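
For reference, a small sketch of how the prefix-scoped policy calls are used (endpoint, bucket and prefix are placeholders):

```go
package main

import (
    "fmt"
    "log"

    minio "github.com/minio/minio-go"
)

func main() {
    client, err := minio.New("play.minio.io:9000", "ACCESSKEY", "SECRETKEY", true)
    if err != nil {
        log.Fatalln(err)
    }

    // Policy for a single prefix, e.g. policy.BucketPolicyReadOnly.
    pol, err := client.GetBucketPolicy("mybucket", "downloads/")
    if err != nil {
        log.Fatalln(err)
    }
    fmt.Println("downloads/ policy:", pol)

    // All policies for the bucket, keyed by resource.
    policies, err := client.ListBucketPolicies("mybucket", "")
    if err != nil {
        log.Fatalln(err)
    }
    for resource, p := range policies {
        fmt.Println(resource, "=>", p)
    }
}
```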

View File

@@ -21,6 +21,8 @@ import (
"net/http"
"net/url"
"strings"
"github.com/minio/minio-go/pkg/s3utils"
)
// ListBuckets list all buckets owned by this authenticated user.
@@ -69,7 +71,7 @@ func (c Client) ListBuckets() ([]BucketInfo, error) {
// // Create a done channel.
// doneCh := make(chan struct{})
// defer close(doneCh)
// // Recurively list all objects in 'mytestbucket'
// // Recursively list all objects in 'mytestbucket'
// recursive := true
// for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) {
// fmt.Println(message)
@@ -87,7 +89,7 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
// Return object owner information by default
fetchOwner := true
// Validate bucket name.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
defer close(objectStatCh)
objectStatCh <- ObjectInfo{
Err: err,
@@ -95,7 +97,7 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
return objectStatCh
}
// Validate incoming object prefix.
if err := isValidObjectPrefix(objectPrefix); err != nil {
if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
defer close(objectStatCh)
objectStatCh <- ObjectInfo{
Err: err,
@@ -170,11 +172,11 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
// ?max-keys - Sets the maximum number of keys returned in the response body.
func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) {
// Validate bucket name.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ListBucketV2Result{}, err
}
// Validate object prefix.
if err := isValidObjectPrefix(objectPrefix); err != nil {
if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
return ListBucketV2Result{}, err
}
// Get resources properly escaped and lined up before
@@ -266,7 +268,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
delimiter = ""
}
// Validate bucket name.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
defer close(objectStatCh)
objectStatCh <- ObjectInfo{
Err: err,
@@ -274,7 +276,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
return objectStatCh
}
// Validate incoming object prefix.
if err := isValidObjectPrefix(objectPrefix); err != nil {
if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
defer close(objectStatCh)
objectStatCh <- ObjectInfo{
Err: err,
@@ -350,11 +352,11 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
// ?max-keys - Sets the maximum number of keys returned in the response body.
func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) {
// Validate bucket name.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ListBucketResult{}, err
}
// Validate object prefix.
if err := isValidObjectPrefix(objectPrefix); err != nil {
if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
return ListBucketResult{}, err
}
// Get resources properly escaped and lined up before
@@ -442,7 +444,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
delimiter = ""
}
// Validate bucket name.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
defer close(objectMultipartStatCh)
objectMultipartStatCh <- ObjectMultipartInfo{
Err: err,
@@ -450,7 +452,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
return objectMultipartStatCh
}
// Validate incoming object prefix.
if err := isValidObjectPrefix(objectPrefix); err != nil {
if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
defer close(objectMultipartStatCh)
objectMultipartStatCh <- ObjectMultipartInfo{
Err: err,
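
The listing calls keep the same shape, only the validation helpers moved to s3utils. A compact sketch of ListObjectsV2 with the done channel, following the doc comment above (endpoint, bucket and prefix are placeholders):

```go
package main

import (
    "fmt"
    "log"

    minio "github.com/minio/minio-go"
)

func main() {
    client, err := minio.New("play.minio.io:9000", "ACCESSKEY", "SECRETKEY", true)
    if err != nil {
        log.Fatalln(err)
    }

    // Closing doneCh stops the listing goroutine.
    doneCh := make(chan struct{})
    defer close(doneCh)

    for object := range client.ListObjectsV2("mybucket", "logs/", true, doneCh) {
        if object.Err != nil {
            log.Fatalln(object.Err)
        }
        fmt.Println(object.Key, object.Size)
    }
}
```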

View File

@@ -30,7 +30,7 @@ import (
// GetBucketNotification - get bucket notification at a given path.
func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return BucketNotification{}, err
}
notification, err := c.getBucketNotification(bucketName)
@@ -140,7 +140,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
defer close(notificationInfoCh)
// Validate the bucket name.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
notificationInfoCh <- NotificationInfo{
Err: err,
}
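
ListenBucketNotification validates the bucket name the same way before streaming events. A sketch, assuming a Minio server backend for the listen API and illustrative event names:

```go
package main

import (
    "log"

    minio "github.com/minio/minio-go"
)

func main() {
    client, err := minio.New("play.minio.io:9000", "ACCESSKEY", "SECRETKEY", true)
    if err != nil {
        log.Fatalln(err)
    }

    doneCh := make(chan struct{})
    defer close(doneCh)

    // Stream object-created events for the whole bucket.
    events := []string{"s3:ObjectCreated:*"}
    for info := range client.ListenBucketNotification("mybucket", "", "", events, doneCh) {
        if info.Err != nil {
            log.Fatalln(info.Err)
        }
        for _, record := range info.Records {
            log.Println("event:", record.EventName, "key:", record.S3.Object.Key)
        }
    }
}
```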

View File

@@ -42,10 +42,10 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
if method == "" {
return nil, ErrInvalidArgument("method cannot be empty.")
}
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return nil, err
}
if err := isValidExpiry(expires); err != nil {
@@ -122,21 +122,38 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
return nil, nil, err
}
// Get credentials from the configured credentials provider.
credValues, err := c.credsProvider.Get()
if err != nil {
return nil, nil, err
}
var (
signerType = credValues.SignerType
sessionToken = credValues.SessionToken
accessKeyID = credValues.AccessKeyID
secretAccessKey = credValues.SecretAccessKey
)
if signerType.IsAnonymous() {
return nil, nil, ErrInvalidArgument("Presigned operations are not supported for anonymous credentials")
}
// Keep time.
t := time.Now().UTC()
// For signature version '2' handle here.
if c.signature.isV2() {
if signerType.IsV2() {
policyBase64 := p.base64()
p.formData["policy"] = policyBase64
// For Google endpoint set this value to be 'GoogleAccessId'.
if s3utils.IsGoogleEndpoint(c.endpointURL) {
p.formData["GoogleAccessId"] = c.accessKeyID
p.formData["GoogleAccessId"] = accessKeyID
} else {
// For all other endpoints set this value to be 'AWSAccessKeyId'.
p.formData["AWSAccessKeyId"] = c.accessKeyID
p.formData["AWSAccessKeyId"] = accessKeyID
}
// Sign the policy.
p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, c.secretAccessKey)
p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, secretAccessKey)
return u, p.formData, nil
}
@@ -159,7 +176,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
}
// Add a credential policy.
credential := s3signer.GetCredential(c.accessKeyID, location, t)
credential := s3signer.GetCredential(accessKeyID, location, t)
if err = p.addNewPolicy(policyCondition{
matchType: "eq",
condition: "$x-amz-credential",
@@ -168,13 +185,27 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
return nil, nil, err
}
if sessionToken != "" {
if err = p.addNewPolicy(policyCondition{
matchType: "eq",
condition: "$x-amz-security-token",
value: sessionToken,
}); err != nil {
return nil, nil, err
}
}
// Get base64 encoded policy.
policyBase64 := p.base64()
// Fill in the form data.
p.formData["policy"] = policyBase64
p.formData["x-amz-algorithm"] = signV4Algorithm
p.formData["x-amz-credential"] = credential
p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, c.secretAccessKey, location)
if sessionToken != "" {
p.formData["x-amz-security-token"] = sessionToken
}
p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location)
return u, p.formData, nil
}
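
PresignedPostPolicy now pulls credentials from the configured provider and, when a session token is present, adds the x-amz-security-token condition and form field automatically. A usage sketch (endpoint, bucket, key and expiry are placeholders):

```go
package main

import (
    "fmt"
    "log"
    "time"

    minio "github.com/minio/minio-go"
)

func main() {
    client, err := minio.New("play.minio.io:9000", "ACCESSKEY", "SECRETKEY", true)
    if err != nil {
        log.Fatalln(err)
    }

    // Build a POST policy for browser-based uploads.
    p := minio.NewPostPolicy()
    p.SetBucket("mybucket")
    p.SetKey("uploads/photo.jpg")
    p.SetExpires(time.Now().UTC().Add(15 * time.Minute))

    u, formData, err := client.PresignedPostPolicy(p)
    if err != nil {
        log.Fatalln(err)
    }

    fmt.Println("POST to:", u)
    for k, v := range formData {
        fmt.Printf("  %s=%s\n", k, v)
    }
}
```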

View File

@@ -1,5 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,18 +19,14 @@ package minio
import (
"bytes"
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/xml"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"path"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio-go/pkg/s3utils"
)
/// Bucket operations
@@ -50,95 +47,23 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) {
}()
// Validate the input arguments.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil {
return err
}
// If location is empty, treat is a default region 'us-east-1'.
if location == "" {
location = "us-east-1"
}
// Try creating bucket with the provided region, in case of
// invalid region error let's guess the appropriate region
// from S3 API headers
// Create a done channel to control 'newRetryTimer' go routine.
doneCh := make(chan struct{}, 1)
// Indicate to our routine to exit cleanly upon return.
defer close(doneCh)
// Blank indentifier is kept here on purpose since 'range' without
// blank identifiers is only supported since go1.4
// https://golang.org/doc/go1.4#forrange.
for _ = range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
// Initialize the makeBucket request.
req, err := c.makeBucketRequest(bucketName, location)
if err != nil {
return err
// For custom region clients, default
// to custom region instead not 'us-east-1'.
if c.region != "" {
location = c.region
}
// Execute make bucket request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
err := httpRespToErrorResponse(resp, bucketName, "")
errResp := ToErrorResponse(err)
if errResp.Code == "InvalidRegion" && errResp.Region != "" {
// Fetch bucket region found in headers
// of S3 error response, attempt bucket
// create again.
location = errResp.Region
continue
}
// Nothing to retry, fail.
return err
}
// Control reaches here when bucket create was successful,
// break out.
break
}
// Success.
return nil
}
// Low level wrapper API For makeBucketRequest.
func (c Client) makeBucketRequest(bucketName string, location string) (*http.Request, error) {
// Validate input arguments.
if err := isValidBucketName(bucketName); err != nil {
return nil, err
}
// In case of Amazon S3. The make bucket issued on
// already existing bucket would fail with
// 'AuthorizationMalformed' error if virtual style is
// used. So we default to 'path style' as that is the
// preferred method here. The final location of the
// 'bucket' is provided through XML LocationConstraint
// data with the request.
targetURL := c.endpointURL
targetURL.Path = path.Join(bucketName, "") + "/"
// get a new HTTP request for the method.
req, err := http.NewRequest("PUT", targetURL.String(), nil)
if err != nil {
return nil, err
}
// set UserAgent for the request.
c.setUserAgent(req)
// set sha256 sum for signature calculation only with
// signature version '4'.
if c.signature.isV4() {
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
// PUT bucket request metadata.
reqMetadata := requestMetadata{
bucketName: bucketName,
bucketLocation: location,
}
// If location is not 'us-east-1' create bucket location config.
@@ -148,30 +73,29 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req
var createBucketConfigBytes []byte
createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
if err != nil {
return nil, err
return err
}
createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes)
req.Body = ioutil.NopCloser(createBucketConfigBuffer)
req.ContentLength = int64(len(createBucketConfigBytes))
// Set content-md5.
req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sumMD5(createBucketConfigBytes)))
if c.signature.isV4() {
// Set sha256.
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBytes)))
reqMetadata.contentMD5Bytes = sumMD5(createBucketConfigBytes)
reqMetadata.contentSHA256Bytes = sum256(createBucketConfigBytes)
reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes)
reqMetadata.contentLength = int64(len(createBucketConfigBytes))
}
// Execute PUT to create a new bucket.
resp, err := c.executeMethod("PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp, bucketName, "")
}
}
// Sign the request.
if c.signature.isV4() {
// Signature calculated for MakeBucket request should be for 'us-east-1',
// regardless of the bucket's location constraint.
req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
} else if c.signature.isV2() {
req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
}
// Return signed request.
return req, nil
// Success.
return nil
}
// SetBucketPolicy set the access permissions on an existing bucket.
@@ -184,10 +108,10 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req
// writeonly - anonymous put/delete access to a given object prefix.
func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy policy.BucketPolicy) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
if err := isValidObjectPrefix(objectPrefix); err != nil {
if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
return err
}
@@ -216,7 +140,7 @@ func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPo
// Saves a new bucket policy.
func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAccessPolicy) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
@@ -262,7 +186,7 @@ func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAcces
// Removes all policies on a bucket.
func (c Client) removeBucketPolicy(bucketName string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
// Get resources properly escaped and lined up before
@@ -286,7 +210,7 @@ func (c Client) removeBucketPolicy(bucketName string) error {
// SetBucketNotification saves a new bucket notification.
func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
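
MakeBucket no longer retries on InvalidRegion; the location constraint is sent once through executeMethod, and clients constructed with a custom region default to that region instead of us-east-1. A sketch (endpoint, keys, bucket name and region are placeholders):

```go
package main

import (
    "log"

    minio "github.com/minio/minio-go"
)

func main() {
    // A region-pinned client; MakeBucket("mybucket", "") would now default
    // to this region instead of us-east-1.
    client, err := minio.NewWithRegion("s3.amazonaws.com", "ACCESSKEY", "SECRETKEY", true, "us-west-2")
    if err != nil {
        log.Fatalln(err)
    }

    if err := client.MakeBucket("mybucket", "us-west-2"); err != nil {
        log.Fatalln(err)
    }
    log.Println("bucket created")
}
```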

View File

@@ -23,6 +23,8 @@ import (
"io/ioutil"
"math"
"os"
"github.com/minio/minio-go/pkg/s3utils"
)
// Verify if reader is *os.File
@@ -168,10 +170,10 @@ func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte,
// or initiate a new request to fetch a new upload id.
func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return "", err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return "", err
}

View File

@@ -25,10 +25,10 @@ import (
// CopyObject - copy a source object into a new object with the provided name in the provided bucket
func (c Client) CopyObject(bucketName string, objectName string, objectSource string, cpCond CopyConditions) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return err
}
if objectSource == "" {

View File

@@ -35,10 +35,10 @@ import (
// FPutObject - Create an object in a bucket, with contents from file at filePath.
func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
@@ -77,17 +77,8 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
objMetadata["Content-Type"] = []string{contentType}
// NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
// Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
if s3utils.IsGoogleEndpoint(c.endpointURL) {
if fileSize > int64(maxSinglePutObjectSize) {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", fileSize),
Key: objectName,
BucketName: bucketName,
}
}
// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
// Do not compute MD5 for Google Cloud Storage.
return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
@@ -125,10 +116,10 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
// specific sections and not having to create temporary files.
func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, metaData map[string][]string, progress io.Reader) (int64, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
@@ -182,7 +173,7 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
hashAlgos := make(map[string]hash.Hash)
hashSums := make(map[string][]byte)
hashAlgos["md5"] = md5.New()
if c.signature.isV4() && !c.secure {
if c.overrideSignerType.IsV4() && !c.secure {
hashAlgos["sha256"] = sha256.New()
}
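
With the Google Cloud Storage size check dropped, FPutObject keeps a single validation path. A basic sketch (endpoint, paths and names are placeholders):

```go
package main

import (
    "log"

    minio "github.com/minio/minio-go"
)

func main() {
    client, err := minio.New("play.minio.io:9000", "ACCESSKEY", "SECRETKEY", true)
    if err != nil {
        log.Fatalln(err)
    }

    // Uploads from a file path; large files are split into multipart uploads
    // automatically, except on Google Cloud Storage, which gets a single PUT.
    n, err := client.FPutObject("mybucket", "backups/db.tar.gz", "/tmp/db.tar.gz", "application/gzip")
    if err != nil {
        log.Fatalln(err)
    }
    log.Println("uploaded", n, "bytes")
}
```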

View File

@@ -32,6 +32,8 @@ import (
"sort"
"strconv"
"strings"
"github.com/minio/minio-go/pkg/s3utils"
)
// Comprehensive put object operation involving multipart resumable uploads.
@@ -74,10 +76,10 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string
reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (int64, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
@@ -176,10 +178,10 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string
// special case where size is unknown i.e '-1'.
func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
@@ -213,7 +215,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
hashSums := make(map[string][]byte)
hashAlgos := make(map[string]hash.Hash)
hashAlgos["md5"] = md5.New()
if c.signature.isV4() && !c.secure {
if c.overrideSignerType.IsV4() && !c.secure {
hashAlgos["sha256"] = sha256.New()
}
@@ -305,10 +307,10 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData map[string][]string) (initiateMultipartUploadResult, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return initiateMultipartUploadResult{}, err
}
@@ -359,10 +361,10 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData
// uploadPart - Uploads a part in a multipart upload.
func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (ObjectPart, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectPart{}, err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectPart{}, err
}
if size > maxPartSize {
@@ -419,10 +421,10 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return completeMultipartUploadResult{}, err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return completeMultipartUploadResult{}, err
}

View File

@@ -20,6 +20,7 @@ import (
"io"
"strings"
"github.com/minio/minio-go/pkg/credentials"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/pkg/s3utils"
)
@@ -57,10 +58,10 @@ func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Read
// PutObjectWithMetadata - with metadata.
func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
if reader == nil {
@@ -82,20 +83,8 @@ func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.R
}
// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
// So we fall back to single PUT operation with the maximum limit of 5GiB.
if s3utils.IsGoogleEndpoint(c.endpointURL) {
if size <= -1 {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: "Content-Length cannot be negative for file uploads to Google Cloud Storage.",
Key: objectName,
BucketName: bucketName,
}
}
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
// Do not compute MD5 for Google Cloud Storage.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress)
}
@@ -103,6 +92,7 @@ func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.R
if size < minPartSize && size >= 0 {
return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress)
}
// For all sizes greater than 5MiB do multipart.
n, err = c.putObjectMultipart(bucketName, objectName, reader, size, metaData, progress)
if err != nil {
@@ -143,8 +133,8 @@ func (c Client) PutObjectStreamingWithProgress(bucketName, objectName string, re
BucketName: bucketName,
}
}
// This method should return error with signature v2 minioClient.
if c.signature.isV2() {
if c.overrideSignerType.IsV2() {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: "AWS streaming signature v4 is not supported with minio client initialized for AWS signature v2",
@@ -173,8 +163,8 @@ func (c Client) PutObjectStreamingWithProgress(bucketName, objectName string, re
return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress)
}
// Set signature type to streaming signature v4.
c.signature = SignatureV4Streaming
// Set streaming signature.
c.overrideSignerType = credentials.SignatureV4Streaming
if size < minPartSize && size >= 0 {
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
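
PutObjectWithMetadata follows the same pattern, with the Google Cloud Storage negative-size special case removed. A sketch with custom metadata (endpoint, names and values are placeholders):

```go
package main

import (
    "log"
    "strings"

    minio "github.com/minio/minio-go"
)

func main() {
    client, err := minio.New("play.minio.io:9000", "ACCESSKEY", "SECRETKEY", true)
    if err != nil {
        log.Fatalln(err)
    }

    data := strings.NewReader("hello from minio-go")
    metadata := map[string][]string{
        "Content-Type":      {"text/plain"},
        "X-Amz-Meta-Source": {"changelog-example"},
    }

    // nil progress reader; pass an io.Reader to track upload progress.
    n, err := client.PutObjectWithMetadata("mybucket", "hello.txt", data, metadata, nil)
    if err != nil {
        log.Fatalln(err)
    }
    log.Println("uploaded", n, "bytes")
}
```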

View File

@@ -25,6 +25,8 @@ import (
"io"
"io/ioutil"
"sort"
"github.com/minio/minio-go/pkg/s3utils"
)
// uploadedPartRes - the response received from a part upload.
@@ -65,10 +67,10 @@ func shouldUploadPartReadAt(objPart ObjectPart, uploadReq uploadPartReq) bool {
// stream after uploading all the contents successfully.
func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
@@ -146,7 +148,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
hashSums := make(map[string][]byte)
hashAlgos := make(map[string]hash.Hash)
hashAlgos["md5"] = md5.New()
if c.signature.isV4() && !c.secure {
if c.overrideSignerType.IsV4() && !c.secure {
hashAlgos["sha256"] = sha256.New()
}

View File

@@ -17,7 +17,6 @@
package minio
import (
"bytes"
"crypto/md5"
"crypto/sha256"
"hash"
@@ -28,6 +27,8 @@ import (
"reflect"
"runtime"
"strings"
"github.com/minio/minio-go/pkg/s3utils"
)
// toInt - converts go value to its integer representation based
@@ -109,14 +110,24 @@ func getReaderSize(reader io.Reader) (size int64, err error) {
case "|0", "|1":
return
}
size = st.Size()
var pos int64
pos, err = v.Seek(0, 1) // SeekCurrent.
if err != nil {
return -1, err
}
size = st.Size() - pos
case *Object:
var st ObjectInfo
st, err = v.Stat()
if err != nil {
return
}
size = st.Size
var pos int64
pos, err = v.Seek(0, 1) // SeekCurrent.
if err != nil {
return -1, err
}
size = st.Size - pos
}
}
// Returns the size here.
@@ -151,14 +162,17 @@ func (c Client) PutObject(bucketName, objectName string, reader io.Reader, conte
// is used for Google Cloud Storage since Google's multipart API is not S3 compatible.
func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
if size > 0 {
readerAt, ok := reader.(io.ReaderAt)
if ok {
reader = io.NewSectionReader(readerAt, 0, size)
}
}
// Update progress reader appropriately to the latest offset as we
@@ -181,10 +195,10 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea
// This special function is used as a fallback when multipart upload fails.
func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
if size > maxSinglePutObjectSize {
@@ -200,38 +214,29 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
hashAlgos := make(map[string]hash.Hash)
hashSums := make(map[string][]byte)
hashAlgos["md5"] = md5.New()
if c.signature.isV4() && !c.secure {
if c.overrideSignerType.IsV4() && !c.secure {
hashAlgos["sha256"] = sha256.New()
}
if size <= minPartSize {
// Initialize a new temporary buffer.
tmpBuffer := new(bytes.Buffer)
size, err = hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, size)
reader = bytes.NewReader(tmpBuffer.Bytes())
tmpBuffer.Reset()
} else {
// Initialize a new temporary file.
var tmpFile *tempFile
tmpFile, err = newTempFile("single$-putobject-single")
if err != nil {
return 0, err
}
defer tmpFile.Close()
size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size)
if err != nil {
return 0, err
}
// Seek back to beginning of the temporary file.
if _, err = tmpFile.Seek(0, 0); err != nil {
return 0, err
}
reader = tmpFile
// Initialize a new temporary file.
tmpFile, err := newTempFile("single$-putobject-single")
if err != nil {
return 0, err
}
defer tmpFile.Close()
size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size)
// Return error if its not io.EOF.
if err != nil && err != io.EOF {
return 0, err
}
// Seek back to beginning of the temporary file.
if _, err = tmpFile.Seek(0, 0); err != nil {
return 0, err
}
reader = tmpFile
// Execute put object.
st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, metaData)
if err != nil {
@@ -253,21 +258,13 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}
if size <= -1 {
return ObjectInfo{}, ErrEntityTooSmall(size, bucketName, objectName)
}
if size > maxSinglePutObjectSize {
return ObjectInfo{}, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Set headers.
customHeader := make(http.Header)
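
The getReaderSize() change means a seeked *os.File (or *Object) now reports only the remaining bytes, so an upload starts from the current offset instead of re-reading from zero. A sketch of that behavior (endpoint, file path, header length and names are placeholders):

```go
package main

import (
    "io"
    "log"
    "os"

    minio "github.com/minio/minio-go"
)

func main() {
    client, err := minio.New("play.minio.io:9000", "ACCESSKEY", "SECRETKEY", true)
    if err != nil {
        log.Fatalln(err)
    }

    f, err := os.Open("/tmp/payload.bin")
    if err != nil {
        log.Fatalln(err)
    }
    defer f.Close()

    // Skip a 128-byte header; getReaderSize() now subtracts the current
    // offset, so only the remaining bytes are sized and uploaded.
    if _, err := f.Seek(128, io.SeekStart); err != nil {
        log.Fatalln(err)
    }

    n, err := client.PutObject("mybucket", "payload.bin", f, "application/octet-stream")
    if err != nil {
        log.Fatalln(err)
    }
    log.Println("uploaded", n, "bytes")
}
```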

View File

@@ -22,6 +22,8 @@ import (
"io"
"net/http"
"net/url"
"github.com/minio/minio-go/pkg/s3utils"
)
// RemoveBucket deletes the bucket name.
@@ -30,7 +32,7 @@ import (
// in the bucket must be deleted before successfully attempting this request.
func (c Client) RemoveBucket(bucketName string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
// Execute DELETE on bucket.
@@ -57,10 +59,10 @@ func (c Client) RemoveBucket(bucketName string) error {
// RemoveObject remove an object from a bucket.
func (c Client) RemoveObject(bucketName, objectName string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return err
}
// Execute DELETE on objectName.
@@ -132,7 +134,7 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan
errorCh := make(chan RemoveObjectError, 1)
// Validate if bucket name is valid.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
defer close(errorCh)
errorCh <- RemoveObjectError{
Err: err,
@@ -212,10 +214,10 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan
// RemoveIncompleteUpload aborts an partially uploaded object.
func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return err
}
// Find multipart upload id of the object to be aborted.
@@ -237,10 +239,10 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
// uploadID, all previously uploaded parts are deleted.
func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return err
}
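
RemoveObjects drains object names from a channel and reports per-object failures on the returned channel. A sketch (endpoint, bucket and names are placeholders):

```go
package main

import (
    "log"

    minio "github.com/minio/minio-go"
)

func main() {
    client, err := minio.New("play.minio.io:9000", "ACCESSKEY", "SECRETKEY", true)
    if err != nil {
        log.Fatalln(err)
    }

    objectsCh := make(chan string)
    go func() {
        defer close(objectsCh)
        for _, name := range []string{"tmp/a", "tmp/b", "tmp/c"} {
            objectsCh <- name
        }
    }()

    // The returned channel only carries failures; an empty range means
    // everything was deleted.
    for rErr := range client.RemoveObjects("mybucket", objectsCh) {
        log.Println("failed to remove", rErr.ObjectName+":", rErr.Err)
    }
}
```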

View File

@@ -28,7 +28,7 @@ import (
// BucketExists verify if bucket exists and you have permission to access it.
func (c Client) BucketExists(bucketName string) (bool, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return false, err
}
@@ -80,10 +80,10 @@ func extractObjMetadata(header http.Header) http.Header {
// StatObject verifies if object exists and you have permission to access.
func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}
reqHeaders := NewHeadReqHeaders()
@@ -93,10 +93,10 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
// Lower level API for statObject supporting pre-conditions and range headers.
func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
}
if err := isValidObjectName(objectName); err != nil {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}
@@ -126,12 +126,13 @@ func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHead
md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
md5sum = strings.TrimSuffix(md5sum, "\"")
// Content-Length is not valid for Google Cloud Storage, do not verify.
// Parse content length is exists
var size int64 = -1
if !s3utils.IsGoogleEndpoint(c.endpointURL) {
// Parse content length.
size, err = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
contentLengthStr := resp.Header.Get("Content-Length")
if contentLengthStr != "" {
size, err = strconv.ParseInt(contentLengthStr, 10, 64)
if err != nil {
// Content-Length is not valid
return ObjectInfo{}, ErrorResponse{
Code: "InternalError",
Message: "Content-Length is invalid. " + reportIssue,
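
Since Content-Length may legitimately be absent, StatObject can now report a size of -1 instead of failing; callers should treat -1 as "unknown". A sketch (endpoint and names are placeholders):

```go
package main

import (
    "fmt"
    "log"

    minio "github.com/minio/minio-go"
)

func main() {
    client, err := minio.New("play.minio.io:9000", "ACCESSKEY", "SECRETKEY", true)
    if err != nil {
        log.Fatalln(err)
    }

    info, err := client.StatObject("mybucket", "myobject")
    if err != nil {
        log.Fatalln(err)
    }

    // Size is -1 when the server omits Content-Length.
    if info.Size < 0 {
        fmt.Println(info.Key, "size unknown,", info.ContentType)
    } else {
        fmt.Println(info.Key, info.Size, "bytes,", info.ContentType)
    }
}
```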

View File

@@ -1,5 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -25,16 +26,17 @@ import (
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"regexp"
"runtime"
"strings"
"sync"
"time"
"github.com/minio/minio-go/pkg/credentials"
"github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio-go/pkg/s3utils"
)
@@ -46,14 +48,11 @@ type Client struct {
// Parsed endpoint url provided by the user.
endpointURL url.URL
// AccessKeyID required for authorized requests.
accessKeyID string
// SecretAccessKey required for authorized requests.
secretAccessKey string
// Choose a signature type if necessary.
signature SignatureType
// Set to 'true' if Client has no access and secret keys.
anonymous bool
// Holds various credential providers.
credsProvider *credentials.Credentials
// Custom signerType value overrides all credentials.
overrideSignerType credentials.SignatureType
// User supplied.
appInfo struct {
@@ -85,7 +84,7 @@ type Client struct {
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "2.0.4"
libraryVersion = "2.1.0"
)
// User Agent should always following the below style.
@@ -100,58 +99,58 @@ const (
// NewV2 - instantiate minio client with Amazon S3 signature version
// '2' compatibility.
func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
creds := credentials.NewStaticV2(accessKeyID, secretAccessKey, "")
clnt, err := privateNew(endpoint, creds, secure, "")
if err != nil {
return nil, err
}
// Set to use signature version '2'.
clnt.signature = SignatureV2
clnt.overrideSignerType = credentials.SignatureV2
return clnt, nil
}
// NewV4 - instantiate minio client with Amazon S3 signature version
// '4' compatibility.
func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
clnt, err := privateNew(endpoint, creds, secure, "")
if err != nil {
return nil, err
}
// Set to use signature version '4'.
clnt.signature = SignatureV4
clnt.overrideSignerType = credentials.SignatureV4
return clnt, nil
}
// New - instantiate minio client Client, adds automatic verification of signature.
// New - instantiate minio client, adds automatic verification of signature.
func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
return NewWithRegion(endpoint, accessKeyID, secretAccessKey, secure, "")
creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
clnt, err := privateNew(endpoint, creds, secure, "")
if err != nil {
return nil, err
}
// Google cloud storage should be set to signature V2, force it if not.
if s3utils.IsGoogleEndpoint(clnt.endpointURL) {
clnt.overrideSignerType = credentials.SignatureV2
}
// If Amazon S3 set to signature v4.
if s3utils.IsAmazonEndpoint(clnt.endpointURL) {
clnt.overrideSignerType = credentials.SignatureV4
}
return clnt, nil
}
// NewWithCredentials - instantiate minio client with credentials provider
// for retrieving credentials from various credentials provider such as
// IAM, File, Env etc.
func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) {
return privateNew(endpoint, creds, secure, region)
}
// NewWithRegion - instantiate minio client, with region configured. Unlike New(),
// NewWithRegion avoids bucket-location lookup operations and it is slightly faster.
// Use this function when if your application deals with single region.
func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) {
clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
if err != nil {
return nil, err
}
// Google cloud storage should be set to signature V2, force it if not.
if s3utils.IsGoogleEndpoint(clnt.endpointURL) {
clnt.signature = SignatureV2
}
// If Amazon S3 set to signature v4.
if s3utils.IsAmazonEndpoint(clnt.endpointURL) {
clnt.signature = SignatureV4
}
// Sets custom region, if region is empty bucket location cache is used automatically.
clnt.region = region
// Success..
return clnt, nil
creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
return privateNew(endpoint, creds, secure, region)
}
// lockedRandSource provides protected rand source, implements rand.Source interface.
@@ -188,7 +187,7 @@ func redirectHeaders(req *http.Request, via []*http.Request) error {
return nil
}
func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) {
// construct endpoint.
endpointURL, err := getEndpointURL(endpoint, secure)
if err != nil {
@@ -197,8 +196,9 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl
// instantiate new Client.
clnt := new(Client)
clnt.accessKeyID = accessKeyID
clnt.secretAccessKey = secretAccessKey
// Save the credentials.
clnt.credsProvider = creds
// Remember whether we are using https or not
clnt.secure = secure
@@ -212,7 +212,10 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl
CheckRedirect: redirectHeaders,
}
// Instantiae bucket location cache.
// Sets custom region, if region is empty bucket location cache is used automatically.
clnt.region = region
// Instantiate bucket location cache.
clnt.bucketLocCache = newBucketLocationCache()
// Introduce a new locked random seed.
@@ -306,40 +309,6 @@ type requestMetadata struct {
contentMD5Bytes []byte
}
// regCred matches credential string in HTTP header
var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
// regCred matches signature string in HTTP header
var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")
// Filter out signature value from Authorization header.
func (c Client) filterSignature(req *http.Request) {
if _, ok := req.Header["Authorization"]; !ok {
return
}
// Handle if Signature V2.
if c.signature.isV2() {
// Set a temporary redacted auth
req.Header.Set("Authorization", "AWS **REDACTED**:**REDACTED**")
return
}
/// Signature V4 authorization header.
// Save the original auth.
origAuth := req.Header.Get("Authorization")
// Strip out accessKeyID from:
// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
// Strip out 256-bit signature from: Signature=<256-bit signature>
newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
// Set a temporary redacted auth
req.Header.Set("Authorization", newAuth)
return
}
// dumpHTTP - dump HTTP request and response.
func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
// Starts http dump.
@@ -349,7 +318,10 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
}
// Filter out Signature field from Authorization header.
c.filterSignature(req)
origAuth := req.Header.Get("Authorization")
if origAuth != "" {
req.Header.Set("Authorization", redactSignature(origAuth))
}
// Only display request header.
reqTrace, err := httputil.DumpRequestOut(req, false)
@@ -553,9 +525,14 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
// Bucket region if set in error response and the error
// code dictates invalid region, we can retry the request
// with the new region.
if errResponse.Code == "InvalidRegion" && errResponse.Region != "" {
c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
continue // Retry.
//
// Additionally we should only retry if bucketLocation and custom
// region is empty.
if metadata.bucketLocation == "" && c.region == "" {
if res.StatusCode == http.StatusBadRequest && errResponse.Region != "" {
c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
continue // Retry.
}
}
// Verify if error response code is retryable.
@@ -581,29 +558,19 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
method = "POST"
}
// Default all requests to "us-east-1" or "cn-north-1" (china region)
location := "us-east-1"
if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
// For china specifically we need to set everything to
// cn-north-1 for now, there is no easier way until AWS S3
// provides a cleaner compatible API across "us-east-1" and
// China region.
location = "cn-north-1"
}
var location string
// Gather location only if bucketName is present.
if metadata.bucketName != "" {
if metadata.bucketName != "" && metadata.bucketLocation == "" {
location, err = c.getBucketLocation(metadata.bucketName)
if err != nil {
return nil, err
}
} else {
location = metadata.bucketLocation
}
// Save location.
metadata.bucketLocation = location
// Construct a new target URL.
targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, metadata.bucketLocation, metadata.queryValues)
targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, metadata.queryValues)
if err != nil {
return nil, err
}
@@ -614,20 +581,41 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
return nil, err
}
// Anonymous request.
anonymous := c.accessKeyID == "" || c.secretAccessKey == ""
// Get credentials from the configured credentials provider.
value, err := c.credsProvider.Get()
if err != nil {
return nil, err
}
var (
signerType = value.SignerType
accessKeyID = value.AccessKeyID
secretAccessKey = value.SecretAccessKey
sessionToken = value.SessionToken
)
// Custom signer set then override the behavior.
if c.overrideSignerType != credentials.SignatureDefault {
signerType = c.overrideSignerType
}
// If signerType returned by credentials helper is anonymous,
// then do not sign regardless of signerType override.
if value.SignerType == credentials.SignatureAnonymous {
signerType = credentials.SignatureAnonymous
}
// Generate presign url if needed, return right here.
if metadata.expires != 0 && metadata.presignURL {
if anonymous {
return nil, ErrInvalidArgument("Requests cannot be presigned with anonymous credentials.")
if signerType.IsAnonymous() {
return nil, ErrInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.")
}
if c.signature.isV2() {
if signerType.IsV2() {
// Presign URL with signature v2.
req = s3signer.PreSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires)
} else if c.signature.isV4() {
req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires)
} else if signerType.IsV4() {
// Presign URL with signature v4.
req = s3signer.PreSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires)
req = s3signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires)
}
return req, nil
}
@@ -640,9 +628,11 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
req.Header.Set(k, v[0])
}
// set incoming content-length.
if metadata.contentLength > 0 {
req.ContentLength = metadata.contentLength
// Set incoming content-length.
req.ContentLength = metadata.contentLength
if req.ContentLength <= -1 {
// For unknown content length, we upload using transfer-encoding: chunked.
req.TransferEncoding = []string{"chunked"}
}
// set md5Sum for content protection.
@@ -650,17 +640,18 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
}
if anonymous {
// For anonymous requests just return.
if signerType.IsAnonymous() {
return req, nil
} // Sign the request for all authenticated requests.
}
switch {
case c.signature.isV2():
case signerType.IsV2():
// Add signature version '2' authorization header.
req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
case c.signature.isStreamingV4() && method == "PUT":
req = s3signer.StreamingSignV4(req, c.accessKeyID,
c.secretAccessKey, location, metadata.contentLength, time.Now().UTC())
req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
case signerType.IsStreamingV4() && method == "PUT":
req = s3signer.StreamingSignV4(req, accessKeyID,
secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC())
default:
// Set sha256 sum for signature calculation only with signature version '4'.
shaHeader := unsignedPayload
@@ -670,7 +661,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
req.Header.Set("X-Amz-Content-Sha256", shaHeader)
// Add signature version '4' authorization header.
req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, location)
req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location)
}
// Return request.
@@ -701,14 +692,26 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que
// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
host = c.s3AccelerateEndpoint
} else {
// Fetch new host based on the bucket location.
host = getS3Endpoint(bucketLocation)
// Do not change the host if the endpoint URL is a FIPS S3 endpoint.
if !s3utils.IsAmazonFIPSGovCloudEndpoint(c.endpointURL) {
// Fetch new host based on the bucket location.
host = getS3Endpoint(bucketLocation)
}
}
}
// Save scheme.
scheme := c.endpointURL.Scheme
// Strip port 80 and 443 so we won't send these ports in Host header.
// The reason is that browsers and curl automatically remove :80 and :443
// from the generated presigned URLs, which then leads to a signature mismatch error.
if h, p, err := net.SplitHostPort(host); err == nil {
if scheme == "http" && p == "80" || scheme == "https" && p == "443" {
host = h
}
}
urlStr := scheme + "://" + host + "/"
// Make URL only if bucketName is available, otherwise use the
// endpoint URL.
@@ -732,13 +735,16 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que
}
}
}
// If there are any query values, add them to the end.
if len(queryValues) > 0 {
urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
}
u, err := url.Parse(urlStr)
if err != nil {
return nil, err
}
return u, nil
}
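The port-stripping rule above is easy to check in isolation. The sketch below mirrors it with a hypothetical standalone helper (stripStandardPort is not part of minio-go) and placeholder hosts:

```go
package main

import (
	"fmt"
	"net"
)

// stripStandardPort drops :80 for http and :443 for https so the Host header
// matches what browsers and curl send for presigned URLs.
func stripStandardPort(scheme, host string) string {
	h, p, err := net.SplitHostPort(host)
	if err != nil {
		return host // no port present, keep host as-is
	}
	if (scheme == "http" && p == "80") || (scheme == "https" && p == "443") {
		return h
	}
	return host
}

func main() {
	fmt.Println(stripStandardPort("https", "s3.amazonaws.com:443")) // s3.amazonaws.com
	fmt.Println(stripStandardPort("http", "localhost:9000"))        // localhost:9000 stays
}
```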

View File

@@ -17,6 +17,8 @@ install:
- go version
- go env
- go get -u github.com/golang/lint/golint
- go get -u github.com/go-ini/ini
- go get -u github.com/minio/go-homedir
- go get -u github.com/remyoudompheng/go-misc/deadcode
- go get -u github.com/gordonklaus/ineffassign

View File

@@ -1,5 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -23,6 +24,7 @@ import (
"path"
"sync"
"github.com/minio/minio-go/pkg/credentials"
"github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio-go/pkg/s3utils"
)
@@ -71,7 +73,7 @@ func (r *bucketLocationCache) Delete(bucketName string) {
// GetBucketLocation - get location for the bucket name from the location cache; if not
// found, fetch it freshly by making a new request.
func (c Client) GetBucketLocation(bucketName string) (string, error) {
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return "", err
}
return c.getBucketLocation(bucketName)
@@ -80,7 +82,7 @@ func (c Client) GetBucketLocation(bucketName string) (string, error) {
// getBucketLocation - Get location for the bucketName from the location map cache; if not
// found, fetch it freshly by making a new request.
func (c Client) getBucketLocation(bucketName string) (string, error) {
if err := isValidBucketName(bucketName); err != nil {
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return "", err
}
@@ -90,6 +92,12 @@ func (c Client) getBucketLocation(bucketName string) (string, error) {
// provides a cleaner compatible API across "us-east-1" and
// China region.
return "cn-north-1", nil
} else if s3utils.IsAmazonGovCloudEndpoint(c.endpointURL) {
// For us-gov specifically we need to set everything to
// us-gov-west-1 for now; there is no easier way until AWS S3
// provides a cleaner compatible API across "us-east-1" and
// the GovCloud region.
return "us-gov-west-1", nil
}
// Region set then no need to fetch bucket location.
@@ -181,8 +189,33 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
// Set UserAgent for the request.
c.setUserAgent(req)
// Get credentials from the configured credentials provider.
value, err := c.credsProvider.Get()
if err != nil {
return nil, err
}
var (
signerType = value.SignerType
accessKeyID = value.AccessKeyID
secretAccessKey = value.SecretAccessKey
sessionToken = value.SessionToken
)
// Custom signer set then override the behavior.
if c.overrideSignerType != credentials.SignatureDefault {
signerType = c.overrideSignerType
}
// If signerType returned by credentials helper is anonymous,
// then do not sign regardless of signerType override.
if value.SignerType == credentials.SignatureAnonymous {
signerType = credentials.SignatureAnonymous
}
// Set sha256 sum for signature calculation only with signature version '4'.
if c.signature.isV4() {
switch {
case signerType.IsV4():
var contentSha256 string
if c.secure {
contentSha256 = unsignedPayload
@@ -190,13 +223,10 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
contentSha256 = hex.EncodeToString(sum256([]byte{}))
}
req.Header.Set("X-Amz-Content-Sha256", contentSha256)
req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
case signerType.IsV2():
req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
}
// Sign the request.
if c.signature.isV4() {
req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
} else if c.signature.isV2() {
req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
}
return req, nil
}
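Roughly how the updated location lookup is exercised from application code, assuming NewWithCredentials takes (endpoint, creds, secure, region) as the credentials package docs later in this diff suggest; the endpoint, keys and bucket name are placeholders:

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	// Placeholder keys; any provider from pkg/credentials works here.
	creds := credentials.NewStaticV4("YOUR-ACCESSKEY", "YOUR-SECRETKEY", "")

	mc, err := minio.NewWithCredentials("s3.amazonaws.com", creds, true, "us-east-1")
	if err != nil {
		log.Fatalln(err)
	}

	// Cached after the first call; GovCloud endpoints short-circuit to
	// "us-gov-west-1" without a round trip, as the hunk above shows.
	location, err := mc.GetBucketLocation("my-bucket")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("bucket location:", location)
}
```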

View File

@@ -0,0 +1,89 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import "fmt"
// A Chain will search for a provider which returns credentials
// and cache that provider until Retrieve is called again.
//
// The Chain provides a way of chaining multiple providers together
// which will pick the first available using priority order of the
// Providers in the list.
//
// If none of the Providers retrieve valid credentials Value, Chain's
// Retrieve() will return an error, collecting the errors from all providers.
//
// If a Provider is found which returns valid credentials Value, Chain
// will cache that Provider for all calls to IsExpired(), until Retrieve is
// called again.
//
// creds := credentials.NewChainCredentials(
// []credentials.Provider{
// &credentials.EnvAWSS3{},
// &credentials.EnvMinio{},
// })
//
// // Usage of ChainCredentials.
// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
// if err != nil {
// log.Fatalln(err)
// }
//
type Chain struct {
Providers []Provider
curr Provider
}
// NewChainCredentials returns a pointer to a new Credentials object
// wrapping a chain of providers.
func NewChainCredentials(providers []Provider) *Credentials {
return New(&Chain{
Providers: append([]Provider{}, providers...),
})
}
// Retrieve returns the credentials value from the first provider that succeeds,
// or an error if no provider returned without error.
//
// If a provider is found it will be cached and any calls to IsExpired()
// will return the expired state of the cached provider.
func (c *Chain) Retrieve() (Value, error) {
var errs []error
for _, p := range c.Providers {
creds, err := p.Retrieve()
if err != nil {
errs = append(errs, err)
continue
} // Success.
c.curr = p
return creds, nil
}
c.curr = nil
return Value{}, fmt.Errorf("No valid providers found %v", errs)
}
// IsExpired will return the expired state of the currently cached provider
// if there is one. If there is no current provider, true will be returned.
func (c *Chain) IsExpired() bool {
if c.curr != nil {
return c.curr.IsExpired()
}
return true
}
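A minimal sketch of chaining two of the providers added later in this commit; the first provider whose Retrieve() succeeds is cached, so ordering matters:

```go
package main

import (
	"log"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	creds := credentials.NewChainCredentials([]credentials.Provider{
		// Errors (and falls through) if ~/.aws/credentials cannot be read.
		&credentials.FileAWSCredentials{},
		// Never errors: degrades to anonymous when the variables are unset.
		&credentials.EnvAWS{},
	})

	value, err := creds.Get()
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("signer type:", value.SignerType)
}
```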

View File

@@ -0,0 +1,17 @@
{
"version": "8",
"hosts": {
"play": {
"url": "https://play.minio.io:9000",
"accessKey": "Q3AM3UQ867SPQQA43P2F",
"secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
"api": "S3v2"
},
"s3": {
"url": "https://s3.amazonaws.com",
"accessKey": "accessKey",
"secretKey": "secret",
"api": "S3v4"
}
}
}

View File

@@ -0,0 +1,175 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import (
"sync"
"time"
)
// A Value is the AWS credentials value for individual credential fields.
type Value struct {
// AWS Access key ID
AccessKeyID string
// AWS Secret Access Key
SecretAccessKey string
// AWS Session Token
SessionToken string
// Signature Type.
SignerType SignatureType
}
// A Provider is the interface for any component which will provide credentials
// Value. A provider is required to manage its own Expired state, and what to
// be expired means.
type Provider interface {
// Retrieve returns nil if it successfully retrieved the value.
// An error is returned if the value was not obtainable, or is empty.
Retrieve() (Value, error)
// IsExpired returns if the credentials are no longer valid, and need
// to be retrieved.
IsExpired() bool
}
// An Expiry provides shared expiration logic to be used by credentials
// providers to implement expiry functionality.
//
// The best method to use this struct is as an anonymous field within the
// provider's struct.
//
// Example:
// type IAMCredentialProvider struct {
// Expiry
// ...
// }
type Expiry struct {
// The date/time when to expire on
expiration time.Time
// If set will be used by IsExpired to determine the current time.
// Defaults to time.Now if CurrentTime is not set.
CurrentTime func() time.Time
}
// SetExpiration sets the expiration IsExpired will check when called.
//
// If window is greater than 0 the expiration time will be reduced by the
// window value.
//
// Using a window is helpful to trigger credentials to expire sooner than
// the expiration time given to ensure no requests are made with expired
// tokens.
func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
e.expiration = expiration
if window > 0 {
e.expiration = e.expiration.Add(-window)
}
}
// IsExpired returns if the credentials are expired.
func (e *Expiry) IsExpired() bool {
if e.CurrentTime == nil {
e.CurrentTime = time.Now
}
return e.expiration.Before(e.CurrentTime())
}
// Credentials - A container for concurrency-safe retrieval of credentials Value.
// Credentials will cache the credentials value until they expire. Once the value
// expires the next Get will attempt to retrieve valid credentials.
//
// Credentials is safe to use across multiple goroutines and will manage the
// synchronous state so the Providers do not need to implement their own
// synchronization.
//
// The first Credentials.Get() will always call Provider.Retrieve() to get the
// first instance of the credentials Value. All calls to Get() after that
// will return the cached credentials Value until IsExpired() returns true.
type Credentials struct {
sync.Mutex
creds Value
forceRefresh bool
provider Provider
}
// New returns a pointer to a new Credentials with the provider set.
func New(provider Provider) *Credentials {
return &Credentials{
provider: provider,
forceRefresh: true,
}
}
// Get returns the credentials value, or error if the credentials Value failed
// to be retrieved.
//
// Will return the cached credentials Value if it has not expired. If the
// credentials Value has expired the Provider's Retrieve() will be called
// to refresh the credentials.
//
// If Credentials.Expire() was called the credentials Value will be force
// expired, and the next call to Get() will cause them to be refreshed.
func (c *Credentials) Get() (Value, error) {
c.Lock()
defer c.Unlock()
if c.isExpired() {
creds, err := c.provider.Retrieve()
if err != nil {
return Value{}, err
}
c.creds = creds
c.forceRefresh = false
}
return c.creds, nil
}
// Expire expires the credentials and forces them to be retrieved on the
// next call to Get().
//
// This will override the Provider's expired state, and force Credentials
// to call the Provider's Retrieve().
func (c *Credentials) Expire() {
c.Lock()
defer c.Unlock()
c.forceRefresh = true
}
// IsExpired returns if the credentials are no longer valid, and need
// to be refreshed.
//
// If the Credentials were forced to be expired with Expire() this will
// reflect that override.
func (c *Credentials) IsExpired() bool {
c.Lock()
defer c.Unlock()
return c.isExpired()
}
// isExpired helper method wrapping the definition of expired credentials.
func (c *Credentials) isExpired() bool {
return c.forceRefresh || c.provider.IsExpired()
}
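A sketch of a custom Provider built on the pieces above: tokenProvider is a hypothetical type that embeds Expiry so the Credentials container refreshes it shortly before the (fake) token runs out:

```go
package main

import (
	"log"
	"time"

	"github.com/minio/minio-go/pkg/credentials"
)

// tokenProvider issues short-lived placeholder credentials.
type tokenProvider struct {
	credentials.Expiry
}

// Retrieve returns a credential valid for one hour; the 10s window makes
// IsExpired() flip slightly before the nominal expiry.
func (p *tokenProvider) Retrieve() (credentials.Value, error) {
	p.SetExpiration(time.Now().Add(1*time.Hour), 10*time.Second)
	return credentials.Value{
		AccessKeyID:     "EXAMPLEKEY",
		SecretAccessKey: "EXAMPLESECRET",
		SessionToken:    "EXAMPLETOKEN",
		SignerType:      credentials.SignatureV4,
	}, nil
}

func main() {
	creds := credentials.New(&tokenProvider{})
	v, err := creds.Get() // the first Get always calls Retrieve
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("access key:", v.AccessKeyID)
}
```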

View File

@@ -0,0 +1,12 @@
[default]
aws_access_key_id = accessKey
aws_secret_access_key = secret
aws_session_token = token
[no_token]
aws_access_key_id = accessKey
aws_secret_access_key = secret
[with_colon]
aws_access_key_id: accessKey
aws_secret_access_key: secret

View File

@@ -0,0 +1,45 @@
// Package credentials provides credential retrieval and management
// for S3 compatible object storage.
//
// By default the Credentials.Get() will cache the successful result of a
// Provider's Retrieve() until Provider.IsExpired() returns true. At which
// point Credentials will call Provider's Retrieve() to get new credential Value.
//
// The Provider is responsible for determining when credentials have expired.
// It is also important to note that Credentials will always call Retrieve the
// first time Credentials.Get() is called.
//
// Example of using the environment variable credentials.
//
// creds := NewEnvAWS()
// // Retrieve the credentials value
// credValue, err := creds.Get()
// if err != nil {
// // handle error
// }
//
// Example of forcing credentials to expire and be refreshed on the next Get().
// This may be helpful to proactively expire credentials and refresh them sooner
// than they would naturally expire on their own.
//
// creds := NewIAM("")
// creds.Expire()
// credsValue, err := creds.Get()
// // New credentials will be retrieved instead of from cache.
//
//
// Custom Provider
//
// Each Provider built into this package also provides a helper method to generate
// a Credentials pointer setup with the provider. To use a custom Provider just
// create a type which satisfies the Provider interface and pass it to the
// New method.
//
// type MyProvider struct{}
// func (m *MyProvider) Retrieve() (Value, error) {...}
// func (m *MyProvider) IsExpired() bool {...}
//
// creds := New(&MyProvider{})
// credValue, err := creds.Get()
//
package credentials

View File

@@ -0,0 +1,71 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import "os"
// An EnvAWS retrieves credentials from the environment variables of the
// running process. Environment credentials never expire.
//
// Environment variables used:
//
// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY.
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY.
// * Secret Token: AWS_SESSION_TOKEN.
type EnvAWS struct {
retrieved bool
}
// NewEnvAWS returns a pointer to a new Credentials object
// wrapping the environment variable provider.
func NewEnvAWS() *Credentials {
return New(&EnvAWS{})
}
// Retrieve retrieves the keys from the environment.
func (e *EnvAWS) Retrieve() (Value, error) {
e.retrieved = false
id := os.Getenv("AWS_ACCESS_KEY_ID")
if id == "" {
id = os.Getenv("AWS_ACCESS_KEY")
}
secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
if secret == "" {
secret = os.Getenv("AWS_SECRET_KEY")
}
signerType := SignatureV4
if id == "" || secret == "" {
signerType = SignatureAnonymous
}
e.retrieved = true
return Value{
AccessKeyID: id,
SecretAccessKey: secret,
SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
SignerType: signerType,
}, nil
}
// IsExpired returns if the credentials have been retrieved.
func (e *EnvAWS) IsExpired() bool {
return !e.retrieved
}

View File

@@ -0,0 +1,62 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import "os"
// An EnvMinio retrieves credentials from the environment variables of the
// running process. Environment credentials never expire.
//
// Environment variables used:
//
// * Access Key ID: MINIO_ACCESS_KEY.
// * Secret Access Key: MINIO_SECRET_KEY.
type EnvMinio struct {
retrieved bool
}
// NewEnvMinio returns a pointer to a new Credentials object
// wrapping the environment variable provider.
func NewEnvMinio() *Credentials {
return New(&EnvMinio{})
}
// Retrieve retrieves the keys from the environment.
func (e *EnvMinio) Retrieve() (Value, error) {
e.retrieved = false
id := os.Getenv("MINIO_ACCESS_KEY")
secret := os.Getenv("MINIO_SECRET_KEY")
signerType := SignatureV4
if id == "" || secret == "" {
signerType = SignatureAnonymous
}
e.retrieved = true
return Value{
AccessKeyID: id,
SecretAccessKey: secret,
SignerType: signerType,
}, nil
}
// IsExpired returns if the credentials have been retrieved.
func (e *EnvMinio) IsExpired() bool {
return !e.retrieved
}
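Putting the environment providers to use; NewEnvAWS from the previous file behaves the same way with the AWS_* variables. The values below are placeholders:

```go
package main

import (
	"log"
	"os"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	// Normally these are already set in the environment.
	os.Setenv("MINIO_ACCESS_KEY", "EXAMPLEKEY")
	os.Setenv("MINIO_SECRET_KEY", "EXAMPLESECRET")

	creds := credentials.NewEnvMinio()
	v, err := creds.Get()
	if err != nil {
		log.Fatalln(err)
	}
	// With both variables set the signer is SignatureV4; with either one
	// missing the provider degrades to anonymous.
	log.Println("signer:", v.SignerType)
}
```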

View File

@@ -0,0 +1,120 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import (
"os"
"path/filepath"
"github.com/go-ini/ini"
homedir "github.com/minio/go-homedir"
)
// A FileAWSCredentials retrieves credentials from the current user's home
// directory, and keeps track if those credentials are expired.
//
// Profile ini file example: $HOME/.aws/credentials
type FileAWSCredentials struct {
// Path to the shared credentials file.
//
// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
// env value is empty will default to current user's home directory.
// Linux/OSX: "$HOME/.aws/credentials"
// Windows: "%USERPROFILE%\.aws\credentials"
filename string
// AWS Profile to extract credentials from the shared credentials file. If empty
// will default to environment variable "AWS_PROFILE" or "default" if
// environment variable is also not set.
profile string
// retrieved states if the credentials have been successfully retrieved.
retrieved bool
}
// NewFileAWSCredentials returns a pointer to a new Credentials object
// wrapping the Profile file provider.
func NewFileAWSCredentials(filename string, profile string) *Credentials {
return New(&FileAWSCredentials{
filename: filename,
profile: profile,
})
}
// Retrieve reads and extracts the shared credentials from the current
// user's home directory.
func (p *FileAWSCredentials) Retrieve() (Value, error) {
if p.filename == "" {
p.filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
if p.filename == "" {
homeDir, err := homedir.Dir()
if err != nil {
return Value{}, err
}
p.filename = filepath.Join(homeDir, ".aws", "credentials")
}
}
if p.profile == "" {
p.profile = os.Getenv("AWS_PROFILE")
if p.profile == "" {
p.profile = "default"
}
}
p.retrieved = false
iniProfile, err := loadProfile(p.filename, p.profile)
if err != nil {
return Value{}, err
}
// Default to empty string if not found.
id := iniProfile.Key("aws_access_key_id")
// Default to empty string if not found.
secret := iniProfile.Key("aws_secret_access_key")
// Default to empty string if not found.
token := iniProfile.Key("aws_session_token")
p.retrieved = true
return Value{
AccessKeyID: id.String(),
SecretAccessKey: secret.String(),
SessionToken: token.String(),
SignerType: SignatureV4,
}, nil
}
// IsExpired returns if the shared credentials have expired.
func (p *FileAWSCredentials) IsExpired() bool {
return !p.retrieved
}
// loadProfile loads from the file pointed to by the shared credentials filename for the given profile.
// The credentials retrieved from the profile will be returned, or an error will be
// returned if it fails to read from the file or the data is invalid.
func loadProfile(filename, profile string) (*ini.Section, error) {
config, err := ini.Load(filename)
if err != nil {
return nil, err
}
iniProfile, err := config.GetSection(profile)
if err != nil {
return nil, err
}
return iniProfile, nil
}
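Loading the ini-style sample shown earlier in this changeset through the new provider; the file path and profile below are placeholders, and leaving both arguments empty falls back to AWS_SHARED_CREDENTIALS_FILE / ~/.aws/credentials and AWS_PROFILE (or "default"):

```go
package main

import (
	"log"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	creds := credentials.NewFileAWSCredentials("testdata/credentials.sample", "no_token")
	v, err := creds.Get()
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("access key:", v.AccessKeyID, "signer:", v.SignerType)
}
```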

View File

@@ -0,0 +1,129 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import (
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"runtime"
homedir "github.com/minio/go-homedir"
)
// A FileMinioClient retrieves credentials from the current user's home
// directory, and keeps track if those credentials are expired.
//
// Configuration file example: $HOME/.mc/config.json
type FileMinioClient struct {
// Path to the shared credentials file.
//
// If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the
// env value is empty will default to current user's home directory.
// Linux/OSX: "$HOME/.mc/config.json"
// Windows: "%USERPROFILE%\mc\config.json"
filename string
// Minio Alias to extract credentials from the shared credentials file. If empty
// will default to environment variable "MINIO_ALIAS" or "default" if
// environment variable is also not set.
alias string
// retrieved states if the credentials have been successfully retrieved.
retrieved bool
}
// NewFileMinioClient returns a pointer to a new Credentials object
// wrapping the Alias file provider.
func NewFileMinioClient(filename string, alias string) *Credentials {
return New(&FileMinioClient{
filename: filename,
alias: alias,
})
}
// Retrieve reads and extracts the shared credentials from the current
// user's home directory.
func (p *FileMinioClient) Retrieve() (Value, error) {
if p.filename == "" {
homeDir, err := homedir.Dir()
if err != nil {
return Value{}, err
}
p.filename = filepath.Join(homeDir, ".mc", "config.json")
if runtime.GOOS == "windows" {
p.filename = filepath.Join(homeDir, "mc", "config.json")
}
}
if p.alias == "" {
p.alias = os.Getenv("MINIO_ALIAS")
if p.alias == "" {
p.alias = "s3"
}
}
p.retrieved = false
hostCfg, err := loadAlias(p.filename, p.alias)
if err != nil {
return Value{}, err
}
p.retrieved = true
return Value{
AccessKeyID: hostCfg.AccessKey,
SecretAccessKey: hostCfg.SecretKey,
SignerType: parseSignatureType(hostCfg.API),
}, nil
}
// IsExpired returns if the shared credentials have expired.
func (p *FileMinioClient) IsExpired() bool {
return !p.retrieved
}
// hostConfig configuration of a host.
type hostConfig struct {
URL string `json:"url"`
AccessKey string `json:"accessKey"`
SecretKey string `json:"secretKey"`
API string `json:"api"`
}
// config config version.
type config struct {
Version string `json:"version"`
Hosts map[string]hostConfig `json:"hosts"`
}
// loadAlias loads from the file pointed to by the shared credentials filename for the given alias.
// The credentials retrieved from the alias will be returned, or an error will be
// returned if it fails to read from the file.
func loadAlias(filename, alias string) (hostConfig, error) {
cfg := &config{}
configBytes, err := ioutil.ReadFile(filename)
if err != nil {
return hostConfig{}, err
}
if err = json.Unmarshal(configBytes, cfg); err != nil {
return hostConfig{}, err
}
return cfg.Hosts[alias], nil
}
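The mc config provider works the same way against the config.json sample shown earlier; the path and alias below are placeholders, and empty arguments fall back to ~/.mc/config.json and MINIO_ALIAS (or "s3"):

```go
package main

import (
	"log"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	creds := credentials.NewFileMinioClient("testdata/config.json", "play")
	v, err := creds.Get()
	if err != nil {
		log.Fatalln(err)
	}
	// The alias' "api" field ("S3v2" in the sample) maps to the signer type.
	log.Println("signer:", v.SignerType)
}
```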

View File

@@ -0,0 +1,196 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import (
"bufio"
"encoding/json"
"errors"
"net/http"
"net/url"
"path"
"time"
)
// DefaultExpiryWindow - Default expiry window.
// ExpiryWindow will allow the credentials to trigger refreshing
// prior to the credentials actually expiring. This is beneficial
// so race conditions with expiring credentials do not cause
// requests to fail unexpectedly due to ExpiredTokenException exceptions.
const DefaultExpiryWindow = time.Second * 10 // 10 secs
// An IAM retrieves credentials from the EC2 service, and keeps track if
// those credentials are expired.
type IAM struct {
Expiry
// Required http Client to use when connecting to IAM metadata service.
Client *http.Client
// Custom endpoint in place of the default EC2 instance metadata service.
endpoint string
}
// redirectHeaders copies all headers when following a redirect URL.
// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800)
func redirectHeaders(req *http.Request, via []*http.Request) error {
if len(via) == 0 {
return nil
}
for key, val := range via[0].Header {
req.Header[key] = val
}
return nil
}
// NewIAM returns a pointer to a new Credentials object wrapping the IAM
// provider. An empty endpoint defaults to the EC2 instance metadata
// service at http://169.254.169.254.
func NewIAM(endpoint string) *Credentials {
if endpoint == "" {
// IAM Roles for Amazon EC2
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
endpoint = "http://169.254.169.254"
}
p := &IAM{
Client: &http.Client{
Transport: http.DefaultTransport,
CheckRedirect: redirectHeaders,
},
endpoint: endpoint,
}
return New(p)
}
// Retrieve retrieves credentials from the EC2 service.
// An error will be returned if the request fails, or if it is unable to
// extract the desired credentials.
func (m *IAM) Retrieve() (Value, error) {
credsList, err := requestCredList(m.Client, m.endpoint)
if err != nil {
return Value{}, err
}
if len(credsList) == 0 {
return Value{}, errors.New("empty EC2 Role list")
}
credsName := credsList[0]
roleCreds, err := requestCred(m.Client, m.endpoint, credsName)
if err != nil {
return Value{}, err
}
// Expiry window is set to 10secs.
m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow)
return Value{
AccessKeyID: roleCreds.AccessKeyID,
SecretAccessKey: roleCreds.SecretAccessKey,
SessionToken: roleCreds.Token,
SignerType: SignatureV4,
}, nil
}
// An ec2RoleCredRespBody provides the shape for unmarshaling credential
// request responses.
type ec2RoleCredRespBody struct {
// Success State
Expiration time.Time
AccessKeyID string
SecretAccessKey string
Token string
// Error state
Code string
Message string
}
const iamSecurityCredsPath = "/latest/meta-data/iam/security-credentials"
// requestCredList requests a list of credentials from the EC2 service.
// If there are no credentials, or there is an error making or receiving the request, an error is returned.
func requestCredList(client *http.Client, endpoint string) ([]string, error) {
u, err := url.Parse(endpoint)
if err != nil {
return nil, err
}
u.Path = iamSecurityCredsPath
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return nil, err
}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, errors.New(resp.Status)
}
credsList := []string{}
s := bufio.NewScanner(resp.Body)
for s.Scan() {
credsList = append(credsList, s.Text())
}
if err := s.Err(); err != nil {
return nil, err
}
return credsList, nil
}
// requestCred requests the credentials for a specific role name from the EC2 service.
//
// If the credentials cannot be found, or there is an error reading the response,
// an error will be returned.
func requestCred(client *http.Client, endpoint string, credsName string) (ec2RoleCredRespBody, error) {
u, err := url.Parse(endpoint)
if err != nil {
return ec2RoleCredRespBody{}, err
}
u.Path = path.Join(iamSecurityCredsPath, credsName)
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return ec2RoleCredRespBody{}, err
}
resp, err := client.Do(req)
if err != nil {
return ec2RoleCredRespBody{}, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return ec2RoleCredRespBody{}, errors.New(resp.Status)
}
respCreds := ec2RoleCredRespBody{}
if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
return ec2RoleCredRespBody{}, err
}
if respCreds.Code != "Success" {
// If an error code was returned something failed requesting the role.
return ec2RoleCredRespBody{}, errors.New(respCreds.Message)
}
return respCreds, nil
}
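A sketch of using the IAM provider; this only yields credentials when run on an EC2 instance with an attached role (or against a custom metadata endpoint for testing):

```go
package main

import (
	"log"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	// An empty endpoint means the instance metadata service at
	// http://169.254.169.254.
	creds := credentials.NewIAM("")

	v, err := creds.Get()
	if err != nil {
		log.Fatalln(err)
	}
	// Role credentials carry a session token and are refreshed roughly
	// 10 seconds before expiry (DefaultExpiryWindow).
	log.Println("has session token:", v.SessionToken != "")
}
```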

View File

@@ -0,0 +1,76 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import "strings"
// SignatureType is type of Authorization requested for a given HTTP request.
type SignatureType int
// Different types of supported signatures - default is SignatureV4 or SignatureDefault.
const (
// SignatureDefault is always set to v4.
SignatureDefault SignatureType = iota
SignatureV4
SignatureV2
SignatureV4Streaming
SignatureAnonymous // Anonymous signature signifies, no signature.
)
// IsV2 - is signature SignatureV2?
func (s SignatureType) IsV2() bool {
return s == SignatureV2
}
// IsV4 - is signature SignatureV4?
func (s SignatureType) IsV4() bool {
return s == SignatureV4 || s == SignatureDefault
}
// IsStreamingV4 - is signature SignatureV4Streaming?
func (s SignatureType) IsStreamingV4() bool {
return s == SignatureV4Streaming
}
// IsAnonymous - is signature empty?
func (s SignatureType) IsAnonymous() bool {
return s == SignatureAnonymous
}
// String returns a humanized version of the signature type;
// strings returned here are case insensitive.
func (s SignatureType) String() string {
if s.IsV2() {
return "S3v2"
} else if s.IsV4() {
return "S3v4"
} else if s.IsStreamingV4() {
return "S3v4Streaming"
}
return "Anonymous"
}
func parseSignatureType(str string) SignatureType {
if strings.EqualFold(str, "S3v4") {
return SignatureV4
} else if strings.EqualFold(str, "S3v2") {
return SignatureV2
} else if strings.EqualFold(str, "S3v4Streaming") {
return SignatureV4Streaming
}
return SignatureAnonymous
}
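A quick tour of the exported signature types; note that SignatureDefault is reported as v4, and the String() values match the case-insensitive "api" strings accepted from the mc config file:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	for _, s := range []credentials.SignatureType{
		credentials.SignatureDefault,
		credentials.SignatureV2,
		credentials.SignatureV4,
		credentials.SignatureAnonymous,
	} {
		fmt.Printf("%-10s v2=%-5v v4=%-5v anonymous=%v\n",
			s, s.IsV2(), s.IsV4(), s.IsAnonymous())
	}
}
```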

View File

@@ -0,0 +1,67 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
// A Static is a set of credentials which are set programmatically,
// and will never expire.
type Static struct {
Value
}
// NewStaticV2 returns a pointer to a new Credentials object
// wrapping a static credentials value provider, signature is
// set to v2. If access and secret are not specified, then
// regardless of the signature type set, Value will return
// as anonymous.
func NewStaticV2(id, secret, token string) *Credentials {
return NewStatic(id, secret, token, SignatureV2)
}
// NewStaticV4 is similar to NewStaticV2 with similar considerations.
func NewStaticV4(id, secret, token string) *Credentials {
return NewStatic(id, secret, token, SignatureV4)
}
// NewStatic returns a pointer to a new Credentials object
// wrapping a static credentials value provider.
func NewStatic(id, secret, token string, signerType SignatureType) *Credentials {
return New(&Static{
Value: Value{
AccessKeyID: id,
SecretAccessKey: secret,
SessionToken: token,
SignerType: signerType,
},
})
}
// Retrieve returns the static credentials.
func (s *Static) Retrieve() (Value, error) {
if s.AccessKeyID == "" || s.SecretAccessKey == "" {
// Anonymous is not an error
return Value{SignerType: SignatureAnonymous}, nil
}
return s.Value, nil
}
// IsExpired returns if the credentials are expired.
//
// For Static, the credentials never expire.
func (s *Static) IsExpired() bool {
return false
}
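Static credentials in both flavours; the keys are placeholders, and empty keys deliberately degrade to anonymous rather than erroring:

```go
package main

import (
	"log"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	creds := credentials.NewStaticV4("YOUR-ACCESSKEY", "YOUR-SECRETKEY", "")
	v, _ := creds.Get()
	log.Println("signer:", v.SignerType) // S3v4

	anon := credentials.NewStaticV2("", "", "")
	v, _ = anon.Get()
	log.Println("signer:", v.SignerType) // Anonymous
}
```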

View File

@@ -89,6 +89,15 @@ func NewCBCSecureMaterials(key Key) (*CBCSecureMaterials, error) {
}
// Close closes the internal stream.
func (s *CBCSecureMaterials) Close() error {
closer, ok := s.stream.(io.Closer)
if ok {
return closer.Close()
}
return nil
}
// SetupEncryptMode - tells CBC that we are going to encrypt data
func (s *CBCSecureMaterials) SetupEncryptMode(stream io.Reader) error {
// Set mode to encrypt

View File

@@ -25,6 +25,9 @@ import "io"
// Materials - provides generic interface to encrypt any stream of data.
type Materials interface {
// Closes the wrapped stream properly, initiated by the caller.
Close() error
// Returns encrypted/decrypted data, io.Reader compatible.
Read(b []byte) (int, error)

View File

@@ -583,7 +583,7 @@ func GetPolicies(statements []Statement, bucketName string) map[string]BucketPol
r = r[:len(r)-1]
asterisk = "*"
}
objectPath := r[len(awsResourcePrefix+bucketName)+1 : len(r)]
objectPath := r[len(awsResourcePrefix+bucketName)+1:]
p := GetPolicy(statements, bucketName, objectPath)
policyRules[bucketName+"/"+objectPath+asterisk] = p
}

View File

@@ -92,9 +92,12 @@ func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData [
// prepareStreamingRequest - prepares a request with appropriate
// headers before computing the seed signature.
func prepareStreamingRequest(req *http.Request, dataLen int64, timestamp time.Time) {
func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) {
// Set x-amz-content-sha256 header.
req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm)
if sessionToken != "" {
req.Header.Set("X-Amz-Security-Token", sessionToken)
}
req.Header.Set("Content-Encoding", streamingEncoding)
req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
@@ -138,6 +141,7 @@ func (s *StreamingReader) setSeedSignature(req *http.Request) {
type StreamingReader struct {
accessKeyID string
secretAccessKey string
sessionToken string
region string
prevSignature string
seedSignature string
@@ -195,16 +199,17 @@ func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
// StreamingSignV4 - provides chunked upload signatureV4 support by
// implementing io.Reader.
func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey,
func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
region string, dataLen int64, reqTime time.Time) *http.Request {
// Set headers needed for streaming signature.
prepareStreamingRequest(req, dataLen, reqTime)
prepareStreamingRequest(req, sessionToken, dataLen, reqTime)
stReader := &StreamingReader{
baseReadCloser: req.Body,
accessKeyID: accessKeyID,
secretAccessKey: secretAccessKey,
sessionToken: sessionToken,
region: region,
reqTime: reqTime,
chunkBuf: make([]byte, payloadChunkSize),

View File

@@ -206,7 +206,7 @@ func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
// PreSignV4 presign the request, in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request {
// Presign is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@@ -228,6 +228,10 @@ func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
query.Set("X-Amz-SignedHeaders", signedHeaders)
query.Set("X-Amz-Credential", credential)
// Set session token if available.
if sessionToken != "" {
query.Set("X-Amz-Security-Token", sessionToken)
}
req.URL.RawQuery = query.Encode()
// Get canonical request.
@@ -260,7 +264,7 @@ func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, l
// SignV4 sign the request before Do(), in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request {
// Signature calculation is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@@ -272,6 +276,11 @@ func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht
// Set x-amz-date.
req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
// Set session token if available.
if sessionToken != "" {
req.Header.Set("X-Amz-Security-Token", sessionToken)
}
// Get canonical request.
canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders)
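A sketch of the updated SignV4 call with temporary credentials; the keys, token and object path are placeholders. With a non-empty session token the signer now also emits X-Amz-Security-Token:

```go
package main

import (
	"log"
	"net/http"

	"github.com/minio/minio-go/pkg/s3signer"
)

func main() {
	req, err := http.NewRequest("GET", "https://s3.amazonaws.com/my-bucket/my-object", nil)
	if err != nil {
		log.Fatalln(err)
	}

	signed := s3signer.SignV4(*req, "EXAMPLEKEY", "EXAMPLESECRET", "EXAMPLETOKEN", "us-east-1")
	log.Println(signed.Header.Get("Authorization"))
	log.Println(signed.Header.Get("X-Amz-Security-Token"))
}
```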

View File

@@ -19,6 +19,7 @@ package s3utils
import (
"bytes"
"encoding/hex"
"errors"
"net"
"net/url"
"regexp"
@@ -84,10 +85,29 @@ func IsAmazonEndpoint(endpointURL url.URL) bool {
if IsAmazonChinaEndpoint(endpointURL) {
return true
}
if IsAmazonGovCloudEndpoint(endpointURL) {
return true
}
return endpointURL.Host == "s3.amazonaws.com"
}
// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool {
if endpointURL == sentinelURL {
return false
}
return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" ||
IsAmazonFIPSGovCloudEndpoint(endpointURL))
}
// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint.
func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool {
if endpointURL == sentinelURL {
return false
}
return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com"
}
// IsAmazonChinaEndpoint - Match if it is exactly Amazon S3 China endpoint.
// Customers who wish to use the new Beijing Region are required
// to sign up for a separate set of account credentials unique to
@@ -181,3 +201,74 @@ func EncodePath(pathName string) string {
}
return encodedPathname
}
// We support '.' with bucket names but we fall back to using path
// style requests instead for such buckets.
var (
validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-]{1,61}[A-Za-z0-9]$`)
validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
)
// Common checker for both stricter and basic validation.
func checkBucketNameCommon(bucketName string, strict bool) (err error) {
if strings.TrimSpace(bucketName) == "" {
return errors.New("Bucket name cannot be empty")
}
if len(bucketName) < 3 {
return errors.New("Bucket name cannot be smaller than 3 characters")
}
if len(bucketName) > 63 {
return errors.New("Bucket name cannot be greater than 63 characters")
}
if ipAddress.MatchString(bucketName) {
return errors.New("Bucket name cannot be an ip address")
}
if strings.Contains(bucketName, "..") {
return errors.New("Bucket name contains invalid characters")
}
if strict {
if !validBucketNameStrict.MatchString(bucketName) {
err = errors.New("Bucket name contains invalid characters")
}
return err
}
if !validBucketName.MatchString(bucketName) {
err = errors.New("Bucket name contains invalid characters")
}
return err
}
// CheckValidBucketName - checks if we have a valid input bucket name.
// This is a non stricter version.
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func CheckValidBucketName(bucketName string) (err error) {
return checkBucketNameCommon(bucketName, false)
}
// CheckValidBucketNameStrict - checks if we have a valid input bucket name.
// This is a stricter version.
func CheckValidBucketNameStrict(bucketName string) (err error) {
return checkBucketNameCommon(bucketName, true)
}
// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix.
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
func CheckValidObjectNamePrefix(objectName string) error {
if len(objectName) > 1024 {
return errors.New("Object name cannot be greater than 1024 characters")
}
if !utf8.ValidString(objectName) {
return errors.New("Object name with non UTF-8 strings are not supported")
}
return nil
}
// CheckValidObjectName - checks if we have a valid input object name.
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
func CheckValidObjectName(objectName string) error {
if strings.TrimSpace(objectName) == "" {
return errors.New("Object name cannot be empty")
}
return CheckValidObjectNamePrefix(objectName)
}
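Exercising the relocated validators; the relaxed check now also accepts uppercase names so existing buckets remain readable, while the strict variant keeps the lowercase rules for newly created ones:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/s3utils"
)

func main() {
	for _, name := range []string{"my.existing.bucket", "MyBucket", "ab"} {
		fmt.Printf("%-20s relaxed=%v strict=%v\n", name,
			s3utils.CheckValidBucketName(name),
			s3utils.CheckValidBucketNameStrict(name))
	}
	fmt.Println(s3utils.CheckValidObjectName("")) // Object name cannot be empty
}
```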

View File

@@ -48,7 +48,7 @@ func (c RequestHeaders) SetMatchETag(etag string) error {
if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.")
}
c.Set("If-Match", etag)
c.Set("If-Match", "\""+etag+"\"")
return nil
}
@@ -57,7 +57,7 @@ func (c RequestHeaders) SetMatchETagExcept(etag string) error {
if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.")
}
c.Set("If-None-Match", etag)
c.Set("If-None-Match", "\""+etag+"\"")
return nil
}

View File

@@ -33,6 +33,7 @@ var awsS3EndpointMap = map[string]string{
"ap-northeast-1": "s3-ap-northeast-1.amazonaws.com",
"ap-northeast-2": "s3-ap-northeast-2.amazonaws.com",
"sa-east-1": "s3-sa-east-1.amazonaws.com",
"us-gov-west-1": "s3-us-gov-west-1.amazonaws.com",
"cn-north-1": "s3.cn-north-1.amazonaws.com.cn",
}

View File

@@ -25,7 +25,7 @@ var s3ErrorResponseMap = map[string]string{
"EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.",
"IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.",
"InternalError": "We encountered an internal error, please try again.",
"InvalidAccessKeyID": "The access key ID you provided does not exist in our records.",
"InvalidAccessKeyId": "The access key ID you provided does not exist in our records.",
"InvalidBucketName": "The specified bucket is not valid.",
"InvalidDigest": "The Content-Md5 you specified is not valid.",
"InvalidRange": "The requested range is not satisfiable",

View File

@@ -1,45 +0,0 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
// SignatureType is type of Authorization requested for a given HTTP request.
type SignatureType int
// Different types of supported signatures - default is Latest i.e SignatureV4.
const (
Latest SignatureType = iota
SignatureV4
SignatureV2
SignatureV4Streaming
)
var emptySHA256 = sum256(nil)
// isV2 - is signature SignatureV2?
func (s SignatureType) isV2() bool {
return s == SignatureV2
}
// isV4 - is signature SignatureV4?
func (s SignatureType) isV4() bool {
return s == SignatureV4 || s == Latest
}
// isStreamingV4 - is signature SignatureV4Streaming?
func (s SignatureType) isStreamingV4() bool {
return s == SignatureV4Streaming
}

View File

@@ -28,7 +28,6 @@ import (
"regexp"
"strings"
"time"
"unicode/utf8"
"github.com/minio/minio-go/pkg/s3utils"
)
@@ -110,6 +109,8 @@ func closeResponse(resp *http.Response) {
}
}
var emptySHA256 = sum256(nil)
// Sentinel URL is the default url value which is invalid.
var sentinelURL = url.URL{}
@@ -146,63 +147,6 @@ func isValidExpiry(expires time.Duration) error {
return nil
}
// We support '.' with bucket names but we fallback to using path
// style requests instead for such buckets.
var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
// Invalid bucket name with double dot.
var invalidDotBucketName = regexp.MustCompile(`\.\.`)
// isValidBucketName - verify bucket name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func isValidBucketName(bucketName string) error {
if strings.TrimSpace(bucketName) == "" {
return ErrInvalidBucketName("Bucket name cannot be empty.")
}
if len(bucketName) < 3 {
return ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters.")
}
if len(bucketName) > 63 {
return ErrInvalidBucketName("Bucket name cannot be greater than 63 characters.")
}
if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' {
return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")
}
if invalidDotBucketName.MatchString(bucketName) {
return ErrInvalidBucketName("Bucket name cannot have successive periods.")
}
if !validBucketName.MatchString(bucketName) {
return ErrInvalidBucketName("Bucket name contains invalid characters.")
}
return nil
}
// isValidObjectName - verify object name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
func isValidObjectName(objectName string) error {
if strings.TrimSpace(objectName) == "" {
return ErrInvalidObjectName("Object name cannot be empty.")
}
if len(objectName) > 1024 {
return ErrInvalidObjectName("Object name cannot be greater than 1024 characters.")
}
if !utf8.ValidString(objectName) {
return ErrInvalidBucketName("Object name with non UTF-8 strings are not supported.")
}
return nil
}
// isValidObjectPrefix - verify if object prefix is valid.
func isValidObjectPrefix(objectPrefix string) error {
if len(objectPrefix) > 1024 {
return ErrInvalidObjectPrefix("Object prefix cannot be greater than 1024 characters.")
}
if !utf8.ValidString(objectPrefix) {
return ErrInvalidObjectPrefix("Object prefix with non UTF-8 strings are not supported.")
}
return nil
}
// make a copy of http.Header
func cloneHeader(h http.Header) http.Header {
h2 := make(http.Header, len(h))
@@ -225,3 +169,26 @@ func filterHeader(header http.Header, filterKeys []string) (filteredHeader http.
}
return filteredHeader
}
// regCred matches credential string in HTTP header
var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
// regSign matches signature string in HTTP header
var regSign = regexp.MustCompile("Signature=([0-9a-f]+)")
// Redact out signature value from authorization string.
func redactSignature(origAuth string) string {
if !strings.HasPrefix(origAuth, signV4Algorithm) {
// Set a temporary redacted auth
return "AWS **REDACTED**:**REDACTED**"
}
/// Signature V4 authorization header.
// Strip out accessKeyID from:
// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
// Strip out 256-bit signature from: Signature=<256-bit signature>
return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
}