Add ObjectOptions to ObjectLayer calls (#6382)

poornas
2018-09-10 09:42:43 -07:00
committed by kannappanr
parent 30d4a2cf53
commit 5c0b98abf0
62 changed files with 812 additions and 7981 deletions

View File

@@ -139,7 +139,7 @@ The full API Reference is available here.
### API Reference : File Object Operations
* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject)
* [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext)
* [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext)
@@ -183,6 +183,10 @@ The full API Reference is available here.
* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
### Full Examples : Bucket lifecycle Operations
* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go)
* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go)
### Full Examples : Bucket notification Operations
* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)

View File

@@ -101,7 +101,11 @@ func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) ma
r["x-amz-metadata-directive"] = "REPLACE"
}
for k, v := range d.userMetadata {
r["x-amz-meta-"+k] = v
if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
r[k] = v
} else {
r["x-amz-meta-"+k] = v
}
}
return r
}
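The hunk above changes how destination user metadata is mapped to request headers: keys that are already recognized amz, standard, or storage-class headers now pass through unmodified, and only the remaining keys receive the `x-amz-meta-` prefix. A minimal sketch of the resulting mapping, with a simplified stand-in for the library's header checks (the keys below are illustrative, not from this diff):

```go
package main

import (
	"fmt"
	"strings"
)

// passthrough is a simplified stand-in for isAmzHeader/isStandardHeader/
// isStorageClassHeader, for illustration only.
func passthrough(k string) bool {
	k = strings.ToLower(k)
	return strings.HasPrefix(k, "x-amz-") || k == "content-type" || k == "expires"
}

func main() {
	userMetadata := map[string]string{
		"X-Amz-Storage-Class": "REDUCED_REDUNDANCY", // recognized header: copied as-is
		"project":             "alpha",              // plain key: becomes x-amz-meta-project
	}
	r := make(map[string]string)
	for k, v := range userMetadata {
		if passthrough(k) {
			r[k] = v
		} else {
			r["x-amz-meta-"+k] = v
		}
	}
	fmt.Println(r)
}
```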
@@ -373,15 +377,6 @@ func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo
fmt.Sprintf("Client side encryption is used in source object %s/%s", src.bucket, src.object))
}
// Since we did a HEAD to get size, we use the ETag
// value to make sure the object has not changed by
// the time we perform the copy. This is done, only if
// the user has not set their own ETag match
// condition.
if src.Headers.Get("x-amz-copy-source-if-match") == "" {
src.SetMatchETagCond(etag)
}
// Check if a segment is specified, and if so, is the
// segment within object bounds?
if src.start != -1 {
@@ -429,7 +424,15 @@ func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo
// Now, handle multipart-copy cases.
// 1. Initiate a new multipart upload.
// 1. Ensure that the object has not been changed while
// we are copying data.
for _, src := range srcs {
if src.Headers.Get("x-amz-copy-source-if-match") == "" {
src.SetMatchETagCond(etag)
}
}
// 2. Initiate a new multipart upload.
// Set user-metadata on the destination object. If no
// user-metadata is specified, and there is only one source,
@@ -449,13 +452,13 @@ func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo
return err
}
// 2. Perform copy part uploads
// 3. Perform copy part uploads
objParts := []CompletePart{}
partIndex := 1
for i, src := range srcs {
h := src.Headers
if src.encryption != nil {
src.encryption.Marshal(h)
encrypt.SSECopy(src.encryption).Marshal(h)
}
// Add destination encryption headers
if dst.encryption != nil {
@@ -480,14 +483,14 @@ func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo
return err
}
if progress != nil {
io.CopyN(ioutil.Discard, progress, start+end-1)
io.CopyN(ioutil.Discard, progress, end-start+1)
}
objParts = append(objParts, complPart)
partIndex++
}
}
// 3. Make final complete-multipart request.
// 4. Make final complete-multipart request.
_, err = c.completeMultipartUpload(ctx, dst.bucket, dst.object, uploadID,
completeMultipartUpload{Parts: objParts})
if err != nil {
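Two fixes in the copy-part loop above are worth spelling out: source encryption material is now marshaled via encrypt.SSECopy, so it is sent as copy-source SSE-C headers rather than as destination headers, and the progress reader is drained by the actual segment length. For an inclusive byte range [start, end] that length is end-start+1, not start+end-1. A quick sketch of the arithmetic (values are illustrative):

```go
package main

import "fmt"

func main() {
	// Inclusive byte range [start, end] of one compose source segment.
	var start, end int64 = 0, 5*1024*1024 - 1

	// Bytes reported to the progress reader after the part copy.
	fmt.Println(end - start + 1) // 5242880 (5 MiB), the correct segment length

	// The previous expression miscounts for this range and diverges
	// badly for non-zero offsets.
	fmt.Println(start + end - 1) // 5242878
}
```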

vendor/github.com/minio/minio-go/api-get-lifecycle.go generated vendored Normal file (77 lines)
View File

@@ -0,0 +1,77 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"context"
"io/ioutil"
"net/http"
"net/url"
"github.com/minio/minio-go/pkg/s3utils"
)
// GetBucketLifecycle - get bucket lifecycle.
func (c Client) GetBucketLifecycle(bucketName string) (string, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return "", err
}
bucketLifecycle, err := c.getBucketLifecycle(bucketName)
if err != nil {
errResponse := ToErrorResponse(err)
if errResponse.Code == "NoSuchLifecycleConfiguration" {
return "", nil
}
return "", err
}
return bucketLifecycle, nil
}
// Request server for current bucket lifecycle.
func (c Client) getBucketLifecycle(bucketName string) (string, error) {
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
urlValues.Set("lifecycle", "")
// Execute GET on bucket to get lifecycle.
resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
})
defer closeResponse(resp)
if err != nil {
return "", err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return "", httpRespToErrorResponse(resp, bucketName, "")
}
}
bucketLifecycleBuf, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
lifecycle := string(bucketLifecycleBuf)
return lifecycle, err
}
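A hedged usage sketch for the new getter, with placeholder endpoint, credentials, and bucket name (not part of this commit):

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Endpoint and keys are placeholders for illustration.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Returns the lifecycle XML, or "" when the bucket has no configuration
	// (NoSuchLifecycleConfiguration is translated to an empty result above).
	lifecycle, err := client.GetBucketLifecycle("my-bucket")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("lifecycle:", lifecycle)
}
```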

vendor/github.com/minio/minio-go/api-get-object-acl.go generated vendored Normal file (136 lines)
View File

@@ -0,0 +1,136 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"context"
"net/http"
"net/url"
)
type accessControlPolicy struct {
Owner struct {
ID string `xml:"ID"`
DisplayName string `xml:"DisplayName"`
} `xml:"Owner"`
AccessControlList struct {
Grant []struct {
Grantee struct {
ID string `xml:"ID"`
DisplayName string `xml:"DisplayName"`
URI string `xml:"URI"`
} `xml:"Grantee"`
Permission string `xml:"Permission"`
} `xml:"Grant"`
} `xml:"AccessControlList"`
}
// GetObjectACL - get object ACLs.
func (c Client) GetObjectACL(bucketName, objectName string) (*ObjectInfo, error) {
resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: url.Values{
"acl": []string{""},
},
})
if err != nil {
return nil, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp, bucketName, objectName)
}
res := &accessControlPolicy{}
if err := xmlDecoder(resp.Body, res); err != nil {
return nil, err
}
objInfo, err := c.statObject(context.Background(), bucketName, objectName, StatObjectOptions{})
if err != nil {
return nil, err
}
cannedACL := getCannedACL(res)
if cannedACL != "" {
objInfo.Metadata.Add("X-Amz-Acl", cannedACL)
return &objInfo, nil
}
grantACL := getAmzGrantACL(res)
for k, v := range grantACL {
objInfo.Metadata[k] = v
}
return &objInfo, nil
}
func getCannedACL(aCPolicy *accessControlPolicy) string {
grants := aCPolicy.AccessControlList.Grant
switch {
case len(grants) == 1:
if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" {
return "private"
}
case len(grants) == 2:
for _, g := range grants {
if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" {
return "authenticated-read"
}
if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" {
return "public-read"
}
if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID {
return "bucket-owner-read"
}
}
case len(grants) == 3:
for _, g := range grants {
if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" {
return "public-read-write"
}
}
}
return ""
}
func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string {
grants := aCPolicy.AccessControlList.Grant
res := map[string][]string{}
for _, g := range grants {
switch {
case g.Permission == "READ":
res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID)
case g.Permission == "WRITE":
res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID)
case g.Permission == "READ_ACP":
res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID)
case g.Permission == "WRITE_ACP":
res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID)
case g.Permission == "FULL_CONTROL":
res["X-Amz-Grant-Full-Control"] = append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID)
}
}
return res
}
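A sketch of how a caller might use the new ACL getter; when the grants correspond to a canned ACL it is surfaced as X-Amz-Acl in the returned object's metadata, otherwise the individual X-Amz-Grant-* headers are filled in (bucket and object names are placeholders):

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	objInfo, err := client.GetObjectACL("my-bucket", "my-object")
	if err != nil {
		log.Fatalln(err)
	}

	// "private", "public-read", "authenticated-read", etc., or "" when only
	// the per-grant headers were populated.
	log.Println("canned ACL:", objInfo.Metadata.Get("X-Amz-Acl"))
	log.Println("read grants:", objInfo.Metadata["X-Amz-Grant-Read"])
}
```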

View File

@@ -44,7 +44,7 @@ func (o GetObjectOptions) Header() http.Header {
for k, v := range o.headers {
headers.Set(k, v)
}
if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() != encrypt.S3 {
if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC {
o.ServerSideEncryption.Marshal(headers)
}
return headers
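The tightened condition means only SSE-C material is marshaled into GET request headers; SSE-S3 and SSE-KMS apply at write time, so their headers are no longer sent on reads. A hedged caller sketch with an illustrative 32-byte customer key:

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// SSE-C: the customer key headers are added to the GET request.
	// An SSE-S3 or SSE-KMS value here would now be ignored for reads.
	sse, err := encrypt.NewSSEC([]byte("32byteslongsecretkeymustbegiven1"))
	if err != nil {
		log.Fatalln(err)
	}

	obj, err := client.GetObject("my-bucket", "my-object",
		minio.GetObjectOptions{ServerSideEncryption: sse})
	if err != nil {
		log.Fatalln(err)
	}
	defer obj.Close()
}
```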

View File

@@ -633,30 +633,27 @@ func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsI
return partsInfo, nil
}
// findUploadID lists all incomplete uploads and finds the uploadID of the matching object name.
func (c Client) findUploadID(bucketName, objectName string) (uploadID string, err error) {
// findUploadIDs lists all incomplete uploads and finds the uploadIDs of the matching object name.
func (c Client) findUploadIDs(bucketName, objectName string) ([]string, error) {
var uploadIDs []string
// Make list incomplete uploads recursive.
isRecursive := true
// Turn off size aggregation of individual parts, in this request.
isAggregateSize := false
// latestUpload to track the latest multipart info for objectName.
var latestUpload ObjectMultipartInfo
// Create done channel to cleanup the routine.
doneCh := make(chan struct{})
defer close(doneCh)
// List all incomplete uploads.
for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, doneCh) {
if mpUpload.Err != nil {
return "", mpUpload.Err
return nil, mpUpload.Err
}
if objectName == mpUpload.Key {
if mpUpload.Initiated.Sub(latestUpload.Initiated) > 0 {
latestUpload = mpUpload
}
uploadIDs = append(uploadIDs, mpUpload.UploadID)
}
}
// Return the latest upload id.
return latestUpload.UploadID, nil
return uploadIDs, nil
}
// getTotalMultipartSize - calculate total uploaded size for a given multipart object.

View File

@@ -178,6 +178,87 @@ func (c Client) removeBucketPolicy(bucketName string) error {
return nil
}
// SetBucketLifecycle set the lifecycle on an existing bucket.
func (c Client) SetBucketLifecycle(bucketName, lifecycle string) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
// If lifecycle is empty then delete it.
if lifecycle == "" {
return c.removeBucketLifecycle(bucketName)
}
// Save the updated lifecycle.
return c.putBucketLifecycle(bucketName, lifecycle)
}
// Saves a new bucket lifecycle.
func (c Client) putBucketLifecycle(bucketName, lifecycle string) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
urlValues.Set("lifecycle", "")
// Content-length is mandatory for put lifecycle request
lifecycleReader := strings.NewReader(lifecycle)
b, err := ioutil.ReadAll(lifecycleReader)
if err != nil {
return err
}
reqMetadata := requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
contentBody: lifecycleReader,
contentLength: int64(len(b)),
contentMD5Base64: sumMD5Base64(b),
}
// Execute PUT to upload a new bucket lifecycle.
resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp, bucketName, "")
}
}
return nil
}
// Remove lifecycle from a bucket.
func (c Client) removeBucketLifecycle(bucketName string) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
urlValues.Set("lifecycle", "")
// Execute DELETE on the bucket to remove its lifecycle configuration.
resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
return err
}
return nil
}
// SetBucketNotification saves a new bucket notification.
func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error {
// Input validation.
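A usage sketch pairing the new setter with the getter added earlier in this commit; the lifecycle XML below is an illustrative expiration rule, not taken from the diff:

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Example rule: expire objects under logs/ after 30 days.
	lifecycle := `<LifecycleConfiguration>
  <Rule>
    <ID>expire-logs</ID>
    <Prefix>logs/</Prefix>
    <Status>Enabled</Status>
    <Expiration><Days>30</Days></Expiration>
  </Rule>
</LifecycleConfiguration>`

	if err := client.SetBucketLifecycle("my-bucket", lifecycle); err != nil {
		log.Fatalln(err)
	}

	// Passing an empty string removes the configuration again via
	// the DELETE ?lifecycle path shown above.
	// _ = client.SetBucketLifecycle("my-bucket", "")
}
```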

View File

@@ -259,7 +259,7 @@ func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID
// Set encryption headers, if any.
customHeader := make(http.Header)
if sse != nil && sse.Type() != encrypt.S3 && sse.Type() != encrypt.KMS {
if sse != nil {
sse.Marshal(customHeader)
}

View File

@@ -233,18 +233,20 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return err
}
// Find multipart upload id of the object to be aborted.
uploadID, err := c.findUploadID(bucketName, objectName)
// Find multipart upload ids of the object to be aborted.
uploadIDs, err := c.findUploadIDs(bucketName, objectName)
if err != nil {
return err
}
if uploadID != "" {
// Upload id found, abort the incomplete multipart upload.
for _, uploadID := range uploadIDs {
// Abort the incomplete multipart upload, based on the upload id passed.
err := c.abortMultipartUpload(context.Background(), bucketName, objectName, uploadID)
if err != nil {
return err
}
}
return nil
}
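With findUploadIDs returning every matching upload ID, RemoveIncompleteUpload now aborts all stale multipart uploads for the object rather than only the most recently initiated one. Nothing changes on the caller side; a brief sketch with placeholder names:

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Aborts every incomplete multipart upload recorded for this object,
	// freeing all orphaned parts on the server.
	if err := client.RemoveIncompleteUpload("my-bucket", "my-object"); err != nil {
		log.Fatalln(err)
	}
}
```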

View File

@@ -99,7 +99,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "v6.0.3"
libraryVersion = "v6.0.6"
)
// User Agent should always follow the below style.
@@ -455,22 +455,9 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
return err
}
} else {
// WORKAROUND for https://github.com/golang/go/issues/13942.
// httputil.DumpResponse does not print response headers for
// all successful calls which have response ContentLength set
// to zero. Keep this workaround until the above bug is fixed.
if resp.ContentLength == 0 {
var buffer bytes.Buffer
if err = resp.Header.Write(&buffer); err != nil {
return err
}
respTrace = buffer.Bytes()
respTrace = append(respTrace, []byte("\r\n")...)
} else {
respTrace, err = httputil.DumpResponse(resp, false)
if err != nil {
return err
}
respTrace, err = httputil.DumpResponse(resp, false)
if err != nil {
return err
}
}
@@ -599,8 +586,8 @@ func (c Client) executeMethod(ctx context.Context, method string, metadata reque
// Initiate the request.
res, err = c.do(req)
if err != nil {
// For supported network errors verify.
if isNetErrorRetryable(err) {
// For supported HTTP request errors, verify whether to retry.
if isHTTPReqErrorRetryable(err) {
continue // Retry.
}
// For other errors, return here no need to retry.

View File

@@ -21,6 +21,8 @@ import (
"context"
"io"
"strings"
"github.com/minio/minio-go/pkg/encrypt"
)
// Core - Inherits Client and adds new methods to expose the low level S3 APIs.
@@ -68,7 +70,7 @@ func (c Core) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string
}
// PutObject - Upload object. Uploads using single PUT call.
func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string) (ObjectInfo, error) {
func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string, sse encrypt.ServerSide) (ObjectInfo, error) {
opts := PutObjectOptions{}
m := make(map[string]string)
for k, v := range metadata {
@@ -89,6 +91,7 @@ func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Ba
}
}
opts.UserMetadata = m
opts.ServerSideEncryption = sse
return c.putObjectDo(context.Background(), bucket, object, data, md5Base64, sha256Hex, size, opts)
}
@@ -104,8 +107,8 @@ func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, de
}
// PutObjectPart - Upload an object part.
func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string) (ObjectPart, error) {
return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, nil)
func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) {
return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse)
}
// ListObjectParts - List uploaded parts of an incomplete upload.
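Because Core.PutObject and Core.PutObjectPart now take an encrypt.ServerSide argument, low-level callers must state the encryption choice explicitly (nil for none). A minimal sketch with placeholder credentials, assuming SSE-S3 via encrypt.NewSSE():

```go
package main

import (
	"log"
	"strings"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	core, err := minio.NewCore("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	data := strings.NewReader("hello")

	// New trailing parameter: encrypt.NewSSE() requests SSE-S3;
	// pass nil to upload without server-side encryption.
	_, err = core.PutObject("my-bucket", "my-object", data, int64(data.Len()),
		"", "", map[string]string{"Content-Type": "text/plain"}, encrypt.NewSSE())
	if err != nil {
		log.Fatalln(err)
	}
}
```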

File diff suppressed because it is too large

View File

@@ -85,36 +85,32 @@ func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duratio
return attemptCh
}
// isNetErrorRetryable - is network error retryable.
func isNetErrorRetryable(err error) bool {
// isHTTPReqErrorRetryable - is the HTTP request error retryable, such
// as i/o timeout, connection broken, etc.
func isHTTPReqErrorRetryable(err error) bool {
if err == nil {
return false
}
switch err.(type) {
case net.Error:
switch err.(type) {
switch e := err.(type) {
case *url.Error:
switch e.Err.(type) {
case *net.DNSError, *net.OpError, net.UnknownNetworkError:
return true
case *url.Error:
// For a URL error, where it replies back "connection closed"
// retry again.
if strings.Contains(err.Error(), "Connection closed by foreign host") {
return true
}
default:
if strings.Contains(err.Error(), "net/http: TLS handshake timeout") {
// If error is - tlsHandshakeTimeoutError, retry.
return true
} else if strings.Contains(err.Error(), "i/o timeout") {
// If error is - tcp timeoutError, retry.
return true
} else if strings.Contains(err.Error(), "connection timed out") {
// If err is a net.Dial timeout, retry.
return true
} else if strings.Contains(err.Error(), "net/http: HTTP/1.x transport connection broken") {
// If error is transport connection broken, retry.
return true
}
}
if strings.Contains(err.Error(), "Connection closed by foreign host") {
return true
} else if strings.Contains(err.Error(), "net/http: TLS handshake timeout") {
// If error is - tlsHandshakeTimeoutError, retry.
return true
} else if strings.Contains(err.Error(), "i/o timeout") {
// If error is - tcp timeoutError, retry.
return true
} else if strings.Contains(err.Error(), "connection timed out") {
// If err is a net.Dial timeout, retry.
return true
} else if strings.Contains(err.Error(), "net/http: HTTP/1.x transport connection broken") {
// If error is transport connection broken, retry.
return true
}
}
return false

View File

@@ -223,6 +223,7 @@ var supportedHeaders = []string{
"content-disposition",
"content-language",
"x-amz-website-redirect-location",
"expires",
// Add more supported headers here.
}
@@ -267,5 +268,5 @@ func isSSEHeader(headerKey string) bool {
func isAmzHeader(headerKey string) bool {
key := strings.ToLower(headerKey)
return strings.HasPrefix(key, "x-amz-meta-") || key == "x-amz-acl"
return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey)
}
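The widened isAmzHeader check is what lets grant, ACL, and SSE headers flow through getUserMetaHeadersMap unprefixed in the copy-object hunk near the top of this diff. A small self-contained sketch of the classification, with a simplified stand-in for isSSEHeader:

```go
package main

import (
	"fmt"
	"strings"
)

// Simplified stand-in for the library's isSSEHeader, for illustration only.
func isSSEHeader(headerKey string) bool {
	return strings.HasPrefix(strings.ToLower(headerKey), "x-amz-server-side-encryption")
}

func isAmzHeader(headerKey string) bool {
	key := strings.ToLower(headerKey)
	return strings.HasPrefix(key, "x-amz-meta-") ||
		strings.HasPrefix(key, "x-amz-grant-") ||
		key == "x-amz-acl" ||
		isSSEHeader(headerKey)
}

func main() {
	// X-Amz-Grant-Read -> true, X-Amz-Acl -> true,
	// Expires -> false (it is a standard header), project -> false.
	for _, k := range []string{"X-Amz-Grant-Read", "X-Amz-Acl", "Expires", "project"} {
		fmt.Println(k, isAmzHeader(k))
	}
}
```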

vendor/vendor.json vendored (6 lines changed)
View File

@@ -645,10 +645,10 @@
"revisionTime": "2016-02-29T08:42:30-08:00"
},
{
"checksumSHA1": "hdWmWbGpljSQMBOcpcwhAnP2aaQ=",
"checksumSHA1": "Wbe5TjRIOZiWVu4l4dwzCw/uP9w=",
"path": "github.com/minio/minio-go",
"revision": "10531abd0af1579a12dc1977d67c0fec2b348679",
"revisionTime": "2018-06-13T23:01:28Z"
"revision": "519049881e73150d1bbeac1d443e7c96b76e1b8d",
"revisionTime": "2018-09-05T00:47:51Z"
},
{
"checksumSHA1": "Qsj+6JPmJ8R5rFNQSHqRb8xAwOw=",