Mirror of https://github.com/minio/minio.git (synced 2024-12-24 06:05:55 -05:00)

Commit 4f73fd9487 (parent a7f6e14370): Unify gateway and object layer. (#5487)

* Unify gateway and object layer. Bring bucket policies into object layer.
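For orientation before the hunks: the diff below moves bucket-policy handling and V2 listing behind the object layer. Here is a minimal sketch of the method set involved, read off the hunks that follow; the grouping into a single interface is an assumption for illustration, since the object layer's actual interface definition is not shown in this diff.

// Sketch only: method names and signatures are taken from the diff below;
// the real interface layout is assumed, not quoted.
type bucketPolicyObjectLayer interface {
    SetBucketPolicy(bucket string, p policy.BucketAccessPolicy) error
    GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error)
    DeleteBucketPolicy(bucket string) error
    RefreshBucketPolicy(bucket string) error

    ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (ListObjectsV2Info, error)

    IsNotificationSupported() bool
    IsEncryptionSupported() bool
}

Both the FS/XL backends and the gateway backends are expected to provide these methods, which is what allows the duplicated gateway-only handlers further down to be deleted.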
@@ -40,37 +40,10 @@ func validateListObjectsArgs(prefix, marker, delimiter string, maxKeys int) APIE
if delimiter != "" && delimiter != "/" {
return ErrNotImplemented
}
// Marker is set validate pre-condition.
if marker != "" {
// Marker not common with prefix is not implemented.
if !hasPrefix(marker, prefix) {
return ErrInvalidPrefixMarker
}
}
// Success.
return ErrNone
}

// Validate all the ListObjectsV2 query arguments, returns an APIErrorCode
// if one of the args does not meet the required conditions.
// Special conditions required by Minio server are as below
// - delimiter if set should be equal to '/', otherwise the request is rejected.
func validateGatewayListObjectsV2Args(prefix, marker, delimiter string, maxKeys int) APIErrorCode {
// Max keys cannot be negative.
if maxKeys < 0 {
return ErrInvalidMaxKeys
}

/// Minio special conditions for ListObjects.

// Verify if delimiter is anything other than '/', which we do not support.
if delimiter != "" && delimiter != "/" {
return ErrNotImplemented
}

return ErrNone
}
// ListObjectsV2Handler - GET Bucket (List Objects) Version 2.
// --------------------------
// This implementation of the GET operation returns some or all (up to 1000)
@@ -104,17 +77,10 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
// Then we need to use 'start-after' as marker instead.
marker = startAfter
}

// Validate the query params before beginning to serve the request.
// fetch-owner is not validated since it is a boolean
if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone {
// return empty response if invalid marker
//TODO: avoid this pattern when moving to errors package
if s3Error == ErrInvalidPrefixMarker {
listObjectsInfo := ListObjectsInfo{}
response := generateListObjectsV2Response(bucket, prefix, token, marker, startAfter, delimiter, fetchOwner, listObjectsInfo.IsTruncated, maxKeys, listObjectsInfo.Objects, listObjectsInfo.Prefixes)
writeSuccessResponseXML(w, encodeResponse(response))
return
}
writeErrorResponse(w, s3Error, r.URL)
return
}
@@ -122,13 +88,13 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
// Initiate a list objects operation based on the input params.
// On success would return back ListObjectsInfo object to be
// marshalled into S3 compatible XML header.
listObjectsInfo, err := objectAPI.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
listObjectsV2Info, err := objectAPI.ListObjectsV2(bucket, prefix, marker, delimiter, maxKeys, fetchOwner, startAfter)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}

response := generateListObjectsV2Response(bucket, prefix, token, listObjectsInfo.NextMarker, startAfter, delimiter, fetchOwner, listObjectsInfo.IsTruncated, maxKeys, listObjectsInfo.Objects, listObjectsInfo.Prefixes)
response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter, delimiter, fetchOwner, listObjectsV2Info.IsTruncated, maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes)

// Write success response.
writeSuccessResponseXML(w, encodeResponse(response))
@@ -158,15 +124,13 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
// Extract all the listObjectsV1 query params to their native values.
prefix, marker, delimiter, maxKeys, _ := getListObjectsV1Args(r.URL.Query())

// Validate all the query params before beginning to serve the request.
// Validate the maxKeys lowerbound. When maxKeys > 1000, S3 returns 1000 but
// does not throw an error.
if maxKeys < 0 {
writeErrorResponse(w, ErrInvalidMaxKeys, r.URL)
return
} // Validate all the query params before beginning to serve the request.
if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone {
// return empty response if invalid marker
//TODO: avoid this pattern when moving to errors package
if s3Error == ErrInvalidPrefixMarker {
response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, maxKeys, ListObjectsInfo{})
writeSuccessResponseXML(w, encodeResponse(response))
return
}
writeErrorResponse(w, s3Error, r.URL)
return
}
@@ -40,7 +40,8 @@ import (
// Enforces bucket policies for a bucket for a given action.
func enforceBucketPolicy(bucket, action, resource, referer, sourceIP string, queryParams url.Values) (s3Error APIErrorCode) {
// Verify if bucket actually exists
if err := checkBucketExist(bucket, newObjectLayerFn()); err != nil {
objAPI := newObjectLayerFn()
if err := checkBucketExist(bucket, objAPI); err != nil {
err = errors.Cause(err)
switch err.(type) {
case BucketNameInvalid:
@@ -55,12 +56,11 @@ func enforceBucketPolicy(bucket, action, resource, referer, sourceIP string, que
return ErrInternalError
}

if globalBucketPolicies == nil {
// Fetch bucket policy, if policy is not set return access denied.
p, err := objAPI.GetBucketPolicy(bucket)
if err != nil {
return ErrAccessDenied
}

// Fetch bucket policy, if policy is not set return access denied.
p := globalBucketPolicies.GetBucketPolicy(bucket)
if reflect.DeepEqual(p, emptyBucketPolicy) {
return ErrAccessDenied
}
@@ -89,12 +89,12 @@ func enforceBucketPolicy(bucket, action, resource, referer, sourceIP string, que
}

// Check if the action is allowed on the bucket/prefix.
func isBucketActionAllowed(action, bucket, prefix string) bool {
if globalBucketPolicies == nil {
func isBucketActionAllowed(action, bucket, prefix string, objectAPI ObjectLayer) bool {

bp, err := objectAPI.GetBucketPolicy(bucket)
if err != nil {
return false
}

bp := globalBucketPolicies.GetBucketPolicy(bucket)
if reflect.DeepEqual(bp, emptyBucketPolicy) {
return false
}
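To make the signature change above concrete, here is a hedged sketch of a call site: anonymous-access checks now hand the object layer to isBucketActionAllowed, and the policy is read through objectAPI.GetBucketPolicy instead of the removed globalBucketPolicies map. The wrapper below is hypothetical; the real handlers pass their own api.ObjectAPI() value, and the "s3:ListObject" action string is taken from the test table later in this diff.

// Hypothetical helper, for illustration only.
func anonListAllowedSketch(objAPI ObjectLayer, bucket, prefix string) APIErrorCode {
    if isBucketActionAllowed("s3:ListObject", bucket, prefix, objAPI) {
        // Anonymous listing is permitted by the bucket policy.
        return ErrNone
    }
    return ErrAccessDenied
}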
@@ -625,13 +625,6 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
return
}

bucketLock := globalNSMutex.NewNSLock(bucket, "")
if bucketLock.GetRLock(globalObjectTimeout) != nil {
writeErrorResponseHeadersOnly(w, ErrOperationTimedOut)
return
}
defer bucketLock.RUnlock()

if _, err := objectAPI.GetBucketInfo(bucket); err != nil {
writeErrorResponseHeadersOnly(w, toAPIErrorCode(err))
return
@@ -657,37 +650,12 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
vars := mux.Vars(r)
bucket := vars["bucket"]

bucketLock := globalNSMutex.NewNSLock(bucket, "")
if bucketLock.GetLock(globalObjectTimeout) != nil {
writeErrorResponse(w, ErrOperationTimedOut, r.URL)
return
}
defer bucketLock.Unlock()

// Attempt to delete bucket.
if err := objectAPI.DeleteBucket(bucket); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}

// Delete bucket access policy, if present - ignore any errors.
_ = removeBucketPolicy(bucket, objectAPI)

// Notify all peers (including self) to update in-memory state
S3PeersUpdateBucketPolicy(bucket, policyChange{true, policy.BucketAccessPolicy{}})

// Delete notification config, if present - ignore any errors.
_ = removeNotificationConfig(bucket, objectAPI)

// Notify all peers (including self) to update in-memory state
S3PeersUpdateBucketNotification(bucket, nil)

// Delete listener config, if present - ignore any errors.
_ = removeListenerConfig(bucket, objectAPI)

// Notify all peers (including self) to update in-memory state
S3PeersUpdateBucketListener(bucket, []listenerConfig{})

// Write success response.
writeSuccessNoContent(w)
}
@ -155,7 +155,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
|
||||
// ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
|
||||
// sets the bucket policy using the policy statement generated from `getReadOnlyBucketStatement` so that the
|
||||
// unsigned request goes through and it is validated again.
|
||||
ExecObjectLayerAPIAnonTest(t, "TestGetBucketLocationHandler", bucketName, "", instanceType, apiRouter, anonReq, getReadOnlyBucketStatement)
|
||||
ExecObjectLayerAPIAnonTest(t, obj, "TestGetBucketLocationHandler", bucketName, "", instanceType, apiRouter, anonReq, getReadOnlyBucketStatement)
|
||||
|
||||
// HTTP request for testing when `objectLayer` is set to `nil`.
|
||||
// There is no need to use an existing bucket and valid input for creating the request
|
||||
@ -261,7 +261,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
|
||||
// ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
|
||||
// sets the bucket policy using the policy statement generated from `getReadOnlyBucketStatement` so that the
|
||||
// unsigned request goes through and it is validated again.
|
||||
ExecObjectLayerAPIAnonTest(t, "TestHeadBucketHandler", bucketName, "", instanceType, apiRouter, anonReq, getReadOnlyBucketStatement)
|
||||
ExecObjectLayerAPIAnonTest(t, obj, "TestHeadBucketHandler", bucketName, "", instanceType, apiRouter, anonReq, getReadOnlyBucketStatement)
|
||||
|
||||
// HTTP request for testing when `objectLayer` is set to `nil`.
|
||||
// There is no need to use an existing bucket and valid input for creating the request
|
||||
@ -496,7 +496,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
|
||||
// ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
|
||||
// sets the bucket policy using the policy statement generated from `getWriteOnlyBucketStatement` so that the
|
||||
// unsigned request goes through and it is validated again.
|
||||
ExecObjectLayerAPIAnonTest(t, "TestListMultipartUploadsHandler", bucketName, "", instanceType, apiRouter, anonReq, getWriteOnlyBucketStatement)
|
||||
ExecObjectLayerAPIAnonTest(t, obj, "TestListMultipartUploadsHandler", bucketName, "", instanceType, apiRouter, anonReq, getWriteOnlyBucketStatement)
|
||||
|
||||
// HTTP request for testing when `objectLayer` is set to `nil`.
|
||||
// There is no need to use an existing bucket and valid input for creating the request
|
||||
@ -594,7 +594,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
|
||||
// ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
|
||||
// sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the
|
||||
// unsigned request goes through and it is validated again.
|
||||
ExecObjectLayerAPIAnonTest(t, "ListBucketsHandler", "", "", instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement)
|
||||
ExecObjectLayerAPIAnonTest(t, obj, "ListBucketsHandler", "", "", instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement)
|
||||
|
||||
// HTTP request for testing when `objectLayer` is set to `nil`.
|
||||
// There is no need to use an existing bucket and valid input for creating the request
|
||||
@ -822,12 +822,8 @@ func testIsBucketActionAllowedHandler(obj ObjectLayer, instanceType, bucketName
|
||||
{"s3:ListObject", "mybucket", "abc", false, false},
|
||||
}
|
||||
for i, testCase := range testCases {
|
||||
if testCase.isGlobalPoliciesNil {
|
||||
globalBucketPolicies = nil
|
||||
} else {
|
||||
initBucketPolicies(obj)
|
||||
}
|
||||
isAllowed := isBucketActionAllowed(testCase.action, testCase.bucket, testCase.prefix)
|
||||
initBucketPolicies(obj)
|
||||
isAllowed := isBucketActionAllowed(testCase.action, testCase.bucket, testCase.prefix, obj)
|
||||
if isAllowed != testCase.shouldPass {
|
||||
t.Errorf("Case %d: Expected the response status to be `%t`, but instead found `%t`", i+1, testCase.shouldPass, isAllowed)
|
||||
}
|
||||
|
@@ -16,8 +16,6 @@

package cmd

import "encoding/json"

// BucketMetaState - Interface to update bucket metadata in-memory
// state.
type BucketMetaState interface {
@@ -79,13 +77,7 @@ func (lc *localBucketMetaState) UpdateBucketPolicy(args *SetBucketPolicyPeerArgs
if objAPI == nil {
return errServerNotInitialized
}

var pCh policyChange
if err := json.Unmarshal(args.PChBytes, &pCh); err != nil {
return err
}

return globalBucketPolicies.SetBucketPolicy(args.Bucket, pCh)
return objAPI.RefreshBucketPolicy(args.Bucket)
}

// localBucketMetaState.SendEvent - sends event to local event notifier via
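The net effect of the hunk above: a peer update no longer carries serialized policy bytes, it simply asks the local object layer to re-read the stored policy. A minimal sketch of the receiving side, assuming the usual newObjectLayerFn accessor (the RPC plumbing itself is unchanged and not shown here):

// Sketch of peer-side handling after this change; localBucketMetaState
// now reduces UpdateBucketPolicy to a RefreshBucketPolicy call.
func refreshPolicyOnPeerSketch(bucket string) error {
    objAPI := newObjectLayerFn()
    if objAPI == nil {
        return errServerNotInitialized
    }
    // Reload the bucket's policy from the backend into the in-memory cache.
    return objAPI.RefreshBucketPolicy(bucket)
}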
@ -43,12 +43,17 @@ const (
|
||||
// not enabled on the bucket, the operation returns an empty
|
||||
// NotificationConfiguration element.
|
||||
func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if !objAPI.IsNotificationSupported() {
|
||||
writeErrorResponse(w, ErrNotImplemented, r.URL)
|
||||
return
|
||||
}
|
||||
if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
@ -96,12 +101,17 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
|
||||
// By default, your bucket has no event notifications configured. That is,
|
||||
// the notification configuration will be an empty NotificationConfiguration.
|
||||
func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if !objectAPI.IsNotificationSupported() {
|
||||
writeErrorResponse(w, ErrNotImplemented, r.URL)
|
||||
return
|
||||
}
|
||||
if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
@ -294,7 +304,10 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if !objAPI.IsNotificationSupported() {
|
||||
writeErrorResponse(w, ErrNotImplemented, r.URL)
|
||||
return
|
||||
}
|
||||
if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
|
@ -28,6 +28,7 @@ import (
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
mux "github.com/gorilla/mux"
|
||||
"github.com/minio/minio-go/pkg/policy"
|
||||
"github.com/minio/minio/pkg/errors"
|
||||
"github.com/minio/minio/pkg/wildcard"
|
||||
)
|
||||
|
||||
@ -261,13 +262,29 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Parse validate and save bucket policy.
|
||||
if s3Error := parseAndPersistBucketPolicy(bucket, policyBytes, objAPI); s3Error != ErrNone {
|
||||
policyInfo := policy.BucketAccessPolicy{}
|
||||
if err = json.Unmarshal(policyBytes, &policyInfo); err != nil {
|
||||
writeErrorResponse(w, ErrInvalidPolicyDocument, r.URL)
|
||||
return
|
||||
}
|
||||
// Parse check bucket policy.
|
||||
if s3Error := checkBucketPolicyResources(bucket, policyInfo); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err = objAPI.SetBucketPolicy(bucket, policyInfo); err != nil {
|
||||
err = errors.Cause(err)
|
||||
switch err.(type) {
|
||||
case NotImplemented:
|
||||
// Return error for invalid bucket name.
|
||||
writeErrorResponse(w, ErrNotImplemented, r.URL)
|
||||
default:
|
||||
writeErrorResponse(w, ErrMalformedPolicy, r.URL)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Success.
|
||||
writeSuccessNoContent(w)
|
||||
}
|
||||
@ -300,10 +317,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
|
||||
|
||||
// Delete bucket access policy, by passing an empty policy
|
||||
// struct.
|
||||
err = persistAndNotifyBucketPolicyChange(bucket, policyChange{
|
||||
true, policy.BucketAccessPolicy{},
|
||||
}, objAPI)
|
||||
if err != nil {
|
||||
if err := objAPI.DeleteBucketPolicy(bucket); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
@ -339,7 +353,7 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
|
||||
}
|
||||
|
||||
// Read bucket access policy.
|
||||
policy, err := readBucketPolicy(bucket, objAPI)
|
||||
policy, err := objAPI.GetBucketPolicy(bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
|
@ -430,7 +430,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
|
||||
// ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
|
||||
// sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the
|
||||
// unsigned request goes through and it is validated again.
|
||||
ExecObjectLayerAPIAnonTest(t, "PutBucketPolicyHandler", bucketName, "", instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement)
|
||||
ExecObjectLayerAPIAnonTest(t, obj, "PutBucketPolicyHandler", bucketName, "", instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement)
|
||||
|
||||
// HTTP request for testing when `objectLayer` is set to `nil`.
|
||||
// There is no need to use an existing bucket and valid input for creating the request
|
||||
@ -447,6 +447,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
|
||||
// execute the object layer set to `nil` test.
|
||||
// `ExecObjectLayerAPINilTest` manages the operation.
|
||||
ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
|
||||
|
||||
}
|
||||
|
||||
// Wrapper for calling Get Bucket Policy HTTP handler tests for both XL multiple disks and single node setup.
|
||||
@ -619,7 +620,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
|
||||
// ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
|
||||
// sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the
|
||||
// unsigned request goes through and it is validated again.
|
||||
ExecObjectLayerAPIAnonTest(t, "GetBucketPolicyHandler", bucketName, "", instanceType, apiRouter, anonReq, getReadOnlyObjectStatement)
|
||||
ExecObjectLayerAPIAnonTest(t, obj, "GetBucketPolicyHandler", bucketName, "", instanceType, apiRouter, anonReq, getReadOnlyObjectStatement)
|
||||
|
||||
// HTTP request for testing when `objectLayer` is set to `nil`.
|
||||
// There is no need to use an existing bucket and valid input for creating the request
|
||||
@ -824,7 +825,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
|
||||
// ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
|
||||
// sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the
|
||||
// unsigned request goes through and it is validated again.
|
||||
ExecObjectLayerAPIAnonTest(t, "DeleteBucketPolicyHandler", bucketName, "", instanceType, apiRouter, anonReq, getReadOnlyObjectStatement)
|
||||
ExecObjectLayerAPIAnonTest(t, obj, "DeleteBucketPolicyHandler", bucketName, "", instanceType, apiRouter, anonReq, getReadOnlyObjectStatement)
|
||||
|
||||
// HTTP request for testing when `objectLayer` is set to `nil`.
|
||||
// There is no need to use an existing bucket and valid input for creating the request
|
||||
|
@ -37,9 +37,6 @@ const (
|
||||
bucketPolicyConfig = "policy.json"
|
||||
)
|
||||
|
||||
// Variable represents bucket policies in memory.
|
||||
var globalBucketPolicies *bucketPolicies
|
||||
|
||||
// Global bucket policies list, policies are enforced on each bucket looking
|
||||
// through the policies here.
|
||||
type bucketPolicies struct {
|
||||
@ -49,16 +46,6 @@ type bucketPolicies struct {
|
||||
bucketPolicyConfigs map[string]policy.BucketAccessPolicy
|
||||
}
|
||||
|
||||
// Represent a policy change
|
||||
type policyChange struct {
|
||||
// isRemove is true if the policy change is to delete the
|
||||
// policy on a bucket.
|
||||
IsRemove bool
|
||||
|
||||
// represents the new policy for the bucket
|
||||
BktPolicy policy.BucketAccessPolicy
|
||||
}
|
||||
|
||||
// Fetch bucket policy for a given bucket.
|
||||
func (bp bucketPolicies) GetBucketPolicy(bucket string) policy.BucketAccessPolicy {
|
||||
bp.rwMutex.RLock()
|
||||
@ -68,31 +55,38 @@ func (bp bucketPolicies) GetBucketPolicy(bucket string) policy.BucketAccessPolic
|
||||
|
||||
// Set a new bucket policy for a bucket, this operation will overwrite
|
||||
// any previous bucket policies for the bucket.
|
||||
func (bp *bucketPolicies) SetBucketPolicy(bucket string, pCh policyChange) error {
|
||||
func (bp *bucketPolicies) SetBucketPolicy(bucket string, newpolicy policy.BucketAccessPolicy) error {
|
||||
bp.rwMutex.Lock()
|
||||
defer bp.rwMutex.Unlock()
|
||||
|
||||
if pCh.IsRemove {
|
||||
delete(bp.bucketPolicyConfigs, bucket)
|
||||
} else {
|
||||
if reflect.DeepEqual(pCh.BktPolicy, emptyBucketPolicy) {
|
||||
return errInvalidArgument
|
||||
}
|
||||
bp.bucketPolicyConfigs[bucket] = pCh.BktPolicy
|
||||
if reflect.DeepEqual(newpolicy, emptyBucketPolicy) {
|
||||
return errInvalidArgument
|
||||
}
|
||||
bp.bucketPolicyConfigs[bucket] = newpolicy
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Loads all bucket policies from persistent layer.
|
||||
func loadAllBucketPolicies(objAPI ObjectLayer) (policies map[string]policy.BucketAccessPolicy, err error) {
|
||||
// Delete bucket policy from struct for a given bucket.
|
||||
func (bp *bucketPolicies) DeleteBucketPolicy(bucket string) error {
|
||||
bp.rwMutex.Lock()
|
||||
defer bp.rwMutex.Unlock()
|
||||
delete(bp.bucketPolicyConfigs, bucket)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Initialize all bucket policies.
|
||||
func initBucketPolicies(objAPI ObjectLayer) error {
|
||||
if objAPI == nil {
|
||||
return errInvalidArgument
|
||||
}
|
||||
// List buckets to proceed loading all bucket policy configuration.
|
||||
buckets, err := objAPI.ListBuckets()
|
||||
if err != nil {
|
||||
return nil, errors.Cause(err)
|
||||
return errors.Cause(err)
|
||||
}
|
||||
|
||||
policies = make(map[string]policy.BucketAccessPolicy)
|
||||
var pErrs []error
|
||||
policies := make(map[string]policy.BucketAccessPolicy)
|
||||
// Loads bucket policy.
|
||||
for _, bucket := range buckets {
|
||||
bp, pErr := readBucketPolicy(bucket.Name, objAPI)
|
||||
@ -101,7 +95,7 @@ func loadAllBucketPolicies(objAPI ObjectLayer) (policies map[string]policy.Bucke
|
||||
// other unexpected errors during net.Dial.
|
||||
if !errors.IsErrIgnored(pErr, errDiskNotFound) {
|
||||
if !isErrBucketPolicyNotFound(pErr) {
|
||||
pErrs = append(pErrs, pErr)
|
||||
return errors.Cause(pErr)
|
||||
}
|
||||
}
|
||||
// Continue to load other bucket policies if possible.
|
||||
@ -109,35 +103,17 @@ func loadAllBucketPolicies(objAPI ObjectLayer) (policies map[string]policy.Bucke
|
||||
}
|
||||
policies[bucket.Name] = bp
|
||||
}
|
||||
|
||||
// Look for any errors occurred while reading bucket policies.
|
||||
for _, pErr := range pErrs {
|
||||
if pErr != nil {
|
||||
return policies, pErr
|
||||
}
|
||||
}
|
||||
|
||||
// Success.
|
||||
return policies, nil
|
||||
}
|
||||
|
||||
// Initialize all bucket policies.
|
||||
func initBucketPolicies(objAPI ObjectLayer) error {
|
||||
if objAPI == nil {
|
||||
return errInvalidArgument
|
||||
}
|
||||
|
||||
// Read all bucket policies.
|
||||
policies, err := loadAllBucketPolicies(objAPI)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Populate global bucket collection.
|
||||
globalBucketPolicies = &bucketPolicies{
|
||||
bPolicies := &bucketPolicies{
|
||||
rwMutex: &sync.RWMutex{},
|
||||
bucketPolicyConfigs: policies,
|
||||
}
|
||||
switch objAPI.(type) {
|
||||
case *fsObjects:
|
||||
objAPI.(*fsObjects).bucketPolicies = bPolicies
|
||||
case *xlObjects:
|
||||
objAPI.(*xlObjects).bucketPolicies = bPolicies
|
||||
}
|
||||
|
||||
// Success.
|
||||
return nil
|
||||
@ -152,7 +128,7 @@ func readBucketPolicyJSON(bucket string, objAPI ObjectLayer) (bucketPolicyReader
|
||||
err = objAPI.GetObject(minioMetaBucket, policyPath, 0, -1, &buffer, "")
|
||||
if err != nil {
|
||||
if isErrObjectNotFound(err) || isErrIncompleteBody(err) {
|
||||
return nil, BucketPolicyNotFound{Bucket: bucket}
|
||||
return nil, PolicyNotFound{Bucket: bucket}
|
||||
}
|
||||
errorIf(err, "Unable to load policy for the bucket %s.", bucket)
|
||||
return nil, errors.Cause(err)
|
||||
@ -215,61 +191,25 @@ func writeBucketPolicy(bucket string, objAPI ObjectLayer, bpy policy.BucketAcces
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseAndPersistBucketPolicy(bucket string, policyBytes []byte, objAPI ObjectLayer) APIErrorCode {
|
||||
// Parse bucket policy.
|
||||
var bktPolicy policy.BucketAccessPolicy
|
||||
err := parseBucketPolicy(bytes.NewReader(policyBytes), &bktPolicy)
|
||||
if err != nil {
|
||||
return ErrInvalidPolicyDocument
|
||||
}
|
||||
|
||||
// Parse check bucket policy.
|
||||
if s3Error := checkBucketPolicyResources(bucket, bktPolicy); s3Error != ErrNone {
|
||||
return s3Error
|
||||
}
|
||||
|
||||
// Acquire a write lock on bucket before modifying its configuration.
|
||||
bucketLock := globalNSMutex.NewNSLock(bucket, "")
|
||||
if bucketLock.GetLock(globalOperationTimeout) != nil {
|
||||
return ErrOperationTimedOut
|
||||
}
|
||||
// Release lock after notifying peers
|
||||
defer bucketLock.Unlock()
|
||||
|
||||
// Save bucket policy.
|
||||
if err = persistAndNotifyBucketPolicyChange(bucket, policyChange{false, bktPolicy}, objAPI); err != nil {
|
||||
switch err.(type) {
|
||||
case BucketNameInvalid:
|
||||
return ErrInvalidBucketName
|
||||
case BucketNotFound:
|
||||
return ErrNoSuchBucket
|
||||
default:
|
||||
errorIf(err, "Unable to save bucket policy.")
|
||||
return ErrInternalError
|
||||
}
|
||||
}
|
||||
return ErrNone
|
||||
}
|
||||
|
||||
// persistAndNotifyBucketPolicyChange - takes a policyChange argument,
|
||||
// persists it to storage, and notify nodes in the cluster about the
|
||||
// change. In-memory state is updated in response to the notification.
|
||||
func persistAndNotifyBucketPolicyChange(bucket string, pCh policyChange, objAPI ObjectLayer) error {
|
||||
if pCh.IsRemove {
|
||||
func persistAndNotifyBucketPolicyChange(bucket string, isRemove bool, bktPolicy policy.BucketAccessPolicy, objAPI ObjectLayer) error {
|
||||
if isRemove {
|
||||
err := removeBucketPolicy(bucket, objAPI)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if reflect.DeepEqual(pCh.BktPolicy, emptyBucketPolicy) {
|
||||
if reflect.DeepEqual(bktPolicy, emptyBucketPolicy) {
|
||||
return errInvalidArgument
|
||||
}
|
||||
if err := writeBucketPolicy(bucket, objAPI, pCh.BktPolicy); err != nil {
|
||||
if err := writeBucketPolicy(bucket, objAPI, bktPolicy); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Notify all peers (including self) to update in-memory state
|
||||
S3PeersUpdateBucketPolicy(bucket, pCh)
|
||||
S3PeersUpdateBucketPolicy(bucket)
|
||||
return nil
|
||||
}
|
||||
|
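Before moving on to cmd/fs-v1.go, a quick sketch of how a backend is expected to drive the reshaped helper above. The wrapper names are illustrative only, but the call shapes match the new persistAndNotifyBucketPolicyChange signature and mirror the fsObjects methods shown below.

// Illustrative wrappers only; fsObjects wires these calls the same way.
func setPolicySketch(objAPI ObjectLayer, bucket string, p policy.BucketAccessPolicy) error {
    // Persist policy.json and notify peers, which then call RefreshBucketPolicy.
    return persistAndNotifyBucketPolicyChange(bucket, false, p, objAPI)
}

func deletePolicySketch(objAPI ObjectLayer, bucket string) error {
    // isRemove=true removes the stored policy before notifying peers.
    return persistAndNotifyBucketPolicyChange(bucket, true, emptyBucketPolicy, objAPI)
}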
102 cmd/fs-v1.go
@ -24,11 +24,13 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio-go/pkg/policy"
|
||||
"github.com/minio/minio/pkg/errors"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/lock"
|
||||
@ -58,6 +60,9 @@ type fsObjects struct {
|
||||
|
||||
// To manage the appendRoutine go-routines
|
||||
nsMutex *nsLockMap
|
||||
|
||||
// Variable represents bucket policies in memory.
|
||||
bucketPolicies *bucketPolicies
|
||||
}
|
||||
|
||||
// Represents the background append file.
|
||||
@ -254,6 +259,11 @@ func (fs *fsObjects) MakeBucketWithLocation(bucket, location string) error {
|
||||
|
||||
// GetBucketInfo - fetch bucket metadata info.
|
||||
func (fs *fsObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
|
||||
bucketLock := fs.nsMutex.NewNSLock(bucket, "")
|
||||
if e := bucketLock.GetRLock(globalObjectTimeout); e != nil {
|
||||
return bi, e
|
||||
}
|
||||
defer bucketLock.RUnlock()
|
||||
st, err := fs.statBucketDir(bucket)
|
||||
if err != nil {
|
||||
return bi, toObjectErr(err, bucket)
|
||||
@ -310,6 +320,11 @@ func (fs *fsObjects) ListBuckets() ([]BucketInfo, error) {
|
||||
// DeleteBucket - delete a bucket and all the metadata associated
|
||||
// with the bucket including pending multipart, object metadata.
|
||||
func (fs *fsObjects) DeleteBucket(bucket string) error {
|
||||
bucketLock := fs.nsMutex.NewNSLock(bucket, "")
|
||||
if err := bucketLock.GetLock(globalObjectTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
defer bucketLock.Unlock()
|
||||
bucketDir, err := fs.getBucketDir(bucket)
|
||||
if err != nil {
|
||||
return toObjectErr(err, bucket)
|
||||
@ -331,7 +346,22 @@ func (fs *fsObjects) DeleteBucket(bucket string) error {
|
||||
if err = fsRemoveAll(minioMetadataBucketDir); err != nil {
|
||||
return toObjectErr(err, bucket)
|
||||
}
|
||||
// Delete bucket access policy, if present - ignore any errors.
|
||||
_ = removeBucketPolicy(bucket, fs)
|
||||
|
||||
// Notify all peers (including self) to update in-memory state
|
||||
S3PeersUpdateBucketPolicy(bucket)
|
||||
|
||||
// Delete notification config, if present - ignore any errors.
|
||||
_ = removeNotificationConfig(bucket, fs)
|
||||
|
||||
// Notify all peers (including self) to update in-memory state
|
||||
S3PeersUpdateBucketNotification(bucket, nil)
|
||||
// Delete listener config, if present - ignore any errors.
|
||||
_ = removeListenerConfig(bucket, fs)
|
||||
|
||||
// Notify all peers (including self) to update in-memory state
|
||||
S3PeersUpdateBucketListener(bucket, []listenerConfig{})
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -621,6 +651,9 @@ func (fs *fsObjects) parentDirIsObject(bucket, parent string) bool {
|
||||
// Additionally writes `fs.json` which carries the necessary metadata
|
||||
// for future object operations.
|
||||
func (fs *fsObjects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
|
||||
if err := checkPutObjectArgs(bucket, object, fs, data.Size()); err != nil {
|
||||
return ObjectInfo{}, err
|
||||
}
|
||||
// Lock the object.
|
||||
objectLock := fs.nsMutex.NewNSLock(bucket, object)
|
||||
if err := objectLock.GetLock(globalObjectTimeout); err != nil {
|
||||
@ -664,7 +697,7 @@ func (fs *fsObjects) putObject(bucket string, object string, data *hash.Reader,
|
||||
return fsMeta.ToObjectInfo(bucket, object, fi), nil
|
||||
}
|
||||
|
||||
if err = checkPutObjectArgs(bucket, object, fs); err != nil {
|
||||
if err = checkPutObjectArgs(bucket, object, fs, data.Size()); err != nil {
|
||||
return ObjectInfo{}, err
|
||||
}
|
||||
|
||||
@ -872,7 +905,13 @@ func (fs *fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKe
|
||||
if err := checkListObjsArgs(bucket, prefix, marker, delimiter, fs); err != nil {
|
||||
return loi, err
|
||||
}
|
||||
|
||||
// Marker is set validate pre-condition.
|
||||
if marker != "" {
|
||||
// Marker not common with prefix is not implemented. Send an empty response
|
||||
if !hasPrefix(marker, prefix) {
|
||||
return ListObjectsInfo{}, e
|
||||
}
|
||||
}
|
||||
if _, err := fs.statBucketDir(bucket); err != nil {
|
||||
return loi, err
|
||||
}
|
||||
@ -1042,3 +1081,62 @@ func (fs *fsObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, m
|
||||
func (fs *fsObjects) ListBucketsHeal() ([]BucketInfo, error) {
|
||||
return []BucketInfo{}, errors.Trace(NotImplemented{})
|
||||
}
|
||||
|
||||
// SetBucketPolicy sets policy on bucket
|
||||
func (fs *fsObjects) SetBucketPolicy(bucket string, policy policy.BucketAccessPolicy) error {
|
||||
return persistAndNotifyBucketPolicyChange(bucket, false, policy, fs)
|
||||
}
|
||||
|
||||
// GetBucketPolicy will get policy on bucket
|
||||
func (fs *fsObjects) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error) {
|
||||
policy := fs.bucketPolicies.GetBucketPolicy(bucket)
|
||||
if reflect.DeepEqual(policy, emptyBucketPolicy) {
|
||||
return readBucketPolicy(bucket, fs)
|
||||
}
|
||||
return policy, nil
|
||||
}
|
||||
|
||||
// DeleteBucketPolicy deletes all policies on bucket
|
||||
func (fs *fsObjects) DeleteBucketPolicy(bucket string) error {
|
||||
return persistAndNotifyBucketPolicyChange(bucket, true, emptyBucketPolicy, fs)
|
||||
}
|
||||
|
||||
// ListObjectsV2 lists all blobs in bucket filtered by prefix
|
||||
func (fs *fsObjects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
|
||||
loi, err := fs.ListObjects(bucket, prefix, continuationToken, delimiter, maxKeys)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
listObjectsV2Info := ListObjectsV2Info{
|
||||
IsTruncated: loi.IsTruncated,
|
||||
ContinuationToken: continuationToken,
|
||||
NextContinuationToken: loi.NextMarker,
|
||||
Objects: loi.Objects,
|
||||
Prefixes: loi.Prefixes,
|
||||
}
|
||||
return listObjectsV2Info, err
|
||||
}
|
||||
|
||||
// RefreshBucketPolicy refreshes cache policy with what's on disk.
|
||||
func (fs *fsObjects) RefreshBucketPolicy(bucket string) error {
|
||||
policy, err := readBucketPolicy(bucket, fs)
|
||||
|
||||
if err != nil {
|
||||
if reflect.DeepEqual(policy, emptyBucketPolicy) {
|
||||
return fs.bucketPolicies.DeleteBucketPolicy(bucket)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return fs.bucketPolicies.SetBucketPolicy(bucket, policy)
|
||||
}
|
||||
|
||||
// IsNotificationSupported returns whether bucket notification is applicable for this layer.
|
||||
func (fs *fsObjects) IsNotificationSupported() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// IsEncryptionSupported returns whether server side encryption is applicable for this layer.
|
||||
func (fs *fsObjects) IsEncryptionSupported() bool {
|
||||
return true
|
||||
}
|
||||
|
@ -1,923 +0,0 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"io"
|
||||
goioutil "io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
|
||||
router "github.com/gorilla/mux"
|
||||
"github.com/minio/minio-go/pkg/policy"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/ioutil"
|
||||
)
|
||||
|
||||
// GetObjectHandler - GET Object
|
||||
// ----------
|
||||
// This implementation of the GET operation retrieves object. To use GET,
|
||||
// you must have READ access to the object.
|
||||
func (api gatewayAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||
var object, bucket string
|
||||
vars := router.Vars(r)
|
||||
bucket = vars["bucket"]
|
||||
object = vars["object"]
|
||||
|
||||
// Fetch object stat info.
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
reqAuthType := getRequestAuthType(r)
|
||||
|
||||
switch reqAuthType {
|
||||
case authTypePresignedV2, authTypeSignedV2:
|
||||
// Signature V2 validation.
|
||||
s3Error := isReqAuthenticatedV2(r)
|
||||
if s3Error != ErrNone {
|
||||
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
}
|
||||
case authTypeSigned, authTypePresigned:
|
||||
s3Error := isReqAuthenticated(r, globalServerConfig.GetRegion())
|
||||
if s3Error != ErrNone {
|
||||
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
}
|
||||
case authTypeAnonymous:
|
||||
// No verification needed for anonymous requests.
|
||||
default:
|
||||
// For all unknown auth types return error.
|
||||
writeErrorResponse(w, ErrAccessDenied, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
getObjectInfo := objectAPI.GetObjectInfo
|
||||
if reqAuthType == authTypeAnonymous {
|
||||
getObjectInfo = objectAPI.AnonGetObjectInfo
|
||||
}
|
||||
objInfo, err := getObjectInfo(bucket, object)
|
||||
if err != nil {
|
||||
apiErr := toAPIErrorCode(err)
|
||||
if apiErr == ErrNoSuchKey {
|
||||
apiErr = errAllowableObjectNotFound(bucket, r)
|
||||
}
|
||||
writeErrorResponse(w, apiErr, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Get request range.
|
||||
var hrange *httpRange
|
||||
rangeHeader := r.Header.Get("Range")
|
||||
if rangeHeader != "" {
|
||||
if hrange, err = parseRequestRange(rangeHeader, objInfo.Size); err != nil {
|
||||
// Handle only errInvalidRange
|
||||
// Ignore other parse error and treat it as regular Get request like Amazon S3.
|
||||
if err == errInvalidRange {
|
||||
writeErrorResponse(w, ErrInvalidRange, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// log the error.
|
||||
errorIf(err, "Invalid request range")
|
||||
}
|
||||
}
|
||||
|
||||
// Validate pre-conditions if any.
|
||||
if checkPreconditions(w, r, objInfo) {
|
||||
return
|
||||
}
|
||||
|
||||
// Get the object.
|
||||
var startOffset int64
|
||||
length := objInfo.Size
|
||||
if hrange != nil {
|
||||
startOffset = hrange.offsetBegin
|
||||
length = hrange.getLength()
|
||||
}
|
||||
|
||||
getObject := objectAPI.GetObject
|
||||
if reqAuthType == authTypeAnonymous {
|
||||
getObject = objectAPI.AnonGetObject
|
||||
}
|
||||
|
||||
setObjectHeaders(w, objInfo, hrange)
|
||||
setHeadGetRespHeaders(w, r.URL.Query())
|
||||
httpWriter := ioutil.WriteOnClose(w)
|
||||
// Reads the object at startOffset and writes to mw.
|
||||
if err = getObject(bucket, object, startOffset, length, httpWriter, objInfo.ETag); err != nil {
|
||||
errorIf(err, "Unable to write to client.")
|
||||
if !httpWriter.HasWritten() {
|
||||
// Error response only if no data has been written to client yet. i.e if
|
||||
// partial data has already been written before an error
|
||||
// occurred then no point in setting StatusCode and
|
||||
// sending error XML.
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
}
|
||||
return
|
||||
}
|
||||
if err = httpWriter.Close(); err != nil {
|
||||
if !httpWriter.HasWritten() { // write error response only if no data has been written to client yet
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Get host and port from Request.RemoteAddr.
|
||||
host, port, err := net.SplitHostPort(r.RemoteAddr)
|
||||
if err != nil {
|
||||
host, port = "", ""
|
||||
}
|
||||
|
||||
// Notify object accessed via a GET request.
|
||||
eventNotify(eventData{
|
||||
Type: ObjectAccessedGet,
|
||||
Bucket: bucket,
|
||||
ObjInfo: objInfo,
|
||||
ReqParams: extractReqParams(r),
|
||||
UserAgent: r.UserAgent(),
|
||||
Host: host,
|
||||
Port: port,
|
||||
})
|
||||
}
|
||||
|
||||
// PutObjectHandler - PUT Object
|
||||
// ----------
|
||||
// This implementation of the PUT operation adds an object to a bucket.
|
||||
func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// X-Amz-Copy-Source shouldn't be set for this call.
|
||||
if _, ok := r.Header["X-Amz-Copy-Source"]; ok {
|
||||
writeErrorResponse(w, ErrInvalidCopySource, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
var object, bucket string
|
||||
vars := router.Vars(r)
|
||||
bucket = vars["bucket"]
|
||||
object = vars["object"]
|
||||
|
||||
// Validate storage class metadata if present
|
||||
if _, ok := r.Header[amzStorageClassCanonical]; ok {
|
||||
if !isValidStorageClassMeta(r.Header.Get(amzStorageClassCanonical)) {
|
||||
writeErrorResponse(w, ErrInvalidStorageClass, r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: we should validate the object name here
|
||||
|
||||
// Get Content-Md5 sent by client and verify if valid
|
||||
md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5"))
|
||||
if err != nil {
|
||||
writeErrorResponse(w, ErrInvalidDigest, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
/// if Content-Length is unknown/missing, deny the request
|
||||
size := r.ContentLength
|
||||
reqAuthType := getRequestAuthType(r)
|
||||
if reqAuthType == authTypeStreamingSigned {
|
||||
sizeStr := r.Header.Get("x-amz-decoded-content-length")
|
||||
size, err = strconv.ParseInt(sizeStr, 10, 64)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
if size == -1 {
|
||||
writeErrorResponse(w, ErrMissingContentLength, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
/// maximum Upload size for objects in a single operation
|
||||
if isMaxObjectSize(size) {
|
||||
writeErrorResponse(w, ErrEntityTooLarge, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Extract metadata to be saved from incoming HTTP header.
|
||||
metadata, err := extractMetadataFromHeader(r.Header)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, ErrInternalError, r.URL)
|
||||
return
|
||||
}
|
||||
if reqAuthType == authTypeStreamingSigned {
|
||||
if contentEncoding, ok := metadata["content-encoding"]; ok {
|
||||
contentEncoding = trimAwsChunkedContentEncoding(contentEncoding)
|
||||
if contentEncoding != "" {
|
||||
// Make sure to trim and save the content-encoding
|
||||
// parameter for a streaming signature which is set
|
||||
// to a custom value for example: "aws-chunked,gzip".
|
||||
metadata["content-encoding"] = contentEncoding
|
||||
} else {
|
||||
// Trimmed content encoding is empty when the header
|
||||
// value is set to "aws-chunked" only.
|
||||
|
||||
// Make sure to delete the content-encoding parameter
|
||||
// for a streaming signature which is set to value
|
||||
// for example: "aws-chunked"
|
||||
delete(metadata, "content-encoding")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// Make sure we hex encode md5sum here.
|
||||
md5hex = hex.EncodeToString(md5Bytes)
|
||||
sha256hex = ""
|
||||
putObject = objectAPI.PutObject
|
||||
reader = r.Body
|
||||
)
|
||||
|
||||
switch reqAuthType {
|
||||
default:
|
||||
// For all unknown auth types return error.
|
||||
writeErrorResponse(w, ErrAccessDenied, r.URL)
|
||||
return
|
||||
case authTypeAnonymous:
|
||||
putObject = objectAPI.AnonPutObject
|
||||
case authTypeStreamingSigned:
|
||||
// Initialize stream signature verifier.
|
||||
var s3Error APIErrorCode
|
||||
reader, s3Error = newSignV4ChunkedReader(r)
|
||||
if s3Error != ErrNone {
|
||||
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
}
|
||||
case authTypeSignedV2, authTypePresignedV2:
|
||||
s3Error := isReqAuthenticatedV2(r)
|
||||
if s3Error != ErrNone {
|
||||
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
}
|
||||
case authTypePresigned, authTypeSigned:
|
||||
if s3Error := reqSignatureV4Verify(r, globalServerConfig.GetRegion()); s3Error != ErrNone {
|
||||
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
}
|
||||
if !skipContentSha256Cksum(r) {
|
||||
sha256hex = getContentSha256Cksum(r)
|
||||
}
|
||||
}
|
||||
|
||||
hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
objInfo, err := putObject(bucket, object, hashReader, metadata)
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to save an object %s", r.URL.Path)
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("ETag", "\""+objInfo.ETag+"\"")
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
|
||||
// Get host and port from Request.RemoteAddr.
|
||||
host, port, err := net.SplitHostPort(r.RemoteAddr)
|
||||
if err != nil {
|
||||
host, port = "", ""
|
||||
}
|
||||
|
||||
// Notify object created event.
|
||||
eventNotify(eventData{
|
||||
Type: ObjectCreatedPut,
|
||||
Bucket: bucket,
|
||||
ObjInfo: objInfo,
|
||||
ReqParams: extractReqParams(r),
|
||||
UserAgent: r.UserAgent(),
|
||||
Host: host,
|
||||
Port: port,
|
||||
})
|
||||
}
|
||||
|
||||
// HeadObjectHandler - HEAD Object
|
||||
// -----------
|
||||
// The HEAD operation retrieves metadata from an object without returning the object itself.
|
||||
func (api gatewayAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||
var object, bucket string
|
||||
vars := router.Vars(r)
|
||||
bucket = vars["bucket"]
|
||||
object = vars["object"]
|
||||
|
||||
objectAPI := api.ObjectAPI()
|
||||
if objectAPI == nil {
|
||||
writeErrorResponseHeadersOnly(w, ErrServerNotInitialized)
|
||||
return
|
||||
}
|
||||
|
||||
reqAuthType := getRequestAuthType(r)
|
||||
|
||||
switch reqAuthType {
|
||||
case authTypePresignedV2, authTypeSignedV2:
|
||||
// Signature V2 validation.
|
||||
s3Error := isReqAuthenticatedV2(r)
|
||||
if s3Error != ErrNone {
|
||||
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
}
|
||||
case authTypeSigned, authTypePresigned:
|
||||
s3Error := isReqAuthenticated(r, globalServerConfig.GetRegion())
|
||||
if s3Error != ErrNone {
|
||||
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
}
|
||||
case authTypeAnonymous:
|
||||
// No verification needed for anonymous requests.
|
||||
default:
|
||||
// For all unknown auth types return error.
|
||||
writeErrorResponse(w, ErrAccessDenied, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
getObjectInfo := objectAPI.GetObjectInfo
|
||||
if reqAuthType == authTypeAnonymous {
|
||||
getObjectInfo = objectAPI.AnonGetObjectInfo
|
||||
}
|
||||
objInfo, err := getObjectInfo(bucket, object)
|
||||
if err != nil {
|
||||
apiErr := toAPIErrorCode(err)
|
||||
if apiErr == ErrNoSuchKey {
|
||||
apiErr = errAllowableObjectNotFound(bucket, r)
|
||||
}
|
||||
writeErrorResponse(w, apiErr, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Validate pre-conditions if any.
|
||||
if checkPreconditions(w, r, objInfo) {
|
||||
return
|
||||
}
|
||||
|
||||
// Set standard object headers.
|
||||
setObjectHeaders(w, objInfo, nil)
|
||||
|
||||
// Set any additional requested response headers.
|
||||
setHeadGetRespHeaders(w, r.URL.Query())
|
||||
|
||||
// Successful response.
|
||||
w.WriteHeader(http.StatusOK)
|
||||
|
||||
// Get host and port from Request.RemoteAddr.
|
||||
host, port, err := net.SplitHostPort(r.RemoteAddr)
|
||||
if err != nil {
|
||||
host, port = "", ""
|
||||
}
|
||||
|
||||
// Notify object accessed via a HEAD request.
|
||||
eventNotify(eventData{
|
||||
Type: ObjectAccessedHead,
|
||||
Bucket: bucket,
|
||||
ObjInfo: objInfo,
|
||||
ReqParams: extractReqParams(r),
|
||||
UserAgent: r.UserAgent(),
|
||||
Host: host,
|
||||
Port: port,
|
||||
})
|
||||
}
|
||||
|
||||
// PutBucketPolicyHandler - PUT Bucket policy
|
||||
// -----------------
|
||||
// This implementation of the PUT operation uses the policy
|
||||
// subresource to add to or replace a policy on a bucket
|
||||
func (api gatewayAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
vars := router.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
// Before proceeding validate if bucket exists.
|
||||
_, err := objAPI.GetBucketInfo(bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// If Content-Length is unknown or zero, deny the
|
||||
// request. PutBucketPolicy always needs a Content-Length.
|
||||
if r.ContentLength == -1 || r.ContentLength == 0 {
|
||||
writeErrorResponse(w, ErrMissingContentLength, r.URL)
|
||||
return
|
||||
}
|
||||
// If Content-Length is greater than maximum allowed policy size.
|
||||
if r.ContentLength > maxAccessPolicySize {
|
||||
writeErrorResponse(w, ErrEntityTooLarge, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Read access policy up to maxAccessPolicySize.
|
||||
// http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
|
||||
// bucket policies are limited to 20KB in size, using a limit reader.
|
||||
policyBytes, err := goioutil.ReadAll(io.LimitReader(r.Body, maxAccessPolicySize))
|
||||
if err != nil {
|
||||
errorIf(err, "Unable to read from client.")
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
policyInfo := policy.BucketAccessPolicy{}
|
||||
if err = json.Unmarshal(policyBytes, &policyInfo); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err = objAPI.SetBucketPolicies(bucket, policyInfo); err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
// Success.
|
||||
writeSuccessNoContent(w)
|
||||
}
|
||||
|
||||
// DeleteBucketPolicyHandler - DELETE Bucket policy
|
||||
// -----------------
|
||||
// This implementation of the DELETE operation uses the policy
|
||||
// subresource to add to remove a policy on a bucket.
|
||||
func (api gatewayAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
vars := router.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
// Before proceeding validate if bucket exists.
|
||||
_, err := objAPI.GetBucketInfo(bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete bucket access policy, by passing an empty policy
|
||||
// struct.
|
||||
objAPI.DeleteBucketPolicies(bucket)
|
||||
// Success.
|
||||
writeSuccessNoContent(w)
|
||||
}
|
||||
|
||||
// GetBucketPolicyHandler - GET Bucket policy
|
||||
// -----------------
|
||||
// This operation uses the policy
|
||||
// subresource to return the policy of a specified bucket.
|
||||
func (api gatewayAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
|
||||
writeErrorResponse(w, s3Error, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
vars := router.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
// Before proceeding validate if bucket exists.
|
||||
_, err := objAPI.GetBucketInfo(bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
bp, err := objAPI.GetBucketPolicies(bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
policyBytes, err := json.Marshal(bp)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||
return
|
||||
}
|
||||
// Write to client.
|
||||
w.Write(policyBytes)
|
||||
}
|
||||
|
||||
// GetBucketNotificationHandler - This implementation of the GET
// operation uses the notification subresource to return the
// notification configuration of a bucket. If notifications are
// not enabled on the bucket, the operation returns an empty
// NotificationConfiguration element.
func (api gatewayAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
	writeErrorResponse(w, ErrNotImplemented, r.URL)
}

// PutBucketNotificationHandler - Minio notification feature enables
// you to receive notifications when certain events happen in your bucket.
// Using this API, you can replace an existing notification configuration.
// The configuration is an XML file that defines the event types that you
// want Minio to publish and the destination where you want Minio to publish
// an event notification when it detects an event of the specified type.
// By default, your bucket has no event notifications configured. That is,
// the notification configuration will be an empty NotificationConfiguration.
func (api gatewayAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
	writeErrorResponse(w, ErrNotImplemented, r.URL)
}

// ListenBucketNotificationHandler - list bucket notifications.
func (api gatewayAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
	writeErrorResponse(w, ErrNotImplemented, r.URL)
}

// PutBucketHandler - PUT Bucket
// ----------
// This implementation of the PUT operation creates a new bucket for an authenticated request.
func (api gatewayAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	// PutBucket does not have any bucket action.
	s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion())
	if s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	vars := router.Vars(r)
	bucket := vars["bucket"]

	// Validate if incoming location constraint is valid, reject
	// requests which do not follow valid region requirements.
	location, s3Error := parseLocationConstraint(r)
	if s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	bucketLock := globalNSMutex.NewNSLock(bucket, "")
	if bucketLock.GetLock(globalOperationTimeout) != nil {
		writeErrorResponse(w, ErrOperationTimedOut, r.URL)
		return
	}
	defer bucketLock.Unlock()

	// Proceed to creating a bucket.
	err := objectAPI.MakeBucketWithLocation(bucket, location)
	if err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	// Make sure to add Location information here only for bucket
	w.Header().Set("Location", getLocation(r))

	writeSuccessResponseHeadersOnly(w)
}

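parseLocationConstraint, used above, reads the standard S3 CreateBucketConfiguration request body. A hedged sketch of what that parsing amounts to, using a locally defined struct rather than the server's internal type:

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// createBucketConfiguration mirrors the S3 PUT Bucket request body; it is a
// local illustration, not the server's definition.
type createBucketConfiguration struct {
	XMLName  xml.Name `xml:"CreateBucketConfiguration"`
	Location string   `xml:"LocationConstraint"`
}

func main() {
	body := `<CreateBucketConfiguration><LocationConstraint>us-west-1</LocationConstraint></CreateBucketConfiguration>`
	var cfg createBucketConfiguration
	if err := xml.NewDecoder(strings.NewReader(body)).Decode(&cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Location) // us-west-1; an empty body implies the default region
}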
// DeleteBucketHandler - Delete bucket
func (api gatewayAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	// DeleteBucket does not have any bucket action.
	if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}

	vars := router.Vars(r)
	bucket := vars["bucket"]

	// Attempt to delete bucket.
	if err := objectAPI.DeleteBucket(bucket); err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	// Write success response.
	writeSuccessNoContent(w)
}

// ListObjectsV1Handler - GET Bucket (List Objects) Version 1.
// --------------------------
// This implementation of the GET operation returns some or all (up to 1000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
func (api gatewayAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
	vars := router.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	reqAuthType := getRequestAuthType(r)

	switch reqAuthType {
	case authTypePresignedV2, authTypeSignedV2:
		// Signature V2 validation.
		s3Error := isReqAuthenticatedV2(r)
		if s3Error != ErrNone {
			errorIf(errSignatureMismatch, "%s", dumpRequest(r))
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
	case authTypeSigned, authTypePresigned:
		s3Error := isReqAuthenticated(r, globalServerConfig.GetRegion())
		if s3Error != ErrNone {
			errorIf(errSignatureMismatch, "%s", dumpRequest(r))
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
	case authTypeAnonymous:
		// No verification needed for anonymous requests.
	default:
		// For all unknown auth types return error.
		writeErrorResponse(w, ErrAccessDenied, r.URL)
		return
	}

	// Extract all the listObjectsV1 query params to their native
	// values. NB: we delegate validation of params to the respective
	// gateway backends.
	prefix, marker, delimiter, maxKeys, _ := getListObjectsV1Args(r.URL.Query())

	// Validate the maxKeys lower bound. When maxKeys > 1000, S3 returns 1000 but
	// does not throw an error.
	if maxKeys < 0 {
		writeErrorResponse(w, ErrInvalidMaxKeys, r.URL)
		return
	}

	listObjects := objectAPI.ListObjects
	if reqAuthType == authTypeAnonymous {
		listObjects = objectAPI.AnonListObjects
	}
	// Initiate a list objects operation based on the input params.
	// On success it returns a ListObjectsInfo object to be
	// marshalled into an S3 compatible XML response.
	listObjectsInfo, err := listObjects(bucket, prefix, marker, delimiter, maxKeys)
	if err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}
	response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, maxKeys, listObjectsInfo)
	// Write success response.
	writeSuccessResponseXML(w, encodeResponse(response))
}

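The handler above switches between the authenticated and anonymous listing implementations by assigning a method value to a local variable. A tiny self-contained illustration of that Go idiom, using toy types rather than the server's:

package main

import "fmt"

// backend is a toy stand-in for the object layer used by the handlers above.
type backend struct{}

func (backend) ListObjects(bucket string) string     { return "signed listing of " + bucket }
func (backend) AnonListObjects(bucket string) string { return "anonymous listing of " + bucket }

func main() {
	var b backend
	anonymous := true

	// Bind the default method value, then swap in the anonymous variant,
	// mirroring listObjects/AnonListObjects above.
	listObjects := b.ListObjects
	if anonymous {
		listObjects = b.AnonListObjects
	}
	fmt.Println(listObjects("mybucket"))
}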
// ListObjectsV2Handler - GET Bucket (List Objects) Version 2.
// --------------------------
// This implementation of the GET operation returns some or all (up to 1000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
// NOTE: It is recommended that this API be used for application development.
// Minio continues to support ListObjectsV1 for legacy tools.
func (api gatewayAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
	vars := router.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}

	reqAuthType := getRequestAuthType(r)

	switch reqAuthType {
	case authTypePresignedV2, authTypeSignedV2:
		// Signature V2 validation.
		s3Error := isReqAuthenticatedV2(r)
		if s3Error != ErrNone {
			errorIf(errSignatureMismatch, dumpRequest(r))
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
	case authTypeSigned, authTypePresigned:
		s3Error := isReqAuthenticated(r, globalServerConfig.GetRegion())
		if s3Error != ErrNone {
			errorIf(errSignatureMismatch, dumpRequest(r))
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
	case authTypeAnonymous:
		// No verification needed for anonymous requests.
	default:
		// For all unknown auth types return error.
		writeErrorResponse(w, ErrAccessDenied, r.URL)
		return
	}

	// Extract all the listObjectsV2 query params to their native values.
	prefix, token, startAfter, delimiter, fetchOwner, maxKeys, _ := getListObjectsV2Args(r.URL.Query())

	// In ListObjectsV2 'continuation-token' is the marker.
	marker := token
	// Check if 'continuation-token' is empty.
	if token == "" {
		// Then we need to use 'start-after' as marker instead.
		marker = startAfter
	}

	listObjectsV2 := objectAPI.ListObjectsV2
	if reqAuthType == authTypeAnonymous {
		listObjectsV2 = objectAPI.AnonListObjectsV2
	}

	// Validate the query params before beginning to serve the request.
	// fetch-owner is not validated since it is a boolean.
	if s3Error := validateGatewayListObjectsV2Args(prefix, marker, delimiter, maxKeys); s3Error != ErrNone {
		writeErrorResponse(w, s3Error, r.URL)
		return
	}
	// Initiate a list objects operation based on the input params.
	// On success it returns a ListObjectsV2Info object to be
	// serialized as XML and sent as the S3 compatible response body.
	listObjectsV2Info, err := listObjectsV2(bucket, prefix, token, delimiter, maxKeys, fetchOwner, startAfter)
	if err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter, delimiter, fetchOwner, listObjectsV2Info.IsTruncated, maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes)
	// Write success response.
	writeSuccessResponseXML(w, encodeResponse(response))
}

// HeadBucketHandler - HEAD Bucket
// ----------
// This operation is useful to determine if a bucket exists.
// The operation returns a 200 OK if the bucket exists and you
// have permission to access it. Otherwise, the operation might
// return responses such as 404 Not Found and 403 Forbidden.
func (api gatewayAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
	vars := router.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponseHeadersOnly(w, ErrServerNotInitialized)
		return
	}

	reqAuthType := getRequestAuthType(r)

	switch reqAuthType {
	case authTypePresignedV2, authTypeSignedV2:
		// Signature V2 validation.
		s3Error := isReqAuthenticatedV2(r)
		if s3Error != ErrNone {
			errorIf(errSignatureMismatch, "%s", dumpRequest(r))
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
	case authTypeSigned, authTypePresigned:
		s3Error := isReqAuthenticated(r, globalServerConfig.GetRegion())
		if s3Error != ErrNone {
			errorIf(errSignatureMismatch, "%s", dumpRequest(r))
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
	case authTypeAnonymous:
		// No verification needed for anonymous requests.
	default:
		// For all unknown auth types return error.
		writeErrorResponse(w, ErrAccessDenied, r.URL)
		return
	}

	getBucketInfo := objectAPI.GetBucketInfo
	if reqAuthType == authTypeAnonymous {
		getBucketInfo = objectAPI.AnonGetBucketInfo
	}

	if _, err := getBucketInfo(bucket); err != nil {
		writeErrorResponseHeadersOnly(w, toAPIErrorCode(err))
		return
	}

	writeSuccessResponseHeadersOnly(w)
}

// GetBucketLocationHandler - GET Bucket location.
// -------------------------
// This operation returns bucket location.
func (api gatewayAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
	vars := router.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(w, ErrServerNotInitialized, r.URL)
		return
	}
	reqAuthType := getRequestAuthType(r)

	switch reqAuthType {
	case authTypePresignedV2, authTypeSignedV2:
		// Signature V2 validation.
		s3Error := isReqAuthenticatedV2(r)
		if s3Error != ErrNone {
			errorIf(errSignatureMismatch, "%s", dumpRequest(r))
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
	case authTypeSigned, authTypePresigned:
		s3Error := isReqAuthenticated(r, globalMinioDefaultRegion)
		if s3Error == ErrInvalidRegion {
			// Clients like boto3 send the getBucketLocation() call signed with the configured region.
			s3Error = isReqAuthenticated(r, globalServerConfig.GetRegion())
		}
		if s3Error != ErrNone {
			errorIf(errSignatureMismatch, "%s", dumpRequest(r))
			writeErrorResponse(w, s3Error, r.URL)
			return
		}
	case authTypeAnonymous:
		// No verification needed for anonymous requests.
	default:
		// For all unknown auth types return error.
		writeErrorResponse(w, ErrAccessDenied, r.URL)
		return
	}

	getBucketInfo := objectAPI.GetBucketInfo
	if reqAuthType == authTypeAnonymous {
		getBucketInfo = objectAPI.AnonGetBucketInfo
	}

	if _, err := getBucketInfo(bucket); err != nil {
		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
		return
	}

	// Generate response.
	encodedSuccessResponse := encodeResponse(LocationResponse{})
	// Get current region.
	region := globalServerConfig.GetRegion()
	if region != globalMinioDefaultRegion {
		encodedSuccessResponse = encodeResponse(LocationResponse{
			Location: region,
		})
	}

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}
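For reference, the success body written above is the S3 GetBucketLocation XML, where an empty LocationConstraint denotes the default region. A hedged sketch with a locally defined stand-in for the server's internal LocationResponse type:

package main

import (
	"encoding/xml"
	"fmt"
)

// locationResponse is a local stand-in; the server's LocationResponse lives in
// the cmd package and may carry additional attributes such as an xmlns.
type locationResponse struct {
	XMLName  xml.Name `xml:"LocationConstraint"`
	Location string   `xml:",chardata"`
}

func main() {
	out, _ := xml.Marshal(locationResponse{Location: "us-west-1"})
	fmt.Println(string(out)) // <LocationConstraint>us-west-1</LocationConstraint>

	// Default region: an empty element, matching encodeResponse(LocationResponse{}) above.
	out, _ = xml.Marshal(locationResponse{})
	fmt.Println(string(out)) // <LocationConstraint></LocationConstraint>
}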
@@ -171,7 +171,7 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
	if globalIsBrowserEnabled {
		fatalIf(registerWebRouter(router), "Unable to configure web browser")
	}
	registerGatewayAPIRouter(router, newObject)
	registerAPIRouter(router)

	var handlerFns = []HandlerFunc{
		// Validate all the incoming paths.

@@ -17,13 +17,7 @@
package cmd

import (
	"io"
	"net/http"

	router "github.com/gorilla/mux"
	"github.com/minio/minio-go/pkg/policy"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/hash"
)

// GatewayMinioSysTmp prefix is used in Azure/GCS gateway to save metadata sent by the Initialize Multipart Upload API.
@@ -34,123 +28,9 @@ type Gateway interface {
	// Name returns the unique name of the gateway.
	Name() string

	// NewGatewayLayer returns a new gateway layer.
	NewGatewayLayer(creds auth.Credentials) (GatewayLayer, error)
	// NewGatewayLayer returns a new ObjectLayer.
	NewGatewayLayer(creds auth.Credentials) (ObjectLayer, error)

	// Returns true if gateway is ready for production.
	Production() bool
}

// GatewayLayer - interface to implement gateway mode.
type GatewayLayer interface {
	ObjectLayer

	AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error)
	AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error)

	AnonPutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (ObjectInfo, error)

	SetBucketPolicies(string, policy.BucketAccessPolicy) error
	GetBucketPolicies(string) (policy.BucketAccessPolicy, error)
	DeleteBucketPolicies(string) error
	AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
	AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error)
	ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error)
	AnonGetBucketInfo(bucket string) (bucketInfo BucketInfo, err error)
}

// Implements and provides http handlers for S3 API.
// Overrides GetObject, HeadObject and Policy related handlers.
type gatewayAPIHandlers struct {
	objectAPIHandlers
	ObjectAPI func() GatewayLayer
}

// registerAPIRouter - registers S3 compatible APIs.
func registerGatewayAPIRouter(mux *router.Router, gw GatewayLayer) {
	// Initialize API.
	api := gatewayAPIHandlers{
		ObjectAPI: func() GatewayLayer { return gw },
		objectAPIHandlers: objectAPIHandlers{
			ObjectAPI: newObjectLayerFn,
		},
	}

	// API Router
	apiRouter := mux.NewRoute().PathPrefix("/").Subrouter()

	var routers []*router.Router
	if globalDomainName != "" {
		routers = append(routers, apiRouter.Host("{bucket:.+}."+globalDomainName).Subrouter())
	}
	routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())

	// Object operations
	for _, bucket := range routers {
		/// Object operations

		// HeadObject
		bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.HeadObjectHandler))
		// CopyObjectPart
		bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(httpTraceAll(api.CopyObjectPartHandler)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
		// PutObjectPart
		bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.PutObjectPartHandler)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
		// ListObjectParts
		bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.ListObjectPartsHandler)).Queries("uploadId", "{uploadId:.*}")
		// CompleteMultipartUpload
		bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.CompleteMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}")
		// NewMultipartUpload
		bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.NewMultipartUploadHandler)).Queries("uploads", "")
		// AbortMultipartUpload
		bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.AbortMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}")
		// GetObject
		bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectHandler))
		// CopyObject
		bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(httpTraceAll(api.CopyObjectHandler))
		// PutObject
		bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.PutObjectHandler))
		// DeleteObject
		bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.DeleteObjectHandler))

		/// Bucket operations

		// GetBucketLocation
		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketLocationHandler)).Queries("location", "")
		// GetBucketPolicy
		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketPolicyHandler)).Queries("policy", "")
		// GetBucketNotification
		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.GetBucketNotificationHandler)).Queries("notification", "")
		// ListenBucketNotification
		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.ListenBucketNotificationHandler)).Queries("events", "{events:.*}")
		// ListMultipartUploads
		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.ListMultipartUploadsHandler)).Queries("uploads", "")
		// ListObjectsV2
		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.ListObjectsV2Handler)).Queries("list-type", "2")
		// ListObjectsV1 (Legacy)
		bucket.Methods("GET").HandlerFunc(httpTraceAll(api.ListObjectsV1Handler))
		// PutBucketPolicy
		bucket.Methods("PUT").HandlerFunc(httpTraceAll(api.PutBucketPolicyHandler)).Queries("policy", "")
		// PutBucketNotification
		bucket.Methods("PUT").HandlerFunc(httpTraceAll(api.PutBucketNotificationHandler)).Queries("notification", "")
		// PutBucket
		bucket.Methods("PUT").HandlerFunc(httpTraceAll(api.PutBucketHandler))
		// HeadBucket
		bucket.Methods("HEAD").HandlerFunc(httpTraceAll(api.HeadBucketHandler))
		// PostPolicy
		bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(httpTraceAll(api.PostPolicyBucketHandler))
		// DeleteMultipleObjects
		bucket.Methods("POST").HandlerFunc(httpTraceAll(api.DeleteMultipleObjectsHandler)).Queries("delete", "")
		// DeleteBucketPolicy
		bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketPolicyHandler)).Queries("policy", "")
		// DeleteBucket
		bucket.Methods("DELETE").HandlerFunc(httpTraceAll(api.DeleteBucketHandler))
	}

	/// Root operation

	// ListBuckets
	apiRouter.Methods("GET").Path("/").HandlerFunc(httpTraceAll(api.ListBucketsHandler))

	// If none of the routes match.
	apiRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(notFoundHandler))
}
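The bucket routes above rely on gorilla/mux query matching: the same GET on /{bucket} dispatches to different handlers depending on which query parameter is present, with the bare listing route registered last as the fallback. A small self-contained sketch of that matching behaviour, with toy handlers in place of the server's:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	bucket := r.PathPrefix("/{bucket}").Subrouter()

	// More specific, query-matched routes are registered first...
	bucket.Methods("GET").HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "GetBucketPolicy")
	}).Queries("policy", "")
	bucket.Methods("GET").HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "ListObjectsV2")
	}).Queries("list-type", "2")
	// ...and a bare GET on the bucket falls through to ListObjectsV1.
	bucket.Methods("GET").HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "ListObjectsV1")
	})

	for _, target := range []string{"/mybucket?policy=", "/mybucket?list-type=2", "/mybucket"} {
		rec := httptest.NewRecorder()
		r.ServeHTTP(rec, httptest.NewRequest("GET", target, nil))
		fmt.Println(target, "->", rec.Body.String())
	}
}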

@@ -17,7 +17,6 @@
package cmd

import (
	"io"
	"time"

	"github.com/minio/minio-go/pkg/policy"
@@ -64,18 +63,18 @@ func (a GatewayUnsupported) CompleteMultipartUpload(bucket string, object string
	return oi, errors.Trace(NotImplemented{})
}

// SetBucketPolicies sets policy on bucket
func (a GatewayUnsupported) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
// SetBucketPolicy sets policy on bucket
func (a GatewayUnsupported) SetBucketPolicy(bucket string, policyInfo policy.BucketAccessPolicy) error {
	return errors.Trace(NotImplemented{})
}

// GetBucketPolicies will get policy on bucket
func (a GatewayUnsupported) GetBucketPolicies(bucket string) (bal policy.BucketAccessPolicy, err error) {
// GetBucketPolicy will get policy on bucket
func (a GatewayUnsupported) GetBucketPolicy(bucket string) (bal policy.BucketAccessPolicy, err error) {
	return bal, errors.Trace(NotImplemented{})
}

// DeleteBucketPolicies deletes all policies on bucket
func (a GatewayUnsupported) DeleteBucketPolicies(bucket string) error {
// DeleteBucketPolicy deletes all policies on bucket
func (a GatewayUnsupported) DeleteBucketPolicy(bucket string) error {
	return errors.Trace(NotImplemented{})
}

@@ -104,39 +103,6 @@ func (a GatewayUnsupported) ListObjectsHeal(bucket, prefix, marker, delimiter st
	return loi, errors.Trace(NotImplemented{})
}

// AnonListObjects - List objects anonymously
func (a GatewayUnsupported) AnonListObjects(bucket string, prefix string, marker string, delimiter string,
	maxKeys int) (loi ListObjectsInfo, err error) {
	return loi, errors.Trace(NotImplemented{})
}

// AnonListObjectsV2 - List objects in V2 mode, anonymously
func (a GatewayUnsupported) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int,
	fetchOwner bool, startAfter string) (loi ListObjectsV2Info, err error) {
	return loi, errors.Trace(NotImplemented{})
}

// AnonGetBucketInfo - Get bucket metadata anonymously.
func (a GatewayUnsupported) AnonGetBucketInfo(bucket string) (bi BucketInfo, err error) {
	return bi, errors.Trace(NotImplemented{})
}

// AnonPutObject creates a new object anonymously with the incoming data,
func (a GatewayUnsupported) AnonPutObject(bucket, object string, data *hash.Reader,
	metadata map[string]string) (ObjectInfo, error) {
	return ObjectInfo{}, errors.Trace(NotImplemented{})
}

// AnonGetObject downloads object anonymously.
func (a GatewayUnsupported) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error) {
	return errors.Trace(NotImplemented{})
}

// AnonGetObjectInfo returns stat information about an object anonymously.
func (a GatewayUnsupported) AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
	return objInfo, errors.Trace(NotImplemented{})
}

// CopyObject copies a blob from source container to destination container.
func (a GatewayUnsupported) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string,
	metadata map[string]string, srcEtag string) (objInfo ObjectInfo, err error) {
@@ -154,3 +120,18 @@ func (a GatewayUnsupported) ListLocks(bucket, prefix string, duration time.Durat
func (a GatewayUnsupported) ClearLocks([]VolumeLockInfo) error {
	return errors.Trace(NotImplemented{})
}

// RefreshBucketPolicy refreshes cache policy with what's on disk.
func (a GatewayUnsupported) RefreshBucketPolicy(bucket string) error {
	return errors.Trace(NotImplemented{})
}

// IsNotificationSupported returns whether bucket notification is applicable for this layer.
func (a GatewayUnsupported) IsNotificationSupported() bool {
	return false
}

// IsEncryptionSupported returns whether server side encryption is applicable for this layer.
func (a GatewayUnsupported) IsEncryptionSupported() bool {
	return false
}
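GatewayUnsupported exists so a backend can embed it and inherit NotImplemented stubs for the optional parts of the interface (multipart, policies, anonymous access, healing, locks), overriding only what its remote store actually supports; the b2Objects struct later in this diff does exactly that. A minimal hypothetical sketch of the pattern, with an invented backend name:

package example

import (
	minio "github.com/minio/minio/cmd"
)

// demoObjects is an illustrative backend, not part of this change.
type demoObjects struct {
	minio.GatewayUnsupported
}

// GetBucketInfo is the one call this toy backend chooses to implement; calls it
// does not override fall through to the embedded stubs and report NotImplemented.
func (d *demoObjects) GetBucketInfo(bucket string) (minio.BucketInfo, error) {
	return minio.BucketInfo{Name: bucket}, nil
}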

@@ -1,322 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package azure

import (
	"encoding/xml"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
	"github.com/minio/minio/pkg/errors"

	minio "github.com/minio/minio/cmd"
)

// Copied from github.com/Azure/azure-sdk-for-go/storage/container.go
func azureListBlobsGetParameters(p storage.ListBlobsParameters) url.Values {
	out := url.Values{}

	if p.Prefix != "" {
		out.Set("prefix", p.Prefix)
	}
	if p.Delimiter != "" {
		out.Set("delimiter", p.Delimiter)
	}
	if p.Marker != "" {
		out.Set("marker", p.Marker)
	}
	if p.Include != nil {
		addString := func(datasets []string, include bool, text string) []string {
			if include {
				datasets = append(datasets, text)
			}
			return datasets
		}

		include := []string{}
		include = addString(include, p.Include.Snapshots, "snapshots")
		include = addString(include, p.Include.Metadata, "metadata")
		include = addString(include, p.Include.UncommittedBlobs, "uncommittedblobs")
		include = addString(include, p.Include.Copy, "copy")
		fullInclude := strings.Join(include, ",")
		out.Set("include", fullInclude)
	}
	if p.MaxResults != 0 {
		out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
	}
	if p.Timeout != 0 {
		out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
	}

	return out
}

// Make anonymous HTTP request to azure endpoint.
func azureAnonRequest(verb, urlStr string, header http.Header) (*http.Response, error) {
	req, err := http.NewRequest(verb, urlStr, nil)
	if err != nil {
		return nil, err
	}
	if header != nil {
		req.Header = header
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}

	// 4XX and 5XX are error HTTP codes.
	if resp.StatusCode >= 400 && resp.StatusCode <= 511 {
		defer resp.Body.Close()
		respBody, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return nil, err
		}

		if len(respBody) == 0 {
			// no error in response body, might happen in HEAD requests
			return nil, storage.AzureStorageServiceError{
				StatusCode: resp.StatusCode,
				Code:       resp.Status,
				Message:    "no response body was available for error status code",
			}
		}
		// Response contains Azure storage service error object.
		var storageErr storage.AzureStorageServiceError
		if err := xml.Unmarshal(respBody, &storageErr); err != nil {
			return nil, err
		}
		storageErr.StatusCode = resp.StatusCode
		return nil, storageErr
	}

	return resp, nil
}

// AnonGetBucketInfo - Get bucket metadata from azure anonymously.
func (a *azureObjects) AnonGetBucketInfo(bucket string) (bucketInfo minio.BucketInfo, err error) {
	blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL()
	url, err := url.Parse(blobURL)
	if err != nil {
		return bucketInfo, azureToObjectError(errors.Trace(err))
	}
	url.RawQuery = "restype=container"
	resp, err := azureAnonRequest(http.MethodHead, url.String(), nil)
	if err != nil {
		return bucketInfo, azureToObjectError(errors.Trace(err), bucket)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return bucketInfo, azureToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket)), bucket)
	}

	t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
	if err != nil {
		return bucketInfo, errors.Trace(err)
	}

	return minio.BucketInfo{
		Name:    bucket,
		Created: t,
	}, nil
}

// AnonGetObject - Send GET request without authentication.
// This is needed when clients send GET requests on objects that can be downloaded without auth.
func (a *azureObjects) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error) {
	h := make(http.Header)
	if length > 0 && startOffset > 0 {
		h.Add("Range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
	} else if startOffset > 0 {
		h.Add("Range", fmt.Sprintf("bytes=%d-", startOffset))
	}

	blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL()
	resp, err := azureAnonRequest(http.MethodGet, blobURL, h)
	if err != nil {
		return azureToObjectError(errors.Trace(err), bucket, object)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
		return azureToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
	}

	_, err = io.Copy(writer, resp.Body)
	return errors.Trace(err)
}

// AnonGetObjectInfo - Send HEAD request without authentication and convert the
// result to ObjectInfo.
func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo minio.ObjectInfo, err error) {
	blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL()
	resp, err := azureAnonRequest(http.MethodHead, blobURL, nil)
	if err != nil {
		return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return objInfo, azureToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
	}

	var contentLength int64
	contentLengthStr := resp.Header.Get("Content-Length")
	if contentLengthStr != "" {
		contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
		if err != nil {
			return objInfo, azureToObjectError(errors.Trace(fmt.Errorf("Unexpected error")), bucket, object)
		}
	}

	t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
	if err != nil {
		return objInfo, errors.Trace(err)
	}

	objInfo.ModTime = t
	objInfo.Bucket = bucket
	objInfo.UserDefined = make(map[string]string)
	if resp.Header.Get("Content-Encoding") != "" {
		objInfo.UserDefined["Content-Encoding"] = resp.Header.Get("Content-Encoding")
	}
	objInfo.UserDefined["Content-Type"] = resp.Header.Get("Content-Type")
	objInfo.ETag = resp.Header.Get("Etag")
	objInfo.ModTime = t
	objInfo.Name = object
	objInfo.Size = contentLength
	return
}

// AnonListObjects - Use Azure equivalent ListBlobs.
func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result minio.ListObjectsInfo, err error) {
	params := storage.ListBlobsParameters{
		Prefix:     prefix,
		Marker:     marker,
		Delimiter:  delimiter,
		MaxResults: uint(maxKeys),
	}

	q := azureListBlobsGetParameters(params)
	q.Set("restype", "container")
	q.Set("comp", "list")

	blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL()
	url, err := url.Parse(blobURL)
	if err != nil {
		return result, azureToObjectError(errors.Trace(err))
	}
	url.RawQuery = q.Encode()

	resp, err := azureAnonRequest(http.MethodGet, url.String(), nil)
	if err != nil {
		return result, azureToObjectError(errors.Trace(err))
	}
	defer resp.Body.Close()

	var listResp storage.BlobListResponse

	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return result, azureToObjectError(errors.Trace(err))
	}
	err = xml.Unmarshal(data, &listResp)
	if err != nil {
		return result, azureToObjectError(errors.Trace(err))
	}

	result.IsTruncated = listResp.NextMarker != ""
	result.NextMarker = listResp.NextMarker
	for _, object := range listResp.Blobs {
		result.Objects = append(result.Objects, minio.ObjectInfo{
			Bucket:          bucket,
			Name:            object.Name,
			ModTime:         time.Time(object.Properties.LastModified),
			Size:            object.Properties.ContentLength,
			ETag:            object.Properties.Etag,
			ContentType:     object.Properties.ContentType,
			ContentEncoding: object.Properties.ContentEncoding,
		})
	}
	result.Prefixes = listResp.BlobPrefixes
	return result, nil
}

// AnonListObjectsV2 - List objects in V2 mode, anonymously
func (a *azureObjects) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result minio.ListObjectsV2Info, err error) {
	params := storage.ListBlobsParameters{
		Prefix:     prefix,
		Marker:     continuationToken,
		Delimiter:  delimiter,
		MaxResults: uint(maxKeys),
	}

	q := azureListBlobsGetParameters(params)
	q.Set("restype", "container")
	q.Set("comp", "list")

	blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL()
	url, err := url.Parse(blobURL)
	if err != nil {
		return result, azureToObjectError(errors.Trace(err))
	}
	url.RawQuery = q.Encode()

	resp, err := http.Get(url.String())
	if err != nil {
		return result, azureToObjectError(errors.Trace(err))
	}
	defer resp.Body.Close()

	var listResp storage.BlobListResponse

	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return result, azureToObjectError(errors.Trace(err))
	}
	err = xml.Unmarshal(data, &listResp)
	if err != nil {
		return result, azureToObjectError(errors.Trace(err))
	}

	// If NextMarker is not empty, this means response is truncated and NextContinuationToken should be set
	if listResp.NextMarker != "" {
		result.IsTruncated = true
		result.NextContinuationToken = listResp.NextMarker
	}
	for _, object := range listResp.Blobs {
		result.Objects = append(result.Objects, minio.ObjectInfo{
			Bucket:          bucket,
			Name:            object.Name,
			ModTime:         time.Time(object.Properties.LastModified),
			Size:            object.Properties.ContentLength,
			ETag:            minio.CanonicalizeETag(object.Properties.Etag),
			ContentType:     object.Properties.ContentType,
			ContentEncoding: object.Properties.ContentEncoding,
		})
	}
	result.Prefixes = listResp.BlobPrefixes
	return result, nil
}

@@ -117,7 +117,7 @@ func (g *Azure) Name() string {
}

// NewGatewayLayer initializes azure blob storage client and returns AzureObjects.
func (g *Azure) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) {
func (g *Azure) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
	var err error
	var endpoint = storage.DefaultBaseURL
	var secure = true
@@ -959,13 +959,13 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
	return a.GetObjectInfo(bucket, object)
}

// SetBucketPolicies - Azure supports three types of container policies:
// SetBucketPolicy - Azure supports three types of container policies:
// storage.ContainerAccessTypeContainer - readonly in minio terminology
// storage.ContainerAccessTypeBlob - readonly without listing in minio terminology
// storage.ContainerAccessTypePrivate - none in minio terminology
// As the common denominator for minio and azure is readonly and none, we support
// these two policies at the bucket level.
func (a *azureObjects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
func (a *azureObjects) SetBucketPolicy(bucket string, policyInfo policy.BucketAccessPolicy) error {
	var policies []minio.BucketAccessPolicy

	for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) {
@@ -993,8 +993,8 @@ func (a *azureObjects) SetBucketPolicies(bucket string, policyInfo policy.Bucket
	return azureToObjectError(errors.Trace(err), bucket)
}

// GetBucketPolicies - Get the container ACL and convert it to canonical []bucketAccessPolicy
func (a *azureObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
// GetBucketPolicy - Get the container ACL and convert it to canonical []bucketAccessPolicy
func (a *azureObjects) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error) {
	policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
	container := a.client.GetContainerReference(bucket)
	perm, err := container.GetPermissions(nil)
@@ -1012,8 +1012,8 @@ func (a *azureObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPoli
	return policyInfo, nil
}

// DeleteBucketPolicies - Set the container ACL to "private"
func (a *azureObjects) DeleteBucketPolicies(bucket string) error {
// DeleteBucketPolicy - Set the container ACL to "private"
func (a *azureObjects) DeleteBucketPolicy(bucket string) error {
	perm := storage.ContainerPermissions{
		AccessType:     storage.ContainerAccessTypePrivate,
		AccessPolicies: nil,

@@ -19,7 +19,6 @@ package azure
import (
	"fmt"
	"net/http"
	"net/url"
	"reflect"
	"testing"

@@ -262,56 +261,6 @@ func TestAzureParseBlockID(t *testing.T) {
	}
}

// Test azureListBlobsGetParameters()
func TestAzureListBlobsGetParameters(t *testing.T) {

	// Test values set 1
	expectedURLValues := url.Values{}
	expectedURLValues.Set("prefix", "test")
	expectedURLValues.Set("delimiter", "_")
	expectedURLValues.Set("marker", "marker")
	expectedURLValues.Set("include", "metadata")
	expectedURLValues.Set("maxresults", "20")
	expectedURLValues.Set("timeout", "10")

	setBlobParameters := storage.ListBlobsParameters{
		Prefix:     "test",
		Delimiter:  "_",
		Marker:     "marker",
		Include:    &storage.IncludeBlobDataset{Metadata: true},
		MaxResults: 20,
		Timeout:    10,
	}

	// Test values set 2
	expectedURLValues1 := url.Values{}

	setBlobParameters1 := storage.ListBlobsParameters{
		Prefix:     "",
		Delimiter:  "",
		Marker:     "",
		Include:    nil,
		MaxResults: 0,
		Timeout:    0,
	}

	testCases := []struct {
		name string
		args storage.ListBlobsParameters
		want url.Values
	}{
		{"TestIfValuesSet", setBlobParameters, expectedURLValues},
		{"TestIfValuesNotSet", setBlobParameters1, expectedURLValues1},
	}
	for _, test := range testCases {
		t.Run(test.name, func(t *testing.T) {
			if got := azureListBlobsGetParameters(test.args); !reflect.DeepEqual(got, test.want) {
				t.Errorf("azureListBlobsGetParameters() = %v, want %v", got, test.want)
			}
		})
	}
}

func TestAnonErrToObjectErr(t *testing.T) {
	testCases := []struct {
		name string

@ -1,136 +0,0 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package b2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/pkg/errors"
|
||||
|
||||
minio "github.com/minio/minio/cmd"
|
||||
)
|
||||
|
||||
// mkRange converts offset, size into Range header equivalent.
|
||||
func mkRange(offset, size int64) string {
|
||||
if offset == 0 && size == 0 {
|
||||
return ""
|
||||
}
|
||||
if size == 0 {
|
||||
return fmt.Sprintf("bytes=%d-", offset)
|
||||
}
|
||||
return fmt.Sprintf("bytes=%d-%d", offset, offset+size-1)
|
||||
}
|
||||
|
||||
// AnonGetObject - performs a plain http GET request on a public resource,
|
||||
// fails if the resource is not public.
|
||||
func (l *b2Objects) AnonGetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
|
||||
uri := fmt.Sprintf("%s/file/%s/%s", l.b2Client.DownloadURI, bucket, object)
|
||||
req, err := http.NewRequest("GET", uri, nil)
|
||||
if err != nil {
|
||||
return b2ToObjectError(errors.Trace(err), bucket, object)
|
||||
}
|
||||
rng := mkRange(startOffset, length)
|
||||
if rng != "" {
|
||||
req.Header.Set("Range", rng)
|
||||
}
|
||||
resp, err := l.anonClient.Do(req)
|
||||
if err != nil {
|
||||
return b2ToObjectError(errors.Trace(err), bucket, object)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return b2ToObjectError(errors.Trace(fmt.Errorf(resp.Status)), bucket, object)
|
||||
}
|
||||
_, err = io.Copy(writer, resp.Body)
|
||||
return b2ToObjectError(errors.Trace(err), bucket, object)
|
||||
}
|
||||
|
||||
// Converts http Header into ObjectInfo. This function looks for all the
|
||||
// standard Backblaze B2 headers to convert into ObjectInfo.
|
||||
//
|
||||
// Content-Length is converted to Size.
|
||||
// X-Bz-Upload-Timestamp is converted to ModTime.
|
||||
// X-Bz-Info-<header>:<value> is converted to <header>:<value>
|
||||
// Content-Type is converted to ContentType.
|
||||
// X-Bz-Content-Sha1 is converted to ETag.
|
||||
func headerToObjectInfo(bucket, object string, header http.Header) (objInfo minio.ObjectInfo, err error) {
|
||||
clen, err := strconv.ParseInt(header.Get("Content-Length"), 10, 64)
|
||||
if err != nil {
|
||||
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
|
||||
}
|
||||
|
||||
// Converting upload timestamp in milliseconds to a time.Time value for ObjectInfo.ModTime.
|
||||
timeStamp, err := strconv.ParseInt(header.Get("X-Bz-Upload-Timestamp"), 10, 64)
|
||||
if err != nil {
|
||||
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
|
||||
}
|
||||
|
||||
// Populate user metadata by looking for all the X-Bz-Info-<name>
|
||||
// HTTP headers, ignore other headers since they have their own
|
||||
// designated meaning, for more details refer B2 API documentation.
|
||||
userMetadata := make(map[string]string)
|
||||
for key := range header {
|
||||
if strings.HasPrefix(key, "X-Bz-Info-") {
|
||||
var name string
|
||||
name, err = url.QueryUnescape(strings.TrimPrefix(key, "X-Bz-Info-"))
|
||||
if err != nil {
|
||||
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
|
||||
}
|
||||
var val string
|
||||
val, err = url.QueryUnescape(header.Get(key))
|
||||
if err != nil {
|
||||
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
|
||||
}
|
||||
userMetadata[name] = val
|
||||
}
|
||||
}
|
||||
|
||||
return minio.ObjectInfo{
|
||||
Bucket: bucket,
|
||||
Name: object,
|
||||
ContentType: header.Get("Content-Type"),
|
||||
ModTime: time.Unix(0, 0).Add(time.Duration(timeStamp) * time.Millisecond),
|
||||
Size: clen,
|
||||
ETag: header.Get("X-Bz-File-Id"),
|
||||
UserDefined: userMetadata,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// AnonGetObjectInfo - performs a plain http HEAD request on a public resource,
|
||||
// fails if the resource is not public.
|
||||
func (l *b2Objects) AnonGetObjectInfo(bucket string, object string) (objInfo minio.ObjectInfo, err error) {
|
||||
uri := fmt.Sprintf("%s/file/%s/%s", l.b2Client.DownloadURI, bucket, object)
|
||||
req, err := http.NewRequest("HEAD", uri, nil)
|
||||
if err != nil {
|
||||
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
|
||||
}
|
||||
resp, err := l.anonClient.Do(req)
|
||||
if err != nil {
|
||||
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return objInfo, b2ToObjectError(errors.Trace(fmt.Errorf(resp.Status)), bucket, object)
|
||||
}
|
||||
return headerToObjectInfo(bucket, object, resp.Header)
|
||||
}
|
@@ -24,7 +24,6 @@ import (
	"io"
	"io/ioutil"

	"net/http"
	"strings"
	"sync"
	"time"
@@ -96,9 +95,9 @@ func (g *B2) Name() string {
	return b2Backend
}

// NewGatewayLayer returns b2 gateway layer, implements GatewayLayer interface to
// NewGatewayLayer returns b2 gateway layer, implements ObjectLayer interface to
// talk to B2 remote backend.
func (g *B2) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) {
func (g *B2) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
	ctx := context.Background()
	client, err := b2.AuthorizeAccount(ctx, creds.AccessKey, creds.SecretKey, b2.Transport(minio.NewCustomHTTPTransport()))
	if err != nil {
@@ -108,10 +107,7 @@ func (g *B2) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error)
	return &b2Objects{
		creds:    creds,
		b2Client: client,
		anonClient: &http.Client{
			Transport: minio.NewCustomHTTPTransport(),
		},
		ctx: ctx,
		ctx:      ctx,
	}, nil
}

@@ -124,11 +120,10 @@ func (g *B2) Production() bool {
// b2Object implements gateway for Minio and BackBlaze B2 compatible object storage servers.
type b2Objects struct {
	minio.GatewayUnsupported
	mu         sync.Mutex
	creds      auth.Credentials
	b2Client   *b2.B2
	anonClient *http.Client
	ctx        context.Context
	mu       sync.Mutex
	creds    auth.Credentials
	b2Client *b2.B2
	ctx      context.Context
}

// Convert B2 errors to minio object layer errors.
@@ -694,11 +689,11 @@ func (l *b2Objects) CompleteMultipartUpload(bucket string, object string, upload
	return l.GetObjectInfo(bucket, object)
}

// SetBucketPolicies - B2 supports 2 types of bucket policies:
// SetBucketPolicy - B2 supports 2 types of bucket policies:
// bucketType.AllPublic - bucketTypeReadOnly means that anybody can download the files in the bucket;
// bucketType.AllPrivate - bucketTypePrivate means that you need an authorization token to download them.
// Default is AllPrivate for all buckets.
func (l *b2Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
func (l *b2Objects) SetBucketPolicy(bucket string, policyInfo policy.BucketAccessPolicy) error {
	var policies []minio.BucketAccessPolicy

	for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) {
@@ -726,9 +721,9 @@ func (l *b2Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAcc
	return b2ToObjectError(errors.Trace(err))
}

// GetBucketPolicies, returns the current bucketType from B2 backend and converts
// GetBucketPolicy, returns the current bucketType from B2 backend and converts
// it into S3 compatible bucket policy info.
func (l *b2Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
func (l *b2Objects) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error) {
	policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
	bkt, err := l.Bucket(bucket)
	if err != nil {
@@ -744,8 +739,8 @@ func (l *b2Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy,
	return policy.BucketAccessPolicy{}, errors.Trace(minio.PolicyNotFound{Bucket: bucket})
}

// DeleteBucketPolicies - resets the bucketType of bucket on B2 to 'allPrivate'.
func (l *b2Objects) DeleteBucketPolicies(bucket string) error {
// DeleteBucketPolicy - resets the bucketType of bucket on B2 to 'allPrivate'.
func (l *b2Objects) DeleteBucketPolicy(bucket string) error {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return err
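As the comments above note, B2 can only express two container-wide policies, so the S3 policy surface collapses to readonly (allPublic) and none (allPrivate). A hypothetical helper sketching that mapping, separate from the code in this change:

package example

import "github.com/minio/minio-go/pkg/policy"

// toB2BucketType maps a minio-go bucket policy onto the closest B2 bucket type;
// anything else is reported as unsupported by the caller.
func toB2BucketType(p policy.BucketPolicy) (bucketType string, ok bool) {
	switch p {
	case policy.BucketPolicyReadOnly:
		return "allPublic", true
	case policy.BucketPolicyNone:
		return "allPrivate", true
	default:
		return "", false
	}
}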
|
@ -18,7 +18,6 @@ package b2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
b2 "github.com/minio/blazer/base"
|
||||
@ -27,88 +26,6 @@ import (
|
||||
minio "github.com/minio/minio/cmd"
|
||||
)
|
||||
|
||||
// Tests headerToObjectInfo
|
||||
func TestHeaderToObjectInfo(t *testing.T) {
|
||||
testCases := []struct {
|
||||
bucket, object string
|
||||
header http.Header
|
||||
objInfo minio.ObjectInfo
|
||||
}{
|
||||
{
|
||||
bucket: "bucket",
|
||||
object: "object",
|
||||
header: http.Header{
|
||||
"Content-Length": []string{"10"},
|
||||
"Content-Type": []string{"application/javascript"},
|
||||
"X-Bz-Upload-Timestamp": []string{"1000"},
|
||||
"X-Bz-Info-X-Amz-Meta-1": []string{"test1"},
|
||||
"X-Bz-File-Id": []string{"xxxxx"},
|
||||
},
|
||||
objInfo: minio.ObjectInfo{
|
||||
Bucket: "bucket",
|
||||
Name: "object",
|
||||
ContentType: "application/javascript",
|
||||
Size: 10,
|
||||
UserDefined: map[string]string{
|
||||
"X-Amz-Meta-1": "test1",
|
||||
},
|
||||
ETag: "xxxxx",
|
||||
},
|
||||
},
|
||||
}
|
||||
for i, testCase := range testCases {
|
||||
gotObjInfo, err := headerToObjectInfo(testCase.bucket, testCase.object, testCase.header)
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: %s", i+1, err)
|
||||
}
|
||||
if gotObjInfo.Bucket != testCase.objInfo.Bucket {
|
||||
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.objInfo.Bucket, gotObjInfo.Bucket)
|
||||
}
|
||||
if gotObjInfo.Name != testCase.objInfo.Name {
|
||||
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.objInfo.Name, gotObjInfo.Name)
|
||||
}
|
||||
if gotObjInfo.ContentType != testCase.objInfo.ContentType {
|
||||
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.objInfo.ContentType, gotObjInfo.ContentType)
|
||||
}
|
||||
if gotObjInfo.ETag != testCase.objInfo.ETag {
|
||||
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.objInfo.ETag, gotObjInfo.ETag)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests mkRange test.
|
||||
func TestMkRange(t *testing.T) {
|
||||
testCases := []struct {
|
||||
offset, size int64
|
||||
expectedRng string
|
||||
}{
|
||||
// No offset set, size not set.
|
||||
{
|
||||
offset: 0,
|
||||
size: 0,
|
||||
expectedRng: "",
|
||||
},
|
||||
// Offset set, size not set.
|
||||
{
|
||||
offset: 10,
|
||||
size: 0,
|
||||
expectedRng: "bytes=10-",
|
||||
},
|
||||
// Offset set, size set.
|
||||
{
|
||||
offset: 10,
|
||||
size: 11,
|
||||
expectedRng: "bytes=10-20",
|
||||
},
|
||||
}
|
||||
for i, testCase := range testCases {
|
||||
gotRng := mkRange(testCase.offset, testCase.size)
|
||||
if gotRng != testCase.expectedRng {
|
||||
t.Errorf("Test %d: expected %s, got %s", i+1, testCase.expectedRng, gotRng)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test b2 object error.
|
||||
func TestB2ObjectError(t *testing.T) {
|
||||
testCases := []struct {
|
||||
|
@ -1,146 +0,0 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package gcs

import (
    "fmt"
    "io"
    "net/http"
    "strconv"
    "time"

    "github.com/minio/minio/pkg/errors"

    minio "github.com/minio/minio/cmd"
)

func toGCSPublicURL(bucket, object string) string {
    return fmt.Sprintf("https://storage.googleapis.com/%s/%s", bucket, object)
}

// AnonGetObject - Get object anonymously
func (l *gcsGateway) AnonGetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
    req, err := http.NewRequest("GET", toGCSPublicURL(bucket, object), nil)
    if err != nil {
        return gcsToObjectError(errors.Trace(err), bucket, object)
    }

    if length > 0 && startOffset > 0 {
        req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
    } else if startOffset > 0 {
        req.Header.Add("Range", fmt.Sprintf("bytes=%d-", startOffset))
    }

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return gcsToObjectError(errors.Trace(err), bucket, object)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
        return gcsToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
    }

    _, err = io.Copy(writer, resp.Body)
    return gcsToObjectError(errors.Trace(err), bucket, object)
}

// AnonGetObjectInfo - Get object info anonymously
func (l *gcsGateway) AnonGetObjectInfo(bucket string, object string) (objInfo minio.ObjectInfo, err error) {
    resp, err := http.Head(toGCSPublicURL(bucket, object))
    if err != nil {
        return objInfo, gcsToObjectError(errors.Trace(err), bucket, object)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return objInfo, gcsToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
    }

    var contentLength int64
    contentLengthStr := resp.Header.Get("Content-Length")
    if contentLengthStr != "" {
        contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
        if err != nil {
            return objInfo, gcsToObjectError(errors.Trace(fmt.Errorf("Unexpected error")), bucket, object)
        }
    }

    t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
    if err != nil {
        return objInfo, errors.Trace(err)
    }

    objInfo.ModTime = t
    objInfo.Bucket = bucket
    objInfo.UserDefined = make(map[string]string)
    if resp.Header.Get("Content-Encoding") != "" {
        objInfo.UserDefined["Content-Encoding"] = resp.Header.Get("Content-Encoding")
    }
    objInfo.UserDefined["Content-Type"] = resp.Header.Get("Content-Type")
    objInfo.ETag = resp.Header.Get("Etag")
    objInfo.ModTime = t
    objInfo.Name = object
    objInfo.Size = contentLength
    return
}

// AnonListObjects - List objects anonymously
func (l *gcsGateway) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (minio.ListObjectsInfo, error) {
    result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
    if err != nil {
        return minio.ListObjectsInfo{}, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
    }

    return minio.FromMinioClientListBucketResult(bucket, result), nil
}

// AnonListObjectsV2 - List objects in V2 mode, anonymously
func (l *gcsGateway) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (minio.ListObjectsV2Info, error) {
    // Request V1 List Object to the backend
    result, err := l.anonClient.ListObjects(bucket, prefix, continuationToken, delimiter, maxKeys)
    if err != nil {
        return minio.ListObjectsV2Info{}, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
    }
    // translate V1 Result to V2Info
    return minio.FromMinioClientListBucketResultToV2Info(bucket, result), nil
}

// AnonGetBucketInfo - Get bucket metadata anonymously.
func (l *gcsGateway) AnonGetBucketInfo(bucket string) (bucketInfo minio.BucketInfo, err error) {
    resp, err := http.Head(toGCSPublicURL(bucket, ""))
    if err != nil {
        return bucketInfo, gcsToObjectError(errors.Trace(err))
    }

    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return bucketInfo, gcsToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket)), bucket)
    }

    t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
    if err != nil {
        return bucketInfo, errors.Trace(err)
    }

    // Last-Modified date being returned by GCS
    return minio.BucketInfo{
        Name:    bucket,
        Created: t,
    }, nil
}
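A note on the Range arithmetic in AnonGetObject above: a bounded read asks for bytes start through start+length-1, while an offset-only read leaves the range open-ended. A minimal standalone sketch of that mapping (the helper name rangeHeader is ours, for illustration only):

package main

import "fmt"

// rangeHeader mirrors the offset/length handling used by AnonGetObject above:
// a positive length with a positive offset yields a bounded "bytes=start-end"
// range, while a positive offset alone yields an open-ended "bytes=start-".
// The helper name is ours, not part of the MinIO codebase.
func rangeHeader(startOffset, length int64) string {
    if length > 0 && startOffset > 0 {
        return fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1)
    }
    if startOffset > 0 {
        return fmt.Sprintf("bytes=%d-", startOffset)
    }
    return "" // full object, no Range header needed
}

func main() {
    fmt.Println(rangeHeader(100, 50)) // bytes=100-149
    fmt.Println(rangeHeader(100, 0))  // bytes=100-
}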
@@ -42,7 +42,6 @@ import (
    "google.golang.org/api/iterator"
    "google.golang.org/api/option"

    miniogo "github.com/minio/minio-go"
    minio "github.com/minio/minio/cmd"
)

@@ -155,13 +154,13 @@ type GCS struct {
    projectID string
}

// Name returns the name of gcs gatewaylayer.
// Name returns the name of gcs ObjectLayer.
func (g *GCS) Name() string {
    return gcsBackend
}

// NewGatewayLayer returns gcs gatewaylayer.
func (g *GCS) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) {
// NewGatewayLayer returns gcs ObjectLayer.
func (g *GCS) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
    ctx := context.Background()

    var err error
@@ -182,18 +181,10 @@ func (g *GCS) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error
        return nil, err
    }

    // Initialize a anonymous client with minio core APIs.
    anonClient, err := miniogo.NewCore(googleStorageEndpoint, "", "", true)
    if err != nil {
        return nil, err
    }
    anonClient.SetCustomTransport(minio.NewCustomHTTPTransport())

    gcs := &gcsGateway{
        client:     client,
        projectID:  g.projectID,
        ctx:        ctx,
        anonClient: anonClient,
        client:    client,
        projectID: g.projectID,
        ctx:       ctx,
    }

    // Start background process to cleanup old files in minio.sys.tmp
@@ -349,10 +340,9 @@ func isValidGCSProjectIDFormat(projectID string) bool {
// gcsGateway - Implements gateway for Minio and GCS compatible object storage servers.
type gcsGateway struct {
    minio.GatewayUnsupported
    client     *storage.Client
    anonClient *miniogo.Core
    projectID  string
    ctx        context.Context
    client    *storage.Client
    projectID string
    ctx       context.Context
}

const googleStorageEndpoint = "storage.googleapis.com"
@@ -1057,8 +1047,8 @@ func (l *gcsGateway) CompleteMultipartUpload(bucket string, key string, uploadID
    return fromGCSAttrsToObjectInfo(attrs), nil
}

// SetBucketPolicies - Set policy on bucket
func (l *gcsGateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
// SetBucketPolicy - Set policy on bucket
func (l *gcsGateway) SetBucketPolicy(bucket string, policyInfo policy.BucketAccessPolicy) error {
    var policies []minio.BucketAccessPolicy

    for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) {
@@ -1102,8 +1092,8 @@ func (l *gcsGateway) SetBucketPolicies(bucket string, policyInfo policy.BucketAc
    return nil
}

// GetBucketPolicies - Get policy on bucket
func (l *gcsGateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
// GetBucketPolicy - Get policy on bucket
func (l *gcsGateway) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error) {
    rules, err := l.client.Bucket(bucket).ACL().List(l.ctx)
    if err != nil {
        return policy.BucketAccessPolicy{}, gcsToObjectError(errors.Trace(err), bucket)
@@ -1127,8 +1117,8 @@ func (l *gcsGateway) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy
    return policyInfo, nil
}

// DeleteBucketPolicies - Delete all policies on bucket
func (l *gcsGateway) DeleteBucketPolicies(bucket string) error {
// DeleteBucketPolicy - Delete all policies on bucket
func (l *gcsGateway) DeleteBucketPolicy(bucket string) error {
    // This only removes the storage.AllUsers policies
    if err := l.client.Bucket(bucket).ACL().Delete(l.ctx, storage.AllUsers); err != nil {
        return gcsToObjectError(errors.Trace(err), bucket)
@@ -223,13 +223,6 @@ func TestGCSParseProjectID(t *testing.T) {
    }
}

func TestGCSPublicURL(t *testing.T) {
    gcsURL := toGCSPublicURL("bucket", "testing")
    if gcsURL != "https://storage.googleapis.com/bucket/testing" {
        t.Errorf(`Expected "https://storage.googleapis.com/bucket/testing", got %s"`, gcsURL)
    }
}

func TestGCSToObjectError(t *testing.T) {
    testCases := []struct {
        params []string
@@ -118,9 +118,9 @@ func (g *Manta) Name() string {
    return mantaBackend
}

// NewGatewayLayer returns manta gateway layer, implements GatewayLayer interface to
// NewGatewayLayer returns manta gateway layer, implements ObjectLayer interface to
// talk to manta remote backend.
func (g *Manta) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) {
func (g *Manta) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
    var err error
    var signer authentication.Signer
    var endpoint = defaultMantaURL
@@ -1,54 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package oss

import (
    "io"

    minio "github.com/minio/minio/cmd"
    "github.com/minio/minio/pkg/hash"
)

// AnonPutObject creates a new object anonymously with the incoming data,
func (l *ossObjects) AnonPutObject(bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
    return ossPutObject(l.anonClient, bucket, object, data, metadata)
}

// AnonGetObject - Get object anonymously
func (l *ossObjects) AnonGetObject(bucket, key string, startOffset, length int64, writer io.Writer, etag string) error {
    return ossGetObject(l.anonClient, bucket, key, startOffset, length, writer, etag)
}

// AnonGetObjectInfo - Get object info anonymously
func (l *ossObjects) AnonGetObjectInfo(bucket, object string) (objInfo minio.ObjectInfo, err error) {
    return ossGetObjectInfo(l.anonClient, bucket, object)
}

// AnonListObjects lists objects anonymously.
func (l *ossObjects) AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
    return ossListObjects(l.anonClient, bucket, prefix, marker, delimiter, maxKeys)
}

// AnonListObjectsV2 lists objects in V2 mode, anonymously.
func (l *ossObjects) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, err error) {
    return ossListObjectsV2(l.anonClient, bucket, prefix, continuationToken, delimiter, maxKeys, fetchOwner, startAfter)
}

// AnonGetBucketInfo gets bucket metadata anonymously.
func (l *ossObjects) AnonGetBucketInfo(bucket string) (bi minio.BucketInfo, err error) {
    return ossGeBucketInfo(l.anonClient, bucket)
}
@@ -109,8 +109,8 @@ func (g *OSS) Name() string {
    return ossBackend
}

// NewGatewayLayer implements Gateway interface and returns OSS GatewayLayer.
func (g *OSS) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) {
// NewGatewayLayer implements Gateway interface and returns OSS ObjectLayer.
func (g *OSS) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
    var err error

    // Regions and endpoints
@@ -125,14 +125,8 @@ func (g *OSS) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error
        return nil, err
    }

    anonClient, err := oss.New(g.host, "", "")
    if err != nil {
        return nil, err
    }

    return &ossObjects{
        Client:     client,
        anonClient: anonClient,
        Client: client,
    }, nil
}

@@ -324,8 +318,7 @@ func ossToObjectError(err error, params ...string) error {
// ossObjects implements gateway for Aliyun Object Storage Service.
type ossObjects struct {
    minio.GatewayUnsupported
    Client     *oss.Client
    anonClient *oss.Client
    Client *oss.Client
}

// Shutdown saves any gateway metadata to disk
@@ -920,12 +913,12 @@ func (l *ossObjects) CompleteMultipartUpload(bucket, object, uploadID string, up
    return l.GetObjectInfo(bucket, object)
}

// SetBucketPolicies sets policy on bucket.
// SetBucketPolicy sets policy on bucket.
// OSS supports three types of bucket policies:
// oss.ACLPublicReadWrite: readwrite in minio terminology
// oss.ACLPublicRead: readonly in minio terminology
// oss.ACLPrivate: none in minio terminology
func (l *ossObjects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
func (l *ossObjects) SetBucketPolicy(bucket string, policyInfo policy.BucketAccessPolicy) error {
    bucketPolicies := policy.GetPolicies(policyInfo.Statements, bucket)
    if len(bucketPolicies) != 1 {
        return errors.Trace(minio.NotImplemented{})
@@ -958,8 +951,8 @@ func (l *ossObjects) SetBucketPolicies(bucket string, policyInfo policy.BucketAc
    return nil
}

// GetBucketPolicies will get policy on bucket.
func (l *ossObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
// GetBucketPolicy will get policy on bucket.
func (l *ossObjects) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error) {
    result, err := l.Client.GetBucketACL(bucket)
    if err != nil {
        return policy.BucketAccessPolicy{}, ossToObjectError(errors.Trace(err))
@@ -981,8 +974,8 @@ func (l *ossObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy
    return policyInfo, nil
}

// DeleteBucketPolicies deletes all policies on bucket.
func (l *ossObjects) DeleteBucketPolicies(bucket string) error {
// DeleteBucketPolicy deletes all policies on bucket.
func (l *ossObjects) DeleteBucketPolicy(bucket string) error {
    err := l.Client.SetBucketACL(bucket, oss.ACLPrivate)
    if err != nil {
        return ossToObjectError(errors.Trace(err), bucket)
@@ -1,114 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package s3

import (
    "io"

    miniogo "github.com/minio/minio-go"
    "github.com/minio/minio/pkg/errors"
    "github.com/minio/minio/pkg/hash"

    minio "github.com/minio/minio/cmd"
)

// AnonPutObject creates a new object anonymously with the incoming data,
func (l *s3Objects) AnonPutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, e error) {
    oi, err := l.anonClient.PutObject(bucket, object, data, data.Size(), data.MD5Base64String(), data.SHA256HexString(), minio.ToMinioClientMetadata(metadata))
    if err != nil {
        return objInfo, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
    }

    return minio.FromMinioClientObjectInfo(bucket, oi), nil
}

// AnonGetObject - Get object anonymously
func (l *s3Objects) AnonGetObject(bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string) error {
    opts := miniogo.GetObjectOptions{}
    if err := opts.SetRange(startOffset, startOffset+length-1); err != nil {
        return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key)
    }
    object, _, err := l.anonClient.GetObject(bucket, key, opts)
    if err != nil {
        return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key)
    }

    defer object.Close()

    if _, err := io.CopyN(writer, object, length); err != nil {
        return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key)
    }

    return nil
}

// AnonGetObjectInfo - Get object info anonymously
func (l *s3Objects) AnonGetObjectInfo(bucket string, object string) (objInfo minio.ObjectInfo, e error) {
    oi, err := l.anonClient.StatObject(bucket, object, miniogo.StatObjectOptions{})
    if err != nil {
        return objInfo, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object)
    }

    return minio.FromMinioClientObjectInfo(bucket, oi), nil
}

// AnonListObjects - List objects anonymously
func (l *s3Objects) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, e error) {
    result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
    if err != nil {
        return loi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
    }

    return minio.FromMinioClientListBucketResult(bucket, result), nil
}

// AnonListObjectsV2 - List objects in V2 mode, anonymously
func (l *s3Objects) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) {
    result, err := l.anonClient.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
    if err != nil {
        return loi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
    }

    return minio.FromMinioClientListBucketV2Result(bucket, result), nil
}

// AnonGetBucketInfo - Get bucket metadata anonymously.
func (l *s3Objects) AnonGetBucketInfo(bucket string) (bi minio.BucketInfo, e error) {
    if exists, err := l.anonClient.BucketExists(bucket); err != nil {
        return bi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
    } else if !exists {
        return bi, errors.Trace(minio.BucketNotFound{Bucket: bucket})
    }

    buckets, err := l.anonClient.ListBuckets()
    if err != nil {
        return bi, minio.ErrorRespToObjectError(errors.Trace(err), bucket)
    }

    for _, bi := range buckets {
        if bi.Name != bucket {
            continue
        }

        return minio.BucketInfo{
            Name:    bi.Name,
            Created: bi.CreationDate,
        }, nil
    }

    return bi, errors.Trace(minio.BucketNotFound{Bucket: bucket})
}
@@ -99,8 +99,8 @@ func (g *S3) Name() string {
    return s3Backend
}

// NewGatewayLayer returns s3 gatewaylayer.
func (g *S3) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) {
// NewGatewayLayer returns s3 ObjectLayer.
func (g *S3) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
    var err error
    var endpoint string
    var secure = true
@@ -125,15 +125,8 @@ func (g *S3) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error)
        return nil, err
    }

    anonClient, err := miniogo.NewCore(endpoint, "", "", secure)
    if err != nil {
        return nil, err
    }
    anonClient.SetCustomTransport(minio.NewCustomHTTPTransport())

    return &s3Objects{
        Client:     client,
        anonClient: anonClient,
        Client: client,
    }, nil
}

@@ -145,8 +138,7 @@ func (g *S3) Production() bool {
// s3Objects implements gateway for Minio and S3 compatible object storage servers.
type s3Objects struct {
    minio.GatewayUnsupported
    Client     *miniogo.Core
    anonClient *miniogo.Core
    Client *miniogo.Core
}

// Shutdown saves any gateway metadata to disk
@@ -392,8 +384,8 @@ func (l *s3Objects) CompleteMultipartUpload(bucket string, object string, upload
    return l.GetObjectInfo(bucket, object)
}

// SetBucketPolicies sets policy on bucket
func (l *s3Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
// SetBucketPolicy sets policy on bucket
func (l *s3Objects) SetBucketPolicy(bucket string, policyInfo policy.BucketAccessPolicy) error {
    if err := l.Client.PutBucketPolicy(bucket, policyInfo); err != nil {
        return minio.ErrorRespToObjectError(errors.Trace(err), bucket, "")
    }
@@ -401,8 +393,8 @@ func (l *s3Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAcc
    return nil
}

// GetBucketPolicies will get policy on bucket
func (l *s3Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
// GetBucketPolicy will get policy on bucket
func (l *s3Objects) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error) {
    policyInfo, err := l.Client.GetBucketPolicy(bucket)
    if err != nil {
        return policy.BucketAccessPolicy{}, minio.ErrorRespToObjectError(errors.Trace(err), bucket, "")
@@ -410,8 +402,8 @@ func (l *s3Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy,
    return policyInfo, nil
}

// DeleteBucketPolicies deletes all policies on bucket
func (l *s3Objects) DeleteBucketPolicies(bucket string) error {
// DeleteBucketPolicy deletes all policies on bucket
func (l *s3Objects) DeleteBucketPolicy(bucket string) error {
    if err := l.Client.PutBucketPolicy(bucket, policy.BucketAccessPolicy{}); err != nil {
        return minio.ErrorRespToObjectError(errors.Trace(err), bucket, "")
    }
@@ -113,9 +113,9 @@ func (g *Sia) Name() string {
    return siaBackend
}

// NewGatewayLayer returns Sia gateway layer, implements GatewayLayer interface to
// NewGatewayLayer returns Sia gateway layer, implements ObjectLayer interface to
// talk to Sia backend.
func (g *Sia) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) {
func (g *Sia) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
    sia := &siaObjects{
        Address: g.host,
        // RootDir uses access key directly, provides partitioning for
@@ -398,7 +398,7 @@ func isErrIncompleteBody(err error) bool {
func isErrBucketPolicyNotFound(err error) bool {
    err = errors.Cause(err)
    switch err.(type) {
    case BucketPolicyNotFound:
    case PolicyNotFound:
        return true
    }
    return false
@@ -107,31 +107,31 @@ func checkListMultipartArgs(bucket, prefix, keyMarker, uploadIDMarker, delimiter

// Checks for NewMultipartUpload arguments validity, also validates if bucket exists.
func checkNewMultipartArgs(bucket, object string, obj ObjectLayer) error {
    return checkPutObjectArgs(bucket, object, obj)
    return checkObjectArgs(bucket, object, obj)
}

// Checks for PutObjectPart arguments validity, also validates if bucket exists.
func checkPutObjectPartArgs(bucket, object string, obj ObjectLayer) error {
    return checkPutObjectArgs(bucket, object, obj)
    return checkObjectArgs(bucket, object, obj)
}

// Checks for ListParts arguments validity, also validates if bucket exists.
func checkListPartsArgs(bucket, object string, obj ObjectLayer) error {
    return checkPutObjectArgs(bucket, object, obj)
    return checkObjectArgs(bucket, object, obj)
}

// Checks for CompleteMultipartUpload arguments validity, also validates if bucket exists.
func checkCompleteMultipartArgs(bucket, object string, obj ObjectLayer) error {
    return checkPutObjectArgs(bucket, object, obj)
    return checkObjectArgs(bucket, object, obj)
}

// Checks for AbortMultipartUpload arguments validity, also validates if bucket exists.
func checkAbortMultipartArgs(bucket, object string, obj ObjectLayer) error {
    return checkPutObjectArgs(bucket, object, obj)
    return checkObjectArgs(bucket, object, obj)
}

// Checks for PutObject arguments validity, also validates if bucket exists.
func checkPutObjectArgs(bucket, object string, obj ObjectLayer) error {
// Checks Object arguments validity, also validates if bucket exists.
func checkObjectArgs(bucket, object string, obj ObjectLayer) error {
    // Verify if bucket exists before validating object name.
    // This is done on purpose since the order of errors is
    // important here bucket does not exist error should
@@ -150,6 +150,29 @@ func checkPutObjectArgs(bucket, object string, obj ObjectLayer) error {
    return nil
}

// Checks for PutObject arguments validity, also validates if bucket exists.
func checkPutObjectArgs(bucket, object string, obj ObjectLayer, size int64) error {
    // Verify if bucket exists before validating object name.
    // This is done on purpose since the order of errors is
    // important here bucket does not exist error should
    // happen before we return an error for invalid object name.
    // FIXME: should be moved to handler layer.
    if err := checkBucketExist(bucket, obj); err != nil {
        return errors.Trace(err)
    }

    if len(object) == 0 ||
        hasPrefix(object, slashSeparator) ||
        (hasSuffix(object, slashSeparator) && size != 0) ||
        !IsValidObjectPrefix(object) {
        return errors.Trace(ObjectNameInvalid{
            Bucket: bucket,
            Object: object,
        })
    }
    return nil
}

// Checks whether bucket exists and returns appropriate error if not.
func checkBucketExist(bucket string, obj ObjectLayer) error {
    _, err := obj.GetBucketInfo(bucket)
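The size-aware checkPutObjectArgs added above rejects empty names, names rooted at "/", and directory-style names (trailing "/") unless the object is zero bytes. A minimal standalone sketch of that rule, assuming strings.HasPrefix/HasSuffix stand in for the repo's hasPrefix/hasSuffix helpers and with the bucket-existence and IsValidObjectPrefix checks omitted:

package main

import (
    "fmt"
    "strings"
)

const slashSeparator = "/"

// objectNameOKForPut mirrors the shape of the new checkPutObjectArgs rule:
// empty names and names starting with "/" are rejected, and a directory-style
// name (trailing "/") is only allowed for zero-byte objects. The real helper
// additionally verifies the bucket exists and runs IsValidObjectPrefix.
func objectNameOKForPut(object string, size int64) bool {
    if len(object) == 0 ||
        strings.HasPrefix(object, slashSeparator) ||
        (strings.HasSuffix(object, slashSeparator) && size != 0) {
        return false
    }
    return true
}

func main() {
    fmt.Println(objectNameOKForPut("photos/2018/a.jpg", 1024)) // true
    fmt.Println(objectNameOKForPut("photos/2018/", 0))         // true: zero-byte "directory" marker
    fmt.Println(objectNameOKForPut("photos/2018/", 1024))      // false: non-empty directory-style name
    fmt.Println(objectNameOKForPut("/abs/path", 10))           // false: leading slash
}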
@@ -20,6 +20,7 @@ import (
    "io"
    "time"

    "github.com/minio/minio-go/pkg/policy"
    "github.com/minio/minio/pkg/hash"
    "github.com/minio/minio/pkg/madmin"
)
@@ -36,6 +37,7 @@ type ObjectLayer interface {
    ListBuckets() (buckets []BucketInfo, err error)
    DeleteBucket(bucket string) error
    ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
    ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error)

    // Object operations.
    GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error)
@@ -62,4 +64,14 @@ type ObjectLayer interface {
    // Locking operations
    ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error)
    ClearLocks([]VolumeLockInfo) error

    // Policy operations
    SetBucketPolicy(string, policy.BucketAccessPolicy) error
    GetBucketPolicy(string) (policy.BucketAccessPolicy, error)
    RefreshBucketPolicy(string) error
    DeleteBucketPolicy(string) error

    // Supported operations check
    IsNotificationSupported() bool
    IsEncryptionSupported() bool
}
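With bucket policies and capability probes folded into ObjectLayer, every backend now has to answer these calls, including gateways that cannot persist a policy at all. The sketch below only illustrates that shape with local types and a plain string standing in for policy.BucketAccessPolicy; it is not MinIO's actual code:

package main

import (
    "errors"
    "fmt"
)

// bucketPolicyOps is a local stand-in for the policy/capability slice of the
// unified interface shown above.
type bucketPolicyOps interface {
    SetBucketPolicy(bucket string, policyJSON string) error
    GetBucketPolicy(bucket string) (string, error)
    RefreshBucketPolicy(bucket string) error
    DeleteBucketPolicy(bucket string) error
    IsNotificationSupported() bool
    IsEncryptionSupported() bool
}

var errNotImplemented = errors.New("not implemented")

// policyUnsupported can be embedded by a backend that cannot persist bucket
// policies: every policy call degrades to a clean "not implemented" error,
// and the capability probes report false so handlers can skip those paths.
type policyUnsupported struct{}

func (policyUnsupported) SetBucketPolicy(string, string) error   { return errNotImplemented }
func (policyUnsupported) GetBucketPolicy(string) (string, error) { return "", errNotImplemented }
func (policyUnsupported) RefreshBucketPolicy(string) error       { return errNotImplemented }
func (policyUnsupported) DeleteBucketPolicy(string) error        { return errNotImplemented }
func (policyUnsupported) IsNotificationSupported() bool          { return false }
func (policyUnsupported) IsEncryptionSupported() bool            { return false }

// toyBackend satisfies bucketPolicyOps simply by embedding the stub.
type toyBackend struct{ policyUnsupported }

func main() {
    var b bucketPolicyOps = toyBackend{}
    _, err := b.GetBucketPolicy("photos")
    fmt.Println(err, b.IsEncryptionSupported()) // not implemented false
}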
@@ -116,9 +116,12 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
        writeErrorResponse(w, apiErr, r.URL)
        return
    }
    if apiErr, _ := DecryptObjectInfo(&objInfo, r.Header); apiErr != ErrNone {
        writeErrorResponse(w, apiErr, r.URL)
        return

    if objectAPI.IsEncryptionSupported() {
        if apiErr, _ := DecryptObjectInfo(&objInfo, r.Header); apiErr != ErrNone {
            writeErrorResponse(w, apiErr, r.URL)
            return
        }
    }

    // Get request range.
@@ -153,26 +156,28 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req

    var writer io.Writer
    writer = w
    if IsSSECustomerRequest(r.Header) {
        writer, err = DecryptRequest(writer, r, objInfo.UserDefined)
        if err != nil {
            writeErrorResponse(w, toAPIErrorCode(err), r.URL)
            return
        }
        w.Header().Set(SSECustomerAlgorithm, r.Header.Get(SSECustomerAlgorithm))
        w.Header().Set(SSECustomerKeyMD5, r.Header.Get(SSECustomerKeyMD5))
    if objectAPI.IsEncryptionSupported() {
        if IsSSECustomerRequest(r.Header) {
            writer, err = DecryptRequest(writer, r, objInfo.UserDefined)
            if err != nil {
                writeErrorResponse(w, toAPIErrorCode(err), r.URL)
                return
            }
            w.Header().Set(SSECustomerAlgorithm, r.Header.Get(SSECustomerAlgorithm))
            w.Header().Set(SSECustomerKeyMD5, r.Header.Get(SSECustomerKeyMD5))

            if startOffset != 0 || length < objInfo.Size {
                writeErrorResponse(w, ErrNotImplemented, r.URL) // SSE-C requests with HTTP range are not supported yet
                return
            if startOffset != 0 || length < objInfo.Size {
                writeErrorResponse(w, ErrNotImplemented, r.URL) // SSE-C requests with HTTP range are not supported yet
                return
            }
            length = objInfo.EncryptedSize()
        }
        length = objInfo.EncryptedSize()
    }

    setObjectHeaders(w, objInfo, hrange)
    setHeadGetRespHeaders(w, r.URL.Query())

    httpWriter := ioutil.WriteOnClose(writer)

    // Reads the object at startOffset and writes to mw.
    if err = objectAPI.GetObject(bucket, object, startOffset, length, httpWriter, objInfo.ETag); err != nil {
        errorIf(err, "Unable to write to client.")
@@ -235,13 +240,15 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
        writeErrorResponseHeadersOnly(w, apiErr)
        return
    }
    if apiErr, encrypted := DecryptObjectInfo(&objInfo, r.Header); apiErr != ErrNone {
        writeErrorResponse(w, apiErr, r.URL)
        return
    } else if encrypted {
        if _, err = DecryptRequest(w, r, objInfo.UserDefined); err != nil {
            writeErrorResponse(w, ErrSSEEncryptedObject, r.URL)
    if objectAPI.IsEncryptionSupported() {
        if apiErr, encrypted := DecryptObjectInfo(&objInfo, r.Header); apiErr != ErrNone {
            writeErrorResponse(w, apiErr, r.URL)
            return
        } else if encrypted {
            if _, err = DecryptRequest(w, r, objInfo.UserDefined); err != nil {
                writeErrorResponse(w, ErrSSEEncryptedObject, r.URL)
                return
            }
        }
    }

@@ -548,18 +555,19 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
        writeErrorResponse(w, toAPIErrorCode(err), r.URL)
        return
    }

    if IsSSECustomerRequest(r.Header) { // handle SSE-C requests
        reader, err = EncryptRequest(hashReader, r, metadata)
        if err != nil {
            writeErrorResponse(w, toAPIErrorCode(err), r.URL)
            return
        }
        info := ObjectInfo{Size: size}
        hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "") // do not try to verify encrypted content
        if err != nil {
            writeErrorResponse(w, toAPIErrorCode(err), r.URL)
            return
    if objectAPI.IsEncryptionSupported() {
        if IsSSECustomerRequest(r.Header) { // handle SSE-C requests
            reader, err = EncryptRequest(hashReader, r, metadata)
            if err != nil {
                writeErrorResponse(w, toAPIErrorCode(err), r.URL)
                return
            }
            info := ObjectInfo{Size: size}
            hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "") // do not try to verify encrypted content
            if err != nil {
                writeErrorResponse(w, toAPIErrorCode(err), r.URL)
                return
            }
        }
    }

@@ -569,10 +577,13 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
        return
    }
    w.Header().Set("ETag", "\""+objInfo.ETag+"\"")
    if IsSSECustomerRequest(r.Header) {
        w.Header().Set(SSECustomerAlgorithm, r.Header.Get(SSECustomerAlgorithm))
        w.Header().Set(SSECustomerKeyMD5, r.Header.Get(SSECustomerKeyMD5))
    if objectAPI.IsEncryptionSupported() {
        if IsSSECustomerRequest(r.Header) {
            w.Header().Set(SSECustomerAlgorithm, r.Header.Get(SSECustomerAlgorithm))
            w.Header().Set(SSECustomerKeyMD5, r.Header.Get(SSECustomerKeyMD5))
        }
    }

    writeSuccessResponseHeadersOnly(w)

    // Get host and port from Request.RemoteAddr.
@@ -171,7 +171,7 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string,
    // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
    // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the
    // unsigned request goes through and its validated again.
    ExecObjectLayerAPIAnonTest(t, "TestAPIHeadObjectHandler", bucketName, objectName, instanceType, apiRouter, anonReq, getReadOnlyObjectStatement)
    ExecObjectLayerAPIAnonTest(t, obj, "TestAPIHeadObjectHandler", bucketName, objectName, instanceType, apiRouter, anonReq, getReadOnlyObjectStatement)

    // HTTP request for testing when `objectLayer` is set to `nil`.
    // There is no need to use an existing bucket and valid input for creating the request
@@ -444,7 +444,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
    // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
    // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the
    // unsigned request goes through and its validated again.
    ExecObjectLayerAPIAnonTest(t, "TestAPIGetObjectHandler", bucketName, objectName, instanceType, apiRouter, anonReq, getReadOnlyObjectStatement)
    ExecObjectLayerAPIAnonTest(t, obj, "TestAPIGetObjectHandler", bucketName, objectName, instanceType, apiRouter, anonReq, getReadOnlyObjectStatement)

    // HTTP request for testing when `objectLayer` is set to `nil`.
    // There is no need to use an existing bucket and valid input for creating the request
@@ -1008,7 +1008,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
    // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
    // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the
    // unsigned request goes through and its validated again.
    ExecObjectLayerAPIAnonTest(t, "TestAPIPutObjectHandler", bucketName, objectName, instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement)
    ExecObjectLayerAPIAnonTest(t, obj, "TestAPIPutObjectHandler", bucketName, objectName, instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement)

    // HTTP request to test the case of `objectLayer` being set to `nil`.
    // There is no need to use an existing bucket or valid input for creating the request,
@@ -1866,7 +1866,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
    // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
    // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the
    // unsigned request goes through and its validated again.
    ExecObjectLayerAPIAnonTest(t, "TestAPICopyObjectHandler", bucketName, newCopyAnonObject, instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement)
    ExecObjectLayerAPIAnonTest(t, obj, "TestAPICopyObjectHandler", bucketName, newCopyAnonObject, instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement)

    // HTTP request to test the case of `objectLayer` being set to `nil`.
    // There is no need to use an existing bucket or valid input for creating the request,
@@ -2017,7 +2017,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string
    // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
    // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the
    // unsigned request goes through and its validated again.
    ExecObjectLayerAPIAnonTest(t, "TestAPINewMultipartHandler", bucketName, objectName, instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement)
    ExecObjectLayerAPIAnonTest(t, obj, "TestAPINewMultipartHandler", bucketName, objectName, instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement)

    // HTTP request to test the case of `objectLayer` being set to `nil`.
    // There is no need to use an existing bucket or valid input for creating the request,
@@ -2433,7 +2433,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
    // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
    // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the
    // unsigned request goes through and its validated again.
    ExecObjectLayerAPIAnonTest(t, "TestAPICompleteMultipartHandler", bucketName, objectName, instanceType,
    ExecObjectLayerAPIAnonTest(t, obj, "TestAPICompleteMultipartHandler", bucketName, objectName, instanceType,
        apiRouter, anonReq, getWriteOnlyObjectStatement)

    // HTTP request to test the case of `objectLayer` being set to `nil`.
@@ -2602,7 +2602,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
    // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
    // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the
    // unsigned request goes through and its validated again.
    ExecObjectLayerAPIAnonTest(t, "TestAPIAbortMultipartHandler", bucketName, objectName, instanceType,
    ExecObjectLayerAPIAnonTest(t, obj, "TestAPIAbortMultipartHandler", bucketName, objectName, instanceType,
        apiRouter, anonReq, getWriteOnlyObjectStatement)

    // HTTP request to test the case of `objectLayer` being set to `nil`.
@@ -2770,7 +2770,7 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string
    // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
    // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the
    // unsigned request goes through and its validated again.
    ExecObjectLayerAPIAnonTest(t, "TestAPIDeleteObjectHandler", bucketName, anonObjectName, instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement)
    ExecObjectLayerAPIAnonTest(t, obj, "TestAPIDeleteObjectHandler", bucketName, anonObjectName, instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement)

    // HTTP request to test the case of `objectLayer` being set to `nil`.
    // There is no need to use an existing bucket or valid input for creating the request,
@@ -3241,7 +3241,7 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
    // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
    // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the
    // unsigned request goes through and its validated again.
    ExecObjectLayerAPIAnonTest(t, "TestAPIPutObjectPartHandler", bucketName, testObject, instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement)
    ExecObjectLayerAPIAnonTest(t, obj, "TestAPIPutObjectPartHandler", bucketName, testObject, instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement)

    // HTTP request for testing when `ObjectLayer` is set to `nil`.
    // There is no need to use an existing bucket and valid input for creating the request
@@ -3544,7 +3544,7 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str
    // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
    // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the
    // unsigned request goes through and its validated again.
    ExecObjectLayerAPIAnonTest(t, "TestAPIListObjectPartsHandler", bucketName, testObject, instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement)
    ExecObjectLayerAPIAnonTest(t, obj, "TestAPIListObjectPartsHandler", bucketName, testObject, instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement)

    // HTTP request for testing when `objectLayer` is set to `nil`.
    // There is no need to use an existing bucket and valid input for creating the request
@@ -17,7 +17,6 @@
package cmd

import (
    "encoding/json"
    "fmt"
    "path"
    "sync"
@@ -170,13 +169,8 @@ func S3PeersUpdateBucketListener(bucket string, lcfg []listenerConfig) {

// S3PeersUpdateBucketPolicy - Sends update bucket policy request to
// all peers. Currently we log an error and continue.
func S3PeersUpdateBucketPolicy(bucket string, pCh policyChange) {
    byts, err := json.Marshal(pCh)
    if err != nil {
        errorIf(err, "Failed to marshal policyChange - this is a BUG!")
        return
    }
    setBPPArgs := &SetBucketPolicyPeerArgs{Bucket: bucket, PChBytes: byts}
func S3PeersUpdateBucketPolicy(bucket string) {
    setBPPArgs := &SetBucketPolicyPeerArgs{Bucket: bucket}
    errs := globalS3Peers.SendUpdate(nil, setBPPArgs)
    for idx, err := range errs {
        errorIf(
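The peer RPC above now carries only the bucket name; the serialized policyChange payload is gone. That suggests each receiving peer re-reads the policy through the object layer (the new RefreshBucketPolicy method) rather than applying pushed bytes. A sketch of that notify-then-refresh shape, with every name below hypothetical:

package main

import "fmt"

// refresher is a stand-in for the object layer's RefreshBucketPolicy; the
// handler and cache below are illustrative only, not MinIO's RPC types.
type refresher interface {
    RefreshBucketPolicy(bucket string) error
}

type memPolicies struct{ cache map[string]string }

func (m *memPolicies) RefreshBucketPolicy(bucket string) error {
    // Re-read the policy from the shared backend; faked here for the sketch.
    m.cache[bucket] = "reloaded-from-backend"
    return nil
}

// handleSetBucketPolicyPeer is what a peer might do on receiving the
// bucket-name-only notification: refresh its in-memory state locally.
func handleSetBucketPolicyPeer(objAPI refresher, bucket string) error {
    return objAPI.RefreshBucketPolicy(bucket)
}

func main() {
    objAPI := &memPolicies{cache: map[string]string{}}
    if err := handleSetBucketPolicyPeer(objAPI, "photos"); err != nil {
        fmt.Println("refresh failed:", err)
        return
    }
    fmt.Println(objAPI.cache["photos"])
}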
@@ -96,9 +96,6 @@ type SetBucketPolicyPeerArgs struct {
    AuthRPCArgs

    Bucket string

    // Policy change (serialized to JSON)
    PChBytes []byte
}

// BucketUpdate - implements bucket policy updates,
@@ -17,7 +17,6 @@
package cmd

import (
    "encoding/json"
    "os"
    "path"
    "testing"
@@ -94,13 +93,7 @@ func (s *TestRPCS3PeerSuite) testS3PeerRPC(t *testing.T) {
        t.Fatal(err)
    }

    // Check bucket policy update call works.
    pCh := policyChange{IsRemove: true}
    pChBytes, err := json.Marshal(pCh)
    if err != nil {
        t.Fatal(err)
    }
    BPPArgs := SetBucketPolicyPeerArgs{Bucket: "bucket", PChBytes: pChBytes}
    BPPArgs := SetBucketPolicyPeerArgs{Bucket: "bucket"}
    err = client.Call("S3.SetBucketPolicyPeer", &BPPArgs, &AuthRPCReply{})
    if err != nil {
        t.Fatal(err)
@@ -1777,7 +1777,7 @@ func prepareTestBackend(instanceType string) (ObjectLayer, []string, error) {
// STEP 1: Call the handler with the unsigned HTTP request (anonReq), assert for the `ErrAccessDenied` error response.
// STEP 2: Set the policy to allow the unsigned request, use the policyFunc to obtain the relevant statement and call
// the handler again to verify its success.
func ExecObjectLayerAPIAnonTest(t *testing.T, testName, bucketName, objectName, instanceType string, apiRouter http.Handler,
func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketName, objectName, instanceType string, apiRouter http.Handler,
    anonReq *http.Request, policyFunc func(string, string) policy.Statement) {

    anonTestStr := "Anonymous HTTP request test"
@@ -1835,8 +1835,7 @@ func ExecObjectLayerAPIAnonTest(t *testing.T, testName, bucketName, objectName,
        Version:    "1.0",
        Statements: []policy.Statement{policyFunc(bucketName, "")},
    }

    globalBucketPolicies.SetBucketPolicy(bucketName, policyChange{false, bp})
    obj.SetBucketPolicy(bucketName, bp)
    // now call the handler again with the unsigned/anonymous request, it should be accepted.
    rec = httptest.NewRecorder()

@@ -1888,6 +1887,7 @@ func ExecObjectLayerAPIAnonTest(t *testing.T, testName, bucketName, objectName,
    if rec.Code != accesDeniedHTTPStatus {
        t.Fatal(failTestStr(unknownSignTestStr, fmt.Sprintf("Object API Unknow auth test for \"%s\", expected to fail with %d, but failed with %d", testName, accesDeniedHTTPStatus, rec.Code)))
    }

}

// ExecObjectLayerAPINilTest - Sets the object layer to `nil`, and calls rhe registered object layer API endpoint,
@@ -21,7 +21,6 @@ import (
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "os"
    "path"
@@ -237,8 +236,8 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r
        return toJSONError(errServerNotInitialized)
    }
    prefix := args.Prefix + "test" // To test if GetObject/PutObject with the specified prefix is allowed.
    readable := isBucketActionAllowed("s3:GetObject", args.BucketName, prefix)
    writable := isBucketActionAllowed("s3:PutObject", args.BucketName, prefix)
    readable := isBucketActionAllowed("s3:GetObject", args.BucketName, prefix, objectAPI)
    writable := isBucketActionAllowed("s3:PutObject", args.BucketName, prefix, objectAPI)
    authErr := webRequestAuthenticate(r)
    switch {
    case authErr == errAuthentication:
@@ -537,7 +536,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
        writeWebErrorResponse(w, errAuthentication)
        return
    }
    if authErr != nil && !isBucketActionAllowed("s3:PutObject", bucket, object) {
    if authErr != nil && !isBucketActionAllowed("s3:PutObject", bucket, object, objectAPI) {
        writeWebErrorResponse(w, errAuthentication)
        return
    }
@@ -590,7 +589,7 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) {
    object := vars["object"]
    token := r.URL.Query().Get("token")

    if !isAuthTokenValid(token) && !isBucketActionAllowed("s3:GetObject", bucket, object) {
    if !isAuthTokenValid(token) && !isBucketActionAllowed("s3:GetObject", bucket, object, objectAPI) {
        writeWebErrorResponse(w, errAuthentication)
        return
    }
@@ -634,7 +633,7 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
    token := r.URL.Query().Get("token")
    if !isAuthTokenValid(token) {
        for _, object := range args.Objects {
            if !isBucketActionAllowed("s3:GetObject", args.BucketName, pathJoin(args.Prefix, object)) {
            if !isBucketActionAllowed("s3:GetObject", args.BucketName, pathJoin(args.Prefix, object), objectAPI) {
                writeWebErrorResponse(w, errAuthentication)
                return
            }
@@ -708,38 +707,6 @@ type GetBucketPolicyRep struct {
    Policy policy.BucketPolicy `json:"policy"`
}

func readBucketAccessPolicy(objAPI ObjectLayer, bucketName string) (policy.BucketAccessPolicy, error) {
    bucketPolicyReader, err := readBucketPolicyJSON(bucketName, objAPI)
    if err != nil {
        if _, ok := err.(BucketPolicyNotFound); ok {
            return policy.BucketAccessPolicy{Version: "2012-10-17"}, nil
        }
        return policy.BucketAccessPolicy{}, err
    }

    bucketPolicyBuf, err := ioutil.ReadAll(bucketPolicyReader)
    if err != nil {
        return policy.BucketAccessPolicy{}, err
    }

    policyInfo := policy.BucketAccessPolicy{}
    err = json.Unmarshal(bucketPolicyBuf, &policyInfo)
    if err != nil {
        return policy.BucketAccessPolicy{}, err
    }

    return policyInfo, nil

}

func getBucketAccessPolicy(objAPI ObjectLayer, bucketName string) (policy.BucketAccessPolicy, error) {
    // FIXME: remove this code when S3 layer for gateway and server is unified.
    if layer, ok := objAPI.(GatewayLayer); ok {
        return layer.GetBucketPolicies(bucketName)
    }
    return readBucketAccessPolicy(objAPI, bucketName)
}

// GetBucketPolicy - get bucket policy for the requested prefix.
func (web *webAPIHandlers) GetBucketPolicy(r *http.Request, args *GetBucketPolicyArgs, reply *GetBucketPolicyRep) error {
    objectAPI := web.ObjectAPI()
@@ -751,9 +718,9 @@ func (web *webAPIHandlers) GetBucketPolicy(r *http.Request, args *GetBucketPolic
        return toJSONError(errAuthentication)
    }

    var policyInfo, err = getBucketAccessPolicy(objectAPI, args.BucketName)
    var policyInfo, err = objectAPI.GetBucketPolicy(args.BucketName)
    if err != nil {
        _, ok := errors.Cause(err).(PolicyNotFound)
        _, ok := errors.Cause(err).(BucketPolicyNotFound)
        if !ok {
            return toJSONError(err, args.BucketName)
        }
@@ -792,8 +759,7 @@ func (web *webAPIHandlers) ListAllBucketPolicies(r *http.Request, args *ListAllB
    if !isHTTPRequestValid(r) {
        return toJSONError(errAuthentication)
    }

    var policyInfo, err = getBucketAccessPolicy(objectAPI, args.BucketName)
    var policyInfo, err = objectAPI.GetBucketPolicy(args.BucketName)
    if err != nil {
        _, ok := errors.Cause(err).(PolicyNotFound)
        if !ok {
@@ -837,7 +803,7 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic
        }
    }

    var policyInfo, err = getBucketAccessPolicy(objectAPI, args.BucketName)
    var policyInfo, err = objectAPI.GetBucketPolicy(args.BucketName)
    if err != nil {
        if _, ok := errors.Cause(err).(PolicyNotFound); !ok {
            return toJSONError(err, args.BucketName)
@@ -846,47 +812,19 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic
    }

    policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, bucketP, args.BucketName, args.Prefix)
    switch g := objectAPI.(type) {
    case GatewayLayer:
        if len(policyInfo.Statements) == 0 {
            err = g.DeleteBucketPolicies(args.BucketName)
            if err != nil {
                return toJSONError(err, args.BucketName)
            }
            return nil
        }
        err = g.SetBucketPolicies(args.BucketName, policyInfo)
        if err != nil {
            return toJSONError(err)
        }
        return nil
    }

    if len(policyInfo.Statements) == 0 {
        if err = persistAndNotifyBucketPolicyChange(args.BucketName, policyChange{
            true, policy.BucketAccessPolicy{},
        }, objectAPI); err != nil {
        if err = objectAPI.DeleteBucketPolicy(args.BucketName); err != nil {
            return toJSONError(err, args.BucketName)
        }
        return nil
    }

    data, err := json.Marshal(policyInfo)
    if err != nil {
        return toJSONError(err)
    }

    // Parse validate and save bucket policy.
    if s3Error := parseAndPersistBucketPolicy(args.BucketName, data, objectAPI); s3Error != ErrNone {
        apiErr := getAPIError(s3Error)
        var err error
        if apiErr.Code == "XMinioPolicyNesting" {
            err = PolicyNesting{}
        } else {
            err = fmt.Errorf(apiErr.Description)
        }
    if err := objectAPI.SetBucketPolicy(args.BucketName, policyInfo); err != nil {
        return toJSONError(err, args.BucketName)
    }

    return nil
}
@@ -543,7 +543,7 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa
        Statements: []policy.Statement{getReadOnlyObjectStatement(bucketName, "")},
    }

    globalBucketPolicies.SetBucketPolicy(bucketName, policyChange{false, policy})
    obj.SetBucketPolicy(bucketName, policy)

    // Unauthenticated ListObjects with READ bucket policy should succeed.
    err, reply = test("")
@@ -917,7 +917,7 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler
        Statements: []policy.Statement{getWriteOnlyObjectStatement(bucketName, "")},
    }

    globalBucketPolicies.SetBucketPolicy(bucketName, policyChange{false, bp})
    obj.SetBucketPolicy(bucketName, bp)

    // Unauthenticated upload with WRITE policy should succeed.
    code = test("", true)
@@ -1024,7 +1024,7 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl
        Statements: []policy.Statement{getReadOnlyObjectStatement(bucketName, "")},
    }

    globalBucketPolicies.SetBucketPolicy(bucketName, policyChange{false, bp})
    obj.SetBucketPolicy(bucketName, bp)

    // Unauthenticated download with READ policy should succeed.
    code, bodyContent = test("")
|
@ -17,9 +17,11 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/minio/minio-go/pkg/policy"
|
||||
"github.com/minio/minio/pkg/errors"
|
||||
)
|
||||
|
||||
@ -154,6 +156,11 @@ func (xl xlObjects) getBucketInfo(bucketName string) (bucketInfo BucketInfo, err
|
||||
|
||||
// GetBucketInfo - returns BucketInfo for a bucket.
|
||||
func (xl xlObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
|
||||
bucketLock := xl.nsMutex.NewNSLock(bucket, "")
|
||||
if e := bucketLock.GetRLock(globalObjectTimeout); e != nil {
|
||||
return bi, e
|
||||
}
|
||||
defer bucketLock.RUnlock()
|
||||
// Verify if bucket is valid.
|
||||
if !IsValidBucketName(bucket) {
|
||||
return bi, BucketNameInvalid{Bucket: bucket}
|
||||
@ -219,6 +226,13 @@ func (xl xlObjects) ListBuckets() ([]BucketInfo, error) {
|
||||
|
||||
// DeleteBucket - deletes a bucket.
|
||||
func (xl xlObjects) DeleteBucket(bucket string) error {
|
||||
|
||||
bucketLock := xl.nsMutex.NewNSLock(bucket, "")
|
||||
if err := bucketLock.GetLock(globalObjectTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
defer bucketLock.Unlock()
|
||||
|
||||
// Verify if bucket is valid.
|
||||
if !IsValidBucketName(bucket) {
|
||||
return BucketNameInvalid{Bucket: bucket}
|
||||
@ -240,12 +254,14 @@ func (xl xlObjects) DeleteBucket(bucket string) error {
|
||||
defer wg.Done()
|
||||
// Attempt to delete bucket.
|
||||
err := disk.DeleteVol(bucket)
|
||||
|
||||
if err != nil {
|
||||
dErrs[index] = errors.Trace(err)
|
||||
return
|
||||
}
|
||||
// Cleanup all the previously incomplete multiparts.
|
||||
err = cleanupDir(disk, minioMetaMultipartBucket, bucket)
|
||||
|
||||
if err != nil {
|
||||
if errors.Cause(err) == errVolumeNotFound {
|
||||
return
|
||||
@ -257,11 +273,73 @@ func (xl xlObjects) DeleteBucket(bucket string) error {
|
||||
|
||||
// Wait for all the delete vols to finish.
|
||||
wg.Wait()
|
||||
|
||||
writeQuorum := len(xl.storageDisks)/2 + 1
|
||||
err := reduceWriteQuorumErrs(dErrs, bucketOpIgnoredErrs, writeQuorum)
|
||||
if errors.Cause(err) == errXLWriteQuorum {
|
||||
xl.undoDeleteBucket(bucket)
|
||||
}
|
||||
return toObjectErr(err, bucket)
|
||||
if err != nil {
|
||||
return toObjectErr(err, bucket)
|
||||
}
|
||||
// Delete bucket access policy, if present - ignore any errors.
|
||||
_ = removeBucketPolicy(bucket, xl)
|
||||
|
||||
// Notify all peers (including self) to update in-memory state
|
||||
S3PeersUpdateBucketPolicy(bucket)
|
||||
|
||||
// Delete notification config, if present - ignore any errors.
|
||||
_ = removeNotificationConfig(bucket, xl)
|
||||
|
||||
// Notify all peers (including self) to update in-memory state
|
||||
S3PeersUpdateBucketNotification(bucket, nil)
|
||||
// Delete listener config, if present - ignore any errors.
|
||||
_ = removeListenerConfig(bucket, xl)
|
||||
|
||||
// Notify all peers (including self) to update in-memory state
|
||||
S3PeersUpdateBucketListener(bucket, []listenerConfig{})
|
||||
|
||||
return nil
|
||||
}

// SetBucketPolicy sets policy on bucket
func (xl xlObjects) SetBucketPolicy(bucket string, policy policy.BucketAccessPolicy) error {
return persistAndNotifyBucketPolicyChange(bucket, false, policy, xl)
}

// GetBucketPolicy will get policy on bucket
func (xl xlObjects) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error) {
// fetch bucket policy from cache.
bpolicy := xl.bucketPolicies.GetBucketPolicy(bucket)
if reflect.DeepEqual(bpolicy, emptyBucketPolicy) {
return readBucketPolicy(bucket, xl)
}
return bpolicy, nil
}

// DeleteBucketPolicy deletes all policies on bucket
func (xl xlObjects) DeleteBucketPolicy(bucket string) error {
return persistAndNotifyBucketPolicyChange(bucket, true, emptyBucketPolicy, xl)
}

// RefreshBucketPolicy refreshes policy cache from disk
func (xl xlObjects) RefreshBucketPolicy(bucket string) error {
policy, err := readBucketPolicy(bucket, xl)

if err != nil {
if reflect.DeepEqual(policy, emptyBucketPolicy) {
return xl.bucketPolicies.DeleteBucketPolicy(bucket)
}
return err
}
return xl.bucketPolicies.SetBucketPolicy(bucket, policy)
}

// IsNotificationSupported returns whether bucket notification is applicable for this layer.
func (xl xlObjects) IsNotificationSupported() bool {
return true
}

// IsEncryptionSupported returns whether server side encryption is applicable for this layer.
func (xl xlObjects) IsEncryptionSupported() bool {
return true
}

@ -87,6 +87,7 @@ func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKey
var eof bool
var nextMarker string
for i := 0; i < maxKeys; {

walkResult, ok := <-walkResultCh
if !ok {
// Closed channel.
@ -155,6 +156,14 @@ func (xl xlObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
return loi, nil
}

// Marker is set, validate pre-condition.
if marker != "" {
// Marker not common with prefix is not implemented. Send an empty response.
if !hasPrefix(marker, prefix) {
return ListObjectsInfo{}, e
}
}
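
The hunk above changes what happens when a listing marker does not share the requested prefix: instead of surfacing an error code, ListObjects now returns an empty, non-truncated result. A small illustrative check of that behaviour, with a hypothetical listing type standing in for ListObjectsInfo:

package main

import (
	"fmt"
	"strings"
)

// listing is a stand-in for the ListObjectsInfo result type.
type listing struct {
	IsTruncated bool
	Objects     []string
}

// listWithMarker returns an empty listing when the marker cannot possibly
// fall under the prefix, mirroring the precondition in the hunk above.
func listWithMarker(all []string, prefix, marker string) listing {
	if marker != "" && !strings.HasPrefix(marker, prefix) {
		return listing{} // marker outside the prefix: nothing to resume
	}
	var out listing
	for _, k := range all {
		if strings.HasPrefix(k, prefix) && k > marker {
			out.Objects = append(out.Objects, k)
		}
	}
	return out
}

func main() {
	keys := []string{"photos/a.jpg", "photos/b.jpg", "videos/c.mp4"}
	fmt.Println(listWithMarker(keys, "photos/", "videos/c.mp4")) // empty result
	fmt.Println(listWithMarker(keys, "photos/", "photos/a.jpg")) // resumes after the marker
}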

// For delimiter and prefix as '/' we do not list anything at all
// since according to s3 spec we stop at the 'delimiter' along
// with the prefix. On a flat namespace with 'prefix' as '/'

@ -542,6 +542,10 @@ func renameObject(disks []StorageAPI, srcBucket, srcObject, dstBucket, dstObject
// writes `xl.json` which carries the necessary metadata for future
// object operations.
func (xl xlObjects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
// Validate put object input args.
if err = checkPutObjectArgs(bucket, object, xl, data.Size()); err != nil {
return ObjectInfo{}, err
}
// Lock the object.
objectLock := xl.nsMutex.NewNSLock(bucket, object)
if err := objectLock.GetLock(globalObjectTimeout); err != nil {
@ -597,7 +601,7 @@ func (xl xlObjects) putObject(bucket string, object string, data *hash.Reader, m
}

// Validate put object input args.
if err = checkPutObjectArgs(bucket, object, xl); err != nil {
if err = checkPutObjectArgs(bucket, object, xl, data.Size()); err != nil {
return ObjectInfo{}, err
}
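
Both hunks above move the input validation so that the object size reported by the hash.Reader is checked alongside the bucket and object names before any locking or disk work starts. The exact checks performed by checkPutObjectArgs are not shown in this diff; the following is only a hypothetical pre-flight validator in the same spirit.

package main

import (
	"errors"
	"fmt"
	"strings"
)

// checkPutArgs is an illustrative pre-flight check: reject obviously bad
// bucket/object names and negative sizes before doing any expensive work.
func checkPutArgs(bucket, object string, size int64) error {
	if len(bucket) < 3 || strings.ContainsAny(bucket, " /") {
		return errors.New("invalid bucket name")
	}
	if object == "" || strings.HasPrefix(object, "/") {
		return errors.New("invalid object name")
	}
	if size < 0 {
		return errors.New("invalid object size")
	}
	return nil
}

func main() {
	fmt.Println(checkPutArgs("photos", "2018/a.jpg", 1024)) // <nil>
	fmt.Println(checkPutArgs("photos", "2018/a.jpg", -1))   // invalid object size
}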

@ -845,3 +849,20 @@ func (xl xlObjects) DeleteObject(bucket, object string) (err error) {
// Success.
return nil
}

// ListObjectsV2 lists all blobs in bucket filtered by prefix
func (xl xlObjects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
loi, err := xl.ListObjects(bucket, prefix, continuationToken, delimiter, maxKeys)
if err != nil {
return result, err
}

listObjectsV2Info := ListObjectsV2Info{
IsTruncated: loi.IsTruncated,
ContinuationToken: continuationToken,
NextContinuationToken: loi.NextMarker,
Objects: loi.Objects,
Prefixes: loi.Prefixes,
}
return listObjectsV2Info, err
}
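
The new ListObjectsV2 is a thin adapter over the V1 listing: the continuation token is fed in as the V1 marker, and V1's NextMarker comes back out as NextContinuationToken. A rough sketch of how a caller pages through results with that mapping; the lister and result type here are illustrative, not the minio API.

package main

import "fmt"

// v1Result mirrors the shape of a marker-based listing result.
type v1Result struct {
	Objects     []string
	NextMarker  string
	IsTruncated bool
}

// listV1 is a stand-in for a marker-based lister over a sorted key space.
func listV1(keys []string, marker string, maxKeys int) v1Result {
	var r v1Result
	for _, k := range keys {
		if k <= marker {
			continue
		}
		if len(r.Objects) == maxKeys {
			r.IsTruncated = true
			return r
		}
		r.Objects = append(r.Objects, k)
		r.NextMarker = k
	}
	return r
}

func main() {
	keys := []string{"a", "b", "c", "d", "e"}
	token := "" // the V2 continuation token doubles as the V1 marker
	for {
		res := listV1(keys, token, 2)
		fmt.Println(res.Objects)
		if !res.IsTruncated {
			break
		}
		token = res.NextMarker // becomes NextContinuationToken in V2
	}
}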

@ -51,6 +51,8 @@ type xlObjects struct {

// name space mutex for object layer
nsMutex *nsLockMap
// Variable represents bucket policies in memory.
bucketPolicies *bucketPolicies
}

// list of all errors that can be ignored in tree walk operation in XL