From 0d521260237ca69a89044f23f10a13b44e1f53c9 Mon Sep 17 00:00:00 2001 From: Bala FA Date: Wed, 25 Apr 2018 04:23:30 +0530 Subject: [PATCH] Enhance policy handling to support SSE and WORM (#5790) - remove old bucket policy handling - add new policy handling - add new policy handling unit tests This patch brings support to bucket policy to have more control not limiting to anonymous. Bucket owner controls to allow/deny any rest API. For example server side encryption can be controlled by allowing PUT/GET objects with encryptions including bucket owner. --- cmd/admin-handlers_test.go | 3 + cmd/api-errors.go | 2 - cmd/auth-handler.go | 94 +- cmd/bucket-handlers-listobjects.go | 5 +- cmd/bucket-handlers.go | 130 +- cmd/bucket-handlers_test.go | 36 +- cmd/bucket-notification-handlers.go | 28 +- cmd/bucket-policy-handlers.go | 306 +---- cmd/bucket-policy-handlers_test.go | 467 ++----- cmd/bucket-policy-parser.go | 295 ----- cmd/bucket-policy-parser_test.go | 867 ------------- cmd/bucket-policy.go | 206 ---- cmd/fs-v1.go | 46 +- cmd/gateway-common.go | 2 +- cmd/gateway-main.go | 7 +- cmd/gateway-unsupported.go | 8 +- cmd/gateway/azure/gateway-azure.go | 60 +- cmd/gateway/b2/gateway-b2.go | 53 +- cmd/gateway/gcs/gateway-gcs.go | 75 +- cmd/gateway/nas/gateway-nas.go | 6 +- cmd/gateway/oss/gateway-oss.go | 69 +- cmd/gateway/s3/gateway-s3.go | 26 +- cmd/gateway/s3/gateway-s3_test.go | 2 +- cmd/globals.go | 1 + cmd/notification.go | 43 +- cmd/object-api-common.go | 2 +- cmd/object-api-errors.go | 16 - cmd/object-api-interface.go | 7 +- cmd/object-handlers.go | 94 +- cmd/object-handlers_test.go | 20 +- cmd/peer-rpc.go | 49 +- cmd/policy.go | 218 ++++ cmd/policy_test.go | 424 +++++++ cmd/server-main.go | 7 +- cmd/server_test.go | 9 +- cmd/test-utils_test.go | 35 +- cmd/web-handlers.go | 207 ++-- cmd/web-handlers_test.go | 190 +-- cmd/xl-sets.go | 47 +- cmd/xl-v1-bucket.go | 31 +- cmd/xl-v1.go | 3 - pkg/policy/action.go | 267 ++++ pkg/policy/action_test.go | 116 ++ 
pkg/policy/actionset.go | 114 ++ pkg/policy/actionset_test.go | 158 +++ pkg/policy/condition/func.go | 168 +++ pkg/policy/condition/func_test.go | 298 +++++ pkg/policy/condition/ipaddressfunc.go | 180 +++ pkg/policy/condition/ipaddressfunc_test.go | 278 +++++ pkg/policy/condition/key.go | 186 +++ pkg/policy/condition/key_test.go | 214 ++++ pkg/policy/condition/name.go | 79 ++ pkg/policy/condition/name_test.go | 106 ++ pkg/policy/condition/nullfunc.go | 101 ++ pkg/policy/condition/nullfunc_test.go | 161 +++ pkg/policy/condition/stringequalsfunc.go | 182 +++ pkg/policy/condition/stringequalsfunc_test.go | 718 +++++++++++ pkg/policy/condition/stringlikefunc.go | 165 +++ pkg/policy/condition/stringlikefunc_test.go | 810 ++++++++++++ pkg/policy/condition/value.go | 157 +++ pkg/policy/condition/value_test.go | 260 ++++ pkg/policy/condition/valueset.go | 85 ++ pkg/policy/condition/valueset_test.go | 118 ++ pkg/policy/effect.go | 78 ++ pkg/policy/effect_test.go | 122 ++ pkg/policy/id.go | 64 + pkg/policy/id_test.go | 99 ++ pkg/policy/policy.go | 176 +++ pkg/policy/policy_test.go | 1089 +++++++++++++++++ pkg/policy/principal.go | 92 ++ pkg/policy/principal_test.go | 141 +++ pkg/policy/resource.go | 131 ++ pkg/policy/resource_test.go | 221 ++++ pkg/policy/resourceset.go | 147 +++ pkg/policy/resourceset_test.go | 240 ++++ pkg/policy/statement.go | 156 +++ pkg/policy/statement_test.go | 571 +++++++++ 77 files changed, 9811 insertions(+), 2633 deletions(-) delete mode 100644 cmd/bucket-policy-parser.go delete mode 100644 cmd/bucket-policy-parser_test.go delete mode 100644 cmd/bucket-policy.go create mode 100644 cmd/policy.go create mode 100644 cmd/policy_test.go create mode 100644 pkg/policy/action.go create mode 100644 pkg/policy/action_test.go create mode 100644 pkg/policy/actionset.go create mode 100644 pkg/policy/actionset_test.go create mode 100644 pkg/policy/condition/func.go create mode 100644 pkg/policy/condition/func_test.go create mode 100644 
pkg/policy/condition/ipaddressfunc.go create mode 100644 pkg/policy/condition/ipaddressfunc_test.go create mode 100644 pkg/policy/condition/key.go create mode 100644 pkg/policy/condition/key_test.go create mode 100644 pkg/policy/condition/name.go create mode 100644 pkg/policy/condition/name_test.go create mode 100644 pkg/policy/condition/nullfunc.go create mode 100644 pkg/policy/condition/nullfunc_test.go create mode 100644 pkg/policy/condition/stringequalsfunc.go create mode 100644 pkg/policy/condition/stringequalsfunc_test.go create mode 100644 pkg/policy/condition/stringlikefunc.go create mode 100644 pkg/policy/condition/stringlikefunc_test.go create mode 100644 pkg/policy/condition/value.go create mode 100644 pkg/policy/condition/value_test.go create mode 100644 pkg/policy/condition/valueset.go create mode 100644 pkg/policy/condition/valueset_test.go create mode 100644 pkg/policy/effect.go create mode 100644 pkg/policy/effect_test.go create mode 100644 pkg/policy/id.go create mode 100644 pkg/policy/id_test.go create mode 100644 pkg/policy/policy.go create mode 100644 pkg/policy/policy_test.go create mode 100644 pkg/policy/principal.go create mode 100644 pkg/policy/principal_test.go create mode 100644 pkg/policy/resource.go create mode 100644 pkg/policy/resource_test.go create mode 100644 pkg/policy/resourceset.go create mode 100644 pkg/policy/resourceset_test.go create mode 100644 pkg/policy/statement.go create mode 100644 pkg/policy/statement_test.go diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go index 57e811fe9..5d78fc54b 100644 --- a/cmd/admin-handlers_test.go +++ b/cmd/admin-handlers_test.go @@ -181,6 +181,9 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) { return nil, err } + // Create new policy system. + globalPolicySys = NewPolicySys() + // Setup admin mgmt REST API handlers. 
adminRouter := mux.NewRouter() registerAdminRouter(adminRouter) diff --git a/cmd/api-errors.go b/cmd/api-errors.go index 8f140f80b..750bd044d 100644 --- a/cmd/api-errors.go +++ b/cmd/api-errors.go @@ -953,8 +953,6 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) { apiErr = ErrEntityTooSmall case NotImplemented: apiErr = ErrNotImplemented - case PolicyNotFound: - apiErr = ErrNoSuchBucketPolicy case PartTooBig: apiErr = ErrEntityTooLarge case UnsupportedMetadata: diff --git a/cmd/auth-handler.go b/cmd/auth-handler.go index 324f04252..62aca703d 100644 --- a/cmd/auth-handler.go +++ b/cmd/auth-handler.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2015-2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -27,7 +27,7 @@ import ( "strings" "github.com/minio/minio/cmd/logger" - "github.com/minio/minio/pkg/handlers" + "github.com/minio/minio/pkg/policy" ) // Verify if request has JWT. @@ -123,28 +123,84 @@ func checkAdminRequestAuthType(r *http.Request, region string) APIErrorCode { return s3Err } -func checkRequestAuthType(ctx context.Context, r *http.Request, bucket, policyAction, region string) APIErrorCode { - reqAuthType := getRequestAuthType(r) +func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) APIErrorCode { + isOwner := true + accountName := globalServerConfig.GetCredential().AccessKey - switch reqAuthType { + switch getRequestAuthType(r) { + case authTypeUnknown: + return ErrAccessDenied case authTypePresignedV2, authTypeSignedV2: - // Signature V2 validation. 
- return isReqAuthenticatedV2(r) - case authTypeSigned, authTypePresigned: - return isReqAuthenticated(r, region) - } - - if reqAuthType == authTypeAnonymous && policyAction != "" { - // http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html - resource, err := getResource(r.URL.Path, r.Host, globalDomainName) - if err != nil { - return ErrInternalError + if errorCode := isReqAuthenticatedV2(r); errorCode != ErrNone { + return errorCode } - return enforceBucketPolicy(ctx, bucket, policyAction, resource, - r.Referer(), handlers.GetSourceIP(r), r.URL.Query()) + case authTypeSigned, authTypePresigned: + region := globalServerConfig.GetRegion() + switch action { + case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction: + region = "" + } + + if errorCode := isReqAuthenticated(r, region); errorCode != ErrNone { + return errorCode + } + default: + isOwner = false + accountName = "" + } + + // LocationConstraint is valid only for CreateBucketAction. + var locationConstraint string + if action == policy.CreateBucketAction { + // To extract region from XML in request body, get copy of request body. + payload, err := ioutil.ReadAll(r.Body) + if err != nil { + logger.LogIf(ctx, err) + return ErrAccessDenied + } + + // Populate payload to extract location constraint. + r.Body = ioutil.NopCloser(bytes.NewReader(payload)) + + var s3Error APIErrorCode + locationConstraint, s3Error = parseLocationConstraint(r) + if s3Error != ErrNone { + return ErrAccessDenied + } + + // Populate payload again to handle it in HTTP handler. 
+ r.Body = ioutil.NopCloser(bytes.NewReader(payload)) + } + + if globalPolicySys.IsAllowed(policy.Args{ + AccountName: accountName, + Action: action, + BucketName: bucketName, + ConditionValues: getConditionValues(r, locationConstraint), + IsOwner: isOwner, + ObjectName: objectName, + }) { + return ErrNone + } + + // As policy.ListBucketAction and policy.ListObjectsAction are same but different names, + // policy.ListBucketAction is used across the code but user may used policy.ListObjectsAction + // in bucket policy to denote the same. In below try again with policy.ListObjectsAction. + if action != policy.ListBucketAction { + return ErrAccessDenied + } + + if globalPolicySys.IsAllowed(policy.Args{ + AccountName: accountName, + Action: policy.ListObjectsAction, + BucketName: bucketName, + ConditionValues: getConditionValues(r, locationConstraint), + IsOwner: isOwner, + ObjectName: objectName, + }) { + return ErrNone } - // By default return ErrAccessDenied return ErrAccessDenied } diff --git a/cmd/bucket-handlers-listobjects.go b/cmd/bucket-handlers-listobjects.go index 3f4637b65..f320eed11 100644 --- a/cmd/bucket-handlers-listobjects.go +++ b/cmd/bucket-handlers-listobjects.go @@ -20,6 +20,7 @@ import ( "net/http" "github.com/gorilla/mux" + "github.com/minio/minio/pkg/policy" ) // Validate all the ListObjects query arguments, returns an APIErrorCode @@ -64,7 +65,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http return } - if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:ListBucket", globalServerConfig.GetRegion()); s3Error != ErrNone { + if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } @@ -134,7 +135,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http return } - if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:ListBucket", globalServerConfig.GetRegion()); s3Error != ErrNone { + if 
s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index c4568367a..63d527015 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -17,7 +17,6 @@ package cmd import ( - "context" "encoding/base64" "encoding/xml" "io" @@ -26,86 +25,16 @@ import ( "net/url" "path" "path/filepath" - "reflect" "strings" "sync" "github.com/gorilla/mux" - "github.com/minio/minio-go/pkg/policy" - "github.com/minio/minio-go/pkg/set" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/hash" + "github.com/minio/minio/pkg/policy" ) -// http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html -// Enforces bucket policies for a bucket for a given tatusaction. -func enforceBucketPolicy(ctx context.Context, bucket, action, resource, referer, sourceIP string, queryParams url.Values) (s3Error APIErrorCode) { - // Verify if bucket actually exists - objAPI := newObjectLayerFn() - if err := checkBucketExist(ctx, bucket, objAPI); err != nil { - switch err.(type) { - case BucketNameInvalid: - // Return error for invalid bucket name. - return ErrInvalidBucketName - case BucketNotFound: - // For no bucket found we return NoSuchBucket instead. - return ErrNoSuchBucket - } - // Return internal error for any other errors so that we can investigate. - return ErrInternalError - } - - // Fetch bucket policy, if policy is not set return access denied. - p, err := objAPI.GetBucketPolicy(ctx, bucket) - if err != nil { - return ErrAccessDenied - } - if reflect.DeepEqual(p, emptyBucketPolicy) { - return ErrAccessDenied - } - - // Construct resource in 'arn:aws:s3:::examplebucket/object' format. - arn := bucketARNPrefix + strings.TrimSuffix(strings.TrimPrefix(resource, "/"), "/") - - // Get conditions for policy verification. 
- conditionKeyMap := make(policy.ConditionKeyMap) - for queryParam := range queryParams { - conditionKeyMap[queryParam] = set.CreateStringSet(queryParams.Get(queryParam)) - } - - // Add request referer to conditionKeyMap if present. - if referer != "" { - conditionKeyMap["referer"] = set.CreateStringSet(referer) - } - // Add request source Ip to conditionKeyMap. - conditionKeyMap["ip"] = set.CreateStringSet(sourceIP) - - // Validate action, resource and conditions with current policy statements. - if !bucketPolicyEvalStatements(action, arn, conditionKeyMap, p.Statements) { - return ErrAccessDenied - } - return ErrNone -} - -// Check if the action is allowed on the bucket/prefix. -func isBucketActionAllowed(action, bucket, prefix string, objectAPI ObjectLayer) bool { - reqInfo := &logger.ReqInfo{BucketName: bucket} - reqInfo.AppendTags("prefix", prefix) - ctx := logger.SetReqInfo(context.Background(), reqInfo) - bp, err := objectAPI.GetBucketPolicy(ctx, bucket) - if err != nil { - return false - } - if reflect.DeepEqual(bp, emptyBucketPolicy) { - return false - } - resource := bucketARNPrefix + path.Join(bucket, prefix) - var conditionKeyMap map[string]set.StringSet - // Validate action, resource and conditions with current policy statements. - return bucketPolicyEvalStatements(action, resource, conditionKeyMap, bp.Statements) -} - // GetBucketLocationHandler - GET Bucket location. // ------------------------- // This operation returns bucket location. @@ -121,12 +50,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r * return } - s3Error := checkRequestAuthType(ctx, r, bucket, "s3:GetBucketLocation", globalMinioDefaultRegion) - if s3Error == ErrInvalidRegion { - // Clients like boto3 send getBucketLocation() call signed with region that is configured. 
- s3Error = checkRequestAuthType(ctx, r, "", "s3:GetBucketLocation", globalServerConfig.GetRegion()) - } - if s3Error != ErrNone { + if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketLocationAction, bucket, ""); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } @@ -180,7 +104,7 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, return } - if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:ListBucketMultipartUploads", globalServerConfig.GetRegion()); s3Error != ErrNone { + if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketMultipartUploadsAction, bucket, ""); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } @@ -228,16 +152,12 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R if api.CacheAPI() != nil { listBuckets = api.CacheAPI().ListBuckets } - // ListBuckets does not have any bucket action. - s3Error := checkRequestAuthType(ctx, r, "", "", globalMinioDefaultRegion) - if s3Error == ErrInvalidRegion { - // Clients like boto3 send listBuckets() call signed with region that is configured. - s3Error = checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()) - } - if s3Error != ErrNone { + + if s3Error := checkRequestAuthType(ctx, r, policy.ListAllMyBucketsAction, "", ""); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } + // Invoke the list buckets. 
bucketsInfo, err := listBuckets(ctx) if err != nil { @@ -266,12 +186,12 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, return } - var authError APIErrorCode - if authError = checkRequestAuthType(ctx, r, bucket, "s3:DeleteObject", globalServerConfig.GetRegion()); authError != ErrNone { + var s3Error APIErrorCode + if s3Error = checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, ""); s3Error != ErrNone { // In the event access is denied, a 200 response should still be returned // http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html - if authError != ErrAccessDenied { - writeErrorResponse(w, authError, r.URL) + if s3Error != ErrAccessDenied { + writeErrorResponse(w, s3Error, r.URL) return } } @@ -326,7 +246,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, defer wg.Done() // If the request is denied access, each item // should be marked as 'AccessDenied' - if authError == ErrAccessDenied { + if s3Error == ErrAccessDenied { dErrs[i] = PrefixAccessDenied{ Bucket: bucket, Object: obj.ObjectName, @@ -411,16 +331,14 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req return } - // PutBucket does not have any bucket action. - s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()) - if s3Error != ErrNone { + vars := mux.Vars(r) + bucket := vars["bucket"] + + if s3Error := checkRequestAuthType(ctx, r, policy.CreateBucketAction, bucket, ""); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } - vars := mux.Vars(r) - bucket := vars["bucket"] - // Parse incoming location constraint. 
location, s3Error := parseLocationConstraint(r) if s3Error != ErrNone { @@ -690,10 +608,11 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re return } - if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:ListBucket", globalServerConfig.GetRegion()); s3Error != ErrNone { + if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone { writeErrorResponseHeadersOnly(w, s3Error) return } + getBucketInfo := objectAPI.GetBucketInfo if api.CacheAPI() != nil { getBucketInfo = api.CacheAPI().GetBucketInfo @@ -710,20 +629,20 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, "DeleteBucket") + vars := mux.Vars(r) + bucket := vars["bucket"] + objectAPI := api.ObjectAPI() if objectAPI == nil { writeErrorResponse(w, ErrServerNotInitialized, r.URL) return } - // DeleteBucket does not have any bucket action. - if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone { + if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketAction, bucket, ""); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } - vars := mux.Vars(r) - bucket := vars["bucket"] deleteBucket := objectAPI.DeleteBucket if api.CacheAPI() != nil { deleteBucket = api.CacheAPI().DeleteBucket @@ -734,13 +653,8 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http. 
return } - // Notify all peers (including self) to update in-memory state - for addr, err := range globalNotificationSys.UpdateBucketPolicy(bucket) { - logger.GetReqInfo(ctx).AppendTags("remotePeer", addr.Name) - logger.LogIf(ctx, err) - } - globalNotificationSys.RemoveNotification(bucket) + globalPolicySys.Remove(bucket) for addr, err := range globalNotificationSys.DeleteBucket(bucket) { logger.GetReqInfo(ctx).AppendTags("remotePeer", addr.Name) logger.LogIf(ctx, err) diff --git a/cmd/bucket-handlers_test.go b/cmd/bucket-handlers_test.go index 052b59c2a..1b88dfdb3 100644 --- a/cmd/bucket-handlers_test.go +++ b/cmd/bucket-handlers_test.go @@ -155,7 +155,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse, // sets the bucket policy using the policy statement generated from `getReadOnlyBucketStatement` so that the // unsigned request goes through and its validated again. - ExecObjectLayerAPIAnonTest(t, obj, "TestGetBucketLocationHandler", bucketName, "", instanceType, apiRouter, anonReq, getReadOnlyBucketStatement) + ExecObjectLayerAPIAnonTest(t, obj, "TestGetBucketLocationHandler", bucketName, "", instanceType, apiRouter, anonReq, getAnonReadOnlyBucketPolicy(bucketName)) // HTTP request for testing when `objectLayer` is set to `nil`. // There is no need to use an existing bucket and valid input for creating the request @@ -260,7 +260,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse, // sets the bucket policy using the policy statement generated from `getReadOnlyBucketStatement` so that the // unsigned request goes through and its validated again. 
- ExecObjectLayerAPIAnonTest(t, obj, "TestHeadBucketHandler", bucketName, "", instanceType, apiRouter, anonReq, getReadOnlyBucketStatement) + ExecObjectLayerAPIAnonTest(t, obj, "TestHeadBucketHandler", bucketName, "", instanceType, apiRouter, anonReq, getAnonReadOnlyBucketPolicy(bucketName)) // HTTP request for testing when `objectLayer` is set to `nil`. // There is no need to use an existing bucket and valid input for creating the request @@ -494,7 +494,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse, // sets the bucket policy using the policy statement generated from `getWriteOnlyBucketStatement` so that the // unsigned request goes through and its validated again. - ExecObjectLayerAPIAnonTest(t, obj, "TestListMultipartUploadsHandler", bucketName, "", instanceType, apiRouter, anonReq, getWriteOnlyBucketStatement) + ExecObjectLayerAPIAnonTest(t, obj, "TestListMultipartUploadsHandler", bucketName, "", instanceType, apiRouter, anonReq, getAnonWriteOnlyBucketPolicy(bucketName)) // HTTP request for testing when `objectLayer` is set to `nil`. // There is no need to use an existing bucket and valid input for creating the request @@ -592,7 +592,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse, // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the // unsigned request goes through and its validated again. 
- ExecObjectLayerAPIAnonTest(t, obj, "ListBucketsHandler", "", "", instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement) + ExecObjectLayerAPIAnonTest(t, obj, "ListBucketsHandler", "", "", instanceType, apiRouter, anonReq, getAnonWriteOnlyBucketPolicy("*")) // HTTP request for testing when `objectLayer` is set to `nil`. // There is no need to use an existing bucket and valid input for creating the request @@ -793,31 +793,3 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa // `ExecObjectLayerAPINilTest` manages the operation. ExecObjectLayerAPINilTest(t, nilBucket, nilObject, instanceType, apiRouter, nilReq) } - -func TestIsBucketActionAllowed(t *testing.T) { - ExecObjectLayerAPITest(t, testIsBucketActionAllowedHandler, []string{"BucketLocation"}) -} - -func testIsBucketActionAllowedHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, - credentials auth.Credentials, t *testing.T) { - - testCases := []struct { - // input. - action string - bucket string - prefix string - isGlobalPoliciesNil bool - // flag indicating whether the test should pass. 
- shouldPass bool - }{ - {"s3:GetBucketLocation", "mybucket", "abc", true, false}, - {"s3:ListObject", "mybucket", "abc", false, false}, - } - for i, testCase := range testCases { - isAllowed := isBucketActionAllowed(testCase.action, testCase.bucket, testCase.prefix, obj) - if isAllowed != testCase.shouldPass { - t.Errorf("Case %d: Expected the response status to be `%t`, but instead found `%t`", i+1, testCase.shouldPass, isAllowed) - } - - } -} diff --git a/cmd/bucket-notification-handlers.go b/cmd/bucket-notification-handlers.go index c0caaa0ca..6d13d6801 100644 --- a/cmd/bucket-notification-handlers.go +++ b/cmd/bucket-notification-handlers.go @@ -27,6 +27,7 @@ import ( "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/event/target" xnet "github.com/minio/minio/pkg/net" + "github.com/minio/minio/pkg/policy" ) const ( @@ -43,6 +44,9 @@ var errNoSuchNotifications = errors.New("The specified bucket does not have buck func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, "GetBucketNotification") + vars := mux.Vars(r) + bucketName := vars["bucket"] + objAPI := api.ObjectAPI() if objAPI == nil { writeErrorResponse(w, ErrServerNotInitialized, r.URL) @@ -53,14 +57,12 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, writeErrorResponse(w, ErrNotImplemented, r.URL) return } - if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone { + + if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketNotificationAction, bucketName, ""); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } - vars := mux.Vars(r) - bucketName := vars["bucket"] - _, err := objAPI.GetBucketInfo(ctx, bucketName) if err != nil { writeErrorResponse(w, toAPIErrorCode(err), r.URL) @@ -104,14 +106,15 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, writeErrorResponse(w, ErrNotImplemented, r.URL) 
return } - if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, s3Error, r.URL) - return - } vars := mux.Vars(r) bucketName := vars["bucket"] + if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketNotificationAction, bucketName, ""); s3Error != ErrNone { + writeErrorResponse(w, s3Error, r.URL) + return + } + _, err := objectAPI.GetBucketInfo(ctx, bucketName) if err != nil { writeErrorResponse(w, toAPIErrorCode(err), r.URL) @@ -166,14 +169,15 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit writeErrorResponse(w, ErrNotImplemented, r.URL) return } - if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone { - writeErrorResponse(w, s3Error, r.URL) - return - } vars := mux.Vars(r) bucketName := vars["bucket"] + if s3Error := checkRequestAuthType(ctx, r, policy.ListenBucketNotificationAction, bucketName, ""); s3Error != ErrNone { + writeErrorResponse(w, s3Error, r.URL) + return + } + values := r.URL.Query() var prefix string diff --git a/cmd/bucket-policy-handlers.go b/cmd/bucket-policy-handlers.go index 6adcf461b..8279944e0 100644 --- a/cmd/bucket-policy-handlers.go +++ b/cmd/bucket-policy-handlers.go @@ -19,206 +19,24 @@ package cmd import ( "encoding/json" "io" - "io/ioutil" - "net" "net/http" - "runtime" - "strings" humanize "github.com/dustin/go-humanize" "github.com/gorilla/mux" - "github.com/minio/minio-go/pkg/policy" "github.com/minio/minio/cmd/logger" - "github.com/minio/minio/pkg/wildcard" + "github.com/minio/minio/pkg/policy" ) -// maximum supported access policy size. -const maxAccessPolicySize = 20 * humanize.KiByte +const ( + // As per AWS S3 specification, 20KiB policy JSON data is allowed. + maxBucketPolicySize = 20 * humanize.KiByte -// Verify if a given action is valid for the url path based on the -// existing bucket access policy. 
-func bucketPolicyEvalStatements(action string, resource string, conditions policy.ConditionKeyMap, - statements []policy.Statement) bool { - for _, statement := range statements { - if bucketPolicyMatchStatement(action, resource, conditions, statement) { - if statement.Effect == "Allow" { - return true - } - // Do not uncomment kept here for readability. - // else statement.Effect == "Deny" - return false - } - } - // None match so deny. - return false -} + // Policy configuration file. + bucketPolicyConfig = "policy.json" +) -// Verify if action, resource and conditions match input policy statement. -func bucketPolicyMatchStatement(action string, resource string, conditions policy.ConditionKeyMap, - statement policy.Statement) bool { - // Verify if action, resource and condition match in given statement. - return (bucketPolicyActionMatch(action, statement) && - bucketPolicyResourceMatch(resource, statement) && - bucketPolicyConditionMatch(conditions, statement)) -} - -// Verify if given action matches with policy statement. -func bucketPolicyActionMatch(action string, statement policy.Statement) bool { - return !statement.Actions.FuncMatch(actionMatch, action).IsEmpty() -} - -// Match function matches wild cards in 'pattern' for resource. -func resourceMatch(pattern, resource string) bool { - if runtime.GOOS == "windows" { - // For windows specifically make sure we are case insensitive. - return wildcard.Match(strings.ToLower(pattern), strings.ToLower(resource)) - } - return wildcard.Match(pattern, resource) -} - -// Match function matches wild cards in 'pattern' for action. -func actionMatch(pattern, action string) bool { - return wildcard.MatchSimple(pattern, action) -} - -func refererMatch(pattern, referer string) bool { - return wildcard.MatchSimple(pattern, referer) -} - -// isIPInCIDR - checks if a given a IP address is a member of the given subnet. -func isIPInCIDR(cidr, ip string) bool { - // AWS S3 spec says IPs must use standard CIDR notation. 
- // http://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-3. - _, cidrNet, err := net.ParseCIDR(cidr) - if err != nil { - return false // If provided CIDR can't be parsed no IP will be in the subnet. - } - addr := net.ParseIP(ip) - return cidrNet.Contains(addr) -} - -// Verify if given resource matches with policy statement. -func bucketPolicyResourceMatch(resource string, statement policy.Statement) bool { - // the resource rule for object could contain "*" wild card. - // the requested object can be given access based on the already set bucket policy if - // the match is successful. - // More info: http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html. - return !statement.Resources.FuncMatch(resourceMatch, resource).IsEmpty() -} - -// Verify if given condition matches with policy statement. -func bucketPolicyConditionMatch(conditions policy.ConditionKeyMap, statement policy.Statement) bool { - // Supports following conditions. - // - StringEquals - // - StringNotEquals - // - StringLike - // - StringNotLike - // - IpAddress - // - NotIpAddress - // - // Supported applicable condition keys for each conditions. - // - s3:prefix - // - s3:max-keys - // - s3:aws-Referer - // - s3:aws-SourceIp - - // The following loop evaluates the logical AND of all the - // conditions in the statement. Note: we can break out of the - // loop if and only if a condition evaluates to false. - for condition, conditionKeyVal := range statement.Conditions { - prefixConditon := conditionKeyVal["s3:prefix"] - maxKeyCondition := conditionKeyVal["s3:max-keys"] - if condition == "StringEquals" { - // If there is no condition with "s3:prefix" or "s3:max-keys" condition key - // then there is nothing to check condition against. 
- if !prefixConditon.IsEmpty() && !prefixConditon.Equals(conditions["prefix"]) { - return false - } - if !maxKeyCondition.IsEmpty() && !maxKeyCondition.Equals(conditions["max-keys"]) { - return false - } - } else if condition == "StringNotEquals" { - // If there is no condition with "s3:prefix" or "s3:max-keys" condition key - // then there is nothing to check condition against. - if !prefixConditon.IsEmpty() && prefixConditon.Equals(conditions["prefix"]) { - return false - } - if !maxKeyCondition.IsEmpty() && maxKeyCondition.Equals(conditions["max-keys"]) { - return false - } - } else if condition == "StringLike" { - awsReferers := conditionKeyVal["aws:Referer"] - // Skip empty condition, it is trivially satisfied. - if awsReferers.IsEmpty() { - continue - } - // wildcard match of referer in statement was not empty. - // StringLike has a match, i.e, condition evaluates to true. - refererFound := false - for referer := range conditions["referer"] { - if !awsReferers.FuncMatch(refererMatch, referer).IsEmpty() { - refererFound = true - break - } - } - // No matching referer found, so the condition is false. - if !refererFound { - return false - } - } else if condition == "StringNotLike" { - awsReferers := conditionKeyVal["aws:Referer"] - // Skip empty condition, it is trivially satisfied. - if awsReferers.IsEmpty() { - continue - } - // wildcard match of referer in statement was not empty. - // StringNotLike has a match, i.e, condition evaluates to false. - for referer := range conditions["referer"] { - if !awsReferers.FuncMatch(refererMatch, referer).IsEmpty() { - return false - } - } - } else if condition == "IpAddress" { - awsIps := conditionKeyVal["aws:SourceIp"] - // Skip empty condition, it is trivially satisfied. - if awsIps.IsEmpty() { - continue - } - // wildcard match of ip if statement was not empty. - // Find a valid ip. 
- ipFound := false - for ip := range conditions["ip"] { - if !awsIps.FuncMatch(isIPInCIDR, ip).IsEmpty() { - ipFound = true - break - } - } - if !ipFound { - return false - } - } else if condition == "NotIpAddress" { - awsIps := conditionKeyVal["aws:SourceIp"] - // Skip empty condition, it is trivially satisfied. - if awsIps.IsEmpty() { - continue - } - // wildcard match of ip if statement was not empty. - // Find if nothing matches. - for ip := range conditions["ip"] { - if !awsIps.FuncMatch(isIPInCIDR, ip).IsEmpty() { - return false - } - } - } - } - - return true -} - -// PutBucketPolicyHandler - PUT Bucket policy -// ----------------- -// This implementation of the PUT operation uses the policy -// subresource to add to or replace a policy on a bucket +// PutBucketPolicyHandler - This HTTP handler stores given bucket policy configuration as per +// https://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, "PutBucketPolicy") @@ -228,65 +46,46 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht return } - if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone { + vars := mux.Vars(r) + bucket := vars["bucket"] + + if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketPolicyAction, bucket, ""); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } - vars := mux.Vars(r) - bucket := vars["bucket"] - - // Before proceeding validate if bucket exists. - _, err := objAPI.GetBucketInfo(ctx, bucket) - if err != nil { + // Check if bucket exists. + if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil { writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } - // If Content-Length is unknown or zero, deny the - // request. PutBucketPolicy always needs a Content-Length. 
- if r.ContentLength == -1 || r.ContentLength == 0 { + // Error out if Content-Length is missing. + // PutBucketPolicy always needs Content-Length. + if r.ContentLength <= 0 { writeErrorResponse(w, ErrMissingContentLength, r.URL) return } - // If Content-Length is greater than maximum allowed policy size. - if r.ContentLength > maxAccessPolicySize { + + // Error out if Content-Length is beyond allowed size. + if r.ContentLength > maxBucketPolicySize { writeErrorResponse(w, ErrEntityTooLarge, r.URL) return } - // Read access policy up to maxAccessPolicySize. - // http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html - // bucket policies are limited to 20KB in size, using a limit reader. - policyBytes, err := ioutil.ReadAll(io.LimitReader(r.Body, maxAccessPolicySize)) + bucketPolicy, err := policy.ParseConfig(io.LimitReader(r.Body, r.ContentLength), bucket) if err != nil { - logger.LogIf(ctx, err) + writeErrorResponse(w, ErrMalformedPolicy, r.URL) + return + } + + if err = objAPI.SetBucketPolicy(ctx, bucket, bucketPolicy); err != nil { writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } - policyInfo := policy.BucketAccessPolicy{} - if err = json.Unmarshal(policyBytes, &policyInfo); err != nil { - writeErrorResponse(w, ErrInvalidPolicyDocument, r.URL) - return - } - // Parse check bucket policy. - if s3Error := checkBucketPolicyResources(bucket, policyInfo); s3Error != ErrNone { - writeErrorResponse(w, s3Error, r.URL) - return - } - if err = objAPI.SetBucketPolicy(ctx, bucket, policyInfo); err != nil { - switch err.(type) { - case NotImplemented: - // Return error for invalid bucket name. 
- writeErrorResponse(w, ErrNotImplemented, r.URL) - default: - writeErrorResponse(w, ErrMalformedPolicy, r.URL) - } - return - } - - for addr, err := range globalNotificationSys.UpdateBucketPolicy(bucket) { + globalPolicySys.Set(bucket, *bucketPolicy) + for addr, err := range globalNotificationSys.SetBucketPolicy(bucket, bucketPolicy) { logger.GetReqInfo(ctx).AppendTags("remotePeer", addr.Name) logger.LogIf(ctx, err) } @@ -295,10 +94,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht writeSuccessNoContent(w) } -// DeleteBucketPolicyHandler - DELETE Bucket policy -// ----------------- -// This implementation of the DELETE operation uses the policy -// subresource to add to remove a policy on a bucket. +// DeleteBucketPolicyHandler - This HTTP handler removes bucket policy configuration. func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, "DeleteBucketPolicy") @@ -308,29 +104,27 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r return } - if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone { + vars := mux.Vars(r) + bucket := vars["bucket"] + + if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketPolicyAction, bucket, ""); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } - vars := mux.Vars(r) - bucket := vars["bucket"] - - // Before proceeding validate if bucket exists. - _, err := objAPI.GetBucketInfo(ctx, bucket) - if err != nil { + // Check if bucket exists. + if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil { writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } - // Delete bucket access policy, by passing an empty policy - // struct. 
if err := objAPI.DeleteBucketPolicy(ctx, bucket); err != nil { writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } - for addr, err := range globalNotificationSys.UpdateBucketPolicy(bucket) { + globalPolicySys.Remove(bucket) + for addr, err := range globalNotificationSys.RemoveBucketPolicy(bucket) { logger.GetReqInfo(ctx).AppendTags("remotePeer", addr.Name) logger.LogIf(ctx, err) } @@ -339,10 +133,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r writeSuccessNoContent(w) } -// GetBucketPolicyHandler - GET Bucket policy -// ----------------- -// This operation uses the policy -// subresource to return the policy of a specified bucket. +// GetBucketPolicyHandler - This HTTP handler returns bucket policy configuration. func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, "GetBucketPolicy") @@ -352,29 +143,28 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht return } - if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone { + vars := mux.Vars(r) + bucket := vars["bucket"] + + if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } - vars := mux.Vars(r) - bucket := vars["bucket"] - - // Before proceeding validate if bucket exists. - _, err := objAPI.GetBucketInfo(ctx, bucket) - if err != nil { + // Check if bucket exists. + if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil { writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } // Read bucket access policy. 
- policy, err := objAPI.GetBucketPolicy(ctx, bucket) + bucketPolicy, err := objAPI.GetBucketPolicy(ctx, bucket) if err != nil { writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } - policyBytes, err := json.Marshal(&policy) + policyData, err := json.Marshal(bucketPolicy) if err != nil { logger.LogIf(ctx, err) writeErrorResponse(w, toAPIErrorCode(err), r.URL) @@ -382,5 +172,5 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht } // Write to client. - w.Write(policyBytes) + w.Write(policyData) } diff --git a/cmd/bucket-policy-handlers_test.go b/cmd/bucket-policy-handlers_test.go index 387741a06..210e1a222 100644 --- a/cmd/bucket-policy-handlers_test.go +++ b/cmd/bucket-policy-handlers_test.go @@ -24,222 +24,72 @@ import ( "io/ioutil" "net/http" "net/http/httptest" + "reflect" + "strings" "testing" - "github.com/minio/minio-go/pkg/policy" - "github.com/minio/minio-go/pkg/set" "github.com/minio/minio/pkg/auth" + "github.com/minio/minio/pkg/policy" + "github.com/minio/minio/pkg/policy/condition" ) -// Tests validate Bucket policy resource matcher. -func TestBucketPolicyResourceMatch(t *testing.T) { - - // generates statement with given resource.. - generateStatement := func(resource string) policy.Statement { - statement := policy.Statement{} - statement.Resources = set.CreateStringSet([]string{resource}...) - return statement - } - - // generates resource prefix. - generateResource := func(bucketName, objectName string) string { - return bucketARNPrefix + bucketName + "/" + objectName - } - - testCases := []struct { - resourceToMatch string - statement policy.Statement - expectedResourceMatch bool - }{ - // Test case 1-4. - // Policy with resource ending with bucket/* allows access to all objects inside the given bucket. 
- {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/*")), true}, - {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/*")), true}, - {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/*")), true}, - {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/*")), true}, - // Test case - 5. - // Policy with resource ending with bucket/oo* should not allow access to bucket/output.txt. - {generateResource("minio-bucket", "output.txt"), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/oo*")), false}, - // Test case - 6. - // Policy with resource ending with bucket/oo* should allow access to bucket/ootput.txt. - {generateResource("minio-bucket", "ootput.txt"), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/oo*")), true}, - // Test case - 7. - // Policy with resource ending with bucket/oo* allows access to all sub-dirs starting with "oo" inside given bucket. - {generateResource("minio-bucket", "oop-bucket/my-file"), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/oo*")), true}, - // Test case - 8. - {generateResource("minio-bucket", "Asia/India/1.pjg"), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/Asia/Japan/*")), false}, - // Test case - 9. - {generateResource("minio-bucket", "Asia/India/1.pjg"), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/Asia/Japan/*")), false}, - // Test case - 10. - // Proves that the name space is flat. - {generateResource("minio-bucket", "Africa/Bihar/India/design_info.doc/Bihar"), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, - "minio-bucket"+"/*/India/*/Bihar")), true}, - // Test case - 11. - // Proves that the name space is flat. 
- {generateResource("minio-bucket", "Asia/China/India/States/Bihar/output.txt"), generateStatement(fmt.Sprintf("%s%s", bucketARNPrefix, - "minio-bucket"+"/*/India/*/Bihar/*")), true}, - } - for i, testCase := range testCases { - actualResourceMatch := bucketPolicyResourceMatch(testCase.resourceToMatch, testCase.statement) - if testCase.expectedResourceMatch != actualResourceMatch { - t.Errorf("Test %d: Expected Resource match to be `%v`, but instead found it to be `%v`", i+1, testCase.expectedResourceMatch, actualResourceMatch) - } +func getAnonReadOnlyBucketPolicy(bucketName string) *policy.Policy { + return &policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.GetBucketLocationAction, policy.ListBucketAction), + policy.NewResourceSet(policy.NewResource(bucketName, "")), + condition.NewFunctions(), + )}, } } -// TestBucketPolicyActionMatch - Test validates whether given action on the -// bucket/object matches the allowed actions in policy.Statement. -// This test preserves the allowed actions for all 3 sets of policies, that is read-write,read-only, write-only. -// The intention of the test is to catch any changes made to allowed action for on eof the above 3 major policy groups mentioned. -func TestBucketPolicyActionMatch(t *testing.T) { - bucketName := getRandomBucketName() - objectPrefix := "test-object" - - testCases := []struct { - action string - statement policy.Statement - expectedResult bool - }{ - // s3:GetBucketLocation is the action necessary to be present in the bucket policy to allow - // fetching of bucket location on an Anonymous/unsigned request. - - //r ead-write bucket policy is expected to allow GetBucketLocation operation on an anonymous request (Test case - 1). 
- {"s3:GetBucketLocation", getReadWriteBucketStatement(bucketName, objectPrefix), true}, - // write-only bucket policy is expected to allow GetBucketLocation operation on an anonymous request (Test case - 2). - {"s3:GetBucketLocation", getWriteOnlyBucketStatement(bucketName, objectPrefix), true}, - // read-only bucket policy is expected to allow GetBucketLocation operation on an anonymous request (Test case - 3). - {"s3:GetBucketLocation", getReadOnlyBucketStatement(bucketName, objectPrefix), true}, - - // Any of the Object level access permissions shouldn't allow for GetBucketLocation operation on an Anonymous/unsigned request (Test cases 4-6). - {"s3:GetBucketLocation", getReadWriteObjectStatement(bucketName, objectPrefix), false}, - {"s3:GetBucketLocation", getWriteOnlyObjectStatement(bucketName, objectPrefix), false}, - {"s3:GetBucketLocation", getReadOnlyObjectStatement(bucketName, objectPrefix), false}, - - // s3:ListBucketMultipartUploads is the action necessary to be present in the bucket policy to allow - // Listing of multipart uploads in a given bucket for an Anonymous/unsigned request. - - //read-write bucket policy is expected to allow ListBucketMultipartUploads operation on an anonymous request (Test case 7). - {"s3:ListBucketMultipartUploads", getReadWriteBucketStatement(bucketName, objectPrefix), true}, - // write-only bucket policy is expected to allow ListBucketMultipartUploads operation on an anonymous request (Test case 8). - {"s3:ListBucketMultipartUploads", getWriteOnlyBucketStatement(bucketName, objectPrefix), true}, - // read-only bucket policy is expected to not allow ListBucketMultipartUploads operation on an anonymous request (Test case 9). - // the allowed actions in read-only bucket statement are "s3:GetBucketLocation","s3:ListBucket", - // this should not allow for ListBucketMultipartUploads operations. 
- {"s3:ListBucketMultipartUploads", getReadOnlyBucketStatement(bucketName, objectPrefix), false}, - - // Any of the object level policy will not allow for s3:ListBucketMultipartUploads (Test cases 10-12). - {"s3:ListBucketMultipartUploads", getReadWriteObjectStatement(bucketName, objectPrefix), false}, - {"s3:ListBucketMultipartUploads", getWriteOnlyObjectStatement(bucketName, objectPrefix), false}, - {"s3:ListBucketMultipartUploads", getReadOnlyObjectStatement(bucketName, objectPrefix), false}, - - // s3:ListBucket is the action necessary to be present in the bucket policy to allow - // listing of all objects inside a given bucket on an Anonymous/unsigned request. - - // Cases for testing ListBucket access for different Bucket level access permissions. - // read-only bucket policy is expected to allow ListBucket operation on an anonymous request (Test case 13). - {"s3:ListBucket", getReadOnlyBucketStatement(bucketName, objectPrefix), true}, - // read-write bucket policy is expected to allow ListBucket operation on an anonymous request (Test case 14). - {"s3:ListBucket", getReadWriteBucketStatement(bucketName, objectPrefix), true}, - // write-only bucket policy is expected to not allow ListBucket operation on an anonymous request (Test case 15). - // the allowed actions in write-only bucket statement are "s3:GetBucketLocation", "s3:ListBucketMultipartUploads", - // this should not allow for ListBucket operations. - {"s3:ListBucket", getWriteOnlyBucketStatement(bucketName, objectPrefix), false}, - - // Cases for testing ListBucket access for different Object level access permissions (Test cases 16-18). - // Any of the Object level access permissions shouldn't allow for ListBucket operation on an Anonymous/unsigned request. 
- {"s3:ListBucket", getReadOnlyObjectStatement(bucketName, objectPrefix), false}, - {"s3:ListBucket", getReadWriteObjectStatement(bucketName, objectPrefix), false}, - {"s3:ListBucket", getWriteOnlyObjectStatement(bucketName, objectPrefix), false}, - - // s3:DeleteObject is the action necessary to be present in the bucket policy to allow - // deleting/removal of objects inside a given bucket for an Anonymous/unsigned request. - - // Cases for testing DeleteObject access for different Bucket level access permissions (Test cases 19-21). - // Any of the Bucket level access permissions shouldn't allow for DeleteObject operation on an Anonymous/unsigned request. - {"s3:DeleteObject", getReadOnlyBucketStatement(bucketName, objectPrefix), false}, - {"s3:DeleteObject", getReadWriteBucketStatement(bucketName, objectPrefix), false}, - {"s3:DeleteObject", getWriteOnlyBucketStatement(bucketName, objectPrefix), false}, - - // Cases for testing DeleteObject access for different Object level access permissions (Test cases 22). - // read-only bucket policy is expected to not allow Delete Object operation on an anonymous request. - {"s3:DeleteObject", getReadOnlyObjectStatement(bucketName, objectPrefix), false}, - // read-write bucket policy is expected to allow Delete Bucket operation on an anonymous request (Test cases 23). - {"s3:DeleteObject", getReadWriteObjectStatement(bucketName, objectPrefix), true}, - // write-only bucket policy is expected to allow Delete Object operation on an anonymous request (Test cases 24). - {"s3:DeleteObject", getWriteOnlyObjectStatement(bucketName, objectPrefix), true}, - - // s3:AbortMultipartUpload is the action necessary to be present in the bucket policy to allow - // cancelling or abortion of an already initiated multipart upload operation for an Anonymous/unsigned request. - - // Cases for testing AbortMultipartUpload access for different Bucket level access permissions (Test cases 25-27). 
- // Any of the Bucket level access permissions shouldn't allow for AbortMultipartUpload operation on an Anonymous/unsigned request. - {"s3:AbortMultipartUpload", getReadOnlyBucketStatement(bucketName, objectPrefix), false}, - {"s3:AbortMultipartUpload", getReadWriteBucketStatement(bucketName, objectPrefix), false}, - {"s3:AbortMultipartUpload", getWriteOnlyBucketStatement(bucketName, objectPrefix), false}, - - // Cases for testing AbortMultipartUpload access for different Object level access permissions. - // read-only object policy is expected to not allow AbortMultipartUpload operation on an anonymous request (Test case 28). - {"s3:AbortMultipartUpload", getReadOnlyObjectStatement(bucketName, objectPrefix), false}, - // read-write object policy is expected to allow AbortMultipartUpload operation on an anonymous request (Test case 29). - {"s3:AbortMultipartUpload", getReadWriteObjectStatement(bucketName, objectPrefix), true}, - // write-only object policy is expected to allow AbortMultipartUpload operation on an anonymous request (Test case 30). - {"s3:AbortMultipartUpload", getWriteOnlyObjectStatement(bucketName, objectPrefix), true}, - - // s3:PutObject is the action necessary to be present in the bucket policy to allow - // uploading of an object for an Anonymous/unsigned request. - - // Cases for testing PutObject access for different Bucket level access permissions (Test cases 31-33). - // Any of the Bucket level access permissions shouldn't allow for PutObject operation on an Anonymous/unsigned request. - {"s3:PutObject", getReadOnlyBucketStatement(bucketName, objectPrefix), false}, - {"s3:PutObject", getReadWriteBucketStatement(bucketName, objectPrefix), false}, - {"s3:PutObject", getWriteOnlyBucketStatement(bucketName, objectPrefix), false}, - - // Cases for testing PutObject access for different Object level access permissions. - // read-only object policy is expected to not allow PutObject operation on an anonymous request (Test case 34). 
- {"s3:PutObject", getReadOnlyObjectStatement(bucketName, objectPrefix), false}, - // read-write object policy is expected to allow PutObject operation on an anonymous request (Test case 35). - {"s3:PutObject", getReadWriteObjectStatement(bucketName, objectPrefix), true}, - // write-only object policy is expected to allow PutObject operation on an anonymous request (Test case 36). - {"s3:PutObject", getWriteOnlyObjectStatement(bucketName, objectPrefix), true}, - - // s3:GetObject is the action necessary to be present in the bucket policy to allow - // downloading of an object for an Anonymous/unsigned request. - - // Cases for testing GetObject access for different Bucket level access permissions (Test cases 37-39). - // Any of the Bucket level access permissions shouldn't allow for GetObject operation on an Anonymous/unsigned request. - {"s3:GetObject", getReadOnlyBucketStatement(bucketName, objectPrefix), false}, - {"s3:GetObject", getReadWriteBucketStatement(bucketName, objectPrefix), false}, - {"s3:GetObject", getWriteOnlyBucketStatement(bucketName, objectPrefix), false}, - - // Cases for testing GetObject access for different Object level access permissions. - // read-only bucket policy is expected to allow downloading of an Object on an anonymous request (Test case 40). - {"s3:GetObject", getReadOnlyObjectStatement(bucketName, objectPrefix), true}, - // read-write bucket policy is expected to allow downloading of an Object on an anonymous request (Test case 41). - {"s3:GetObject", getReadWriteObjectStatement(bucketName, objectPrefix), true}, - // write-only bucket policy is expected to not allow downloading of an Object on an anonymous request (Test case 42). - {"s3:GetObject", getWriteOnlyObjectStatement(bucketName, objectPrefix), false}, - - // s3:ListMultipartUploadParts is the action necessary to be present in the bucket policy to allow - // Listing of uploaded parts for an Anonymous/unsigned request. 
- - // Any of the Bucket level access permissions shouldn't allow for ListMultipartUploadParts operation on an Anonymous/unsigned request. - // read-only bucket policy is expected to not allow ListMultipartUploadParts operation on an anonymous request (Test cases 43-45). - {"s3:ListMultipartUploadParts", getReadOnlyBucketStatement(bucketName, objectPrefix), false}, - {"s3:ListMultipartUploadParts", getReadWriteBucketStatement(bucketName, objectPrefix), false}, - {"s3:ListMultipartUploadParts", getWriteOnlyBucketStatement(bucketName, objectPrefix), false}, - - // read-only object policy is expected to not allow ListMultipartUploadParts operation on an anonymous request (Test case 46). - {"s3:ListMultipartUploadParts", getReadOnlyObjectStatement(bucketName, objectPrefix), false}, - // read-write object policy is expected to allow ListMultipartUploadParts operation on an anonymous request (Test case 47). - {"s3:ListMultipartUploadParts", getReadWriteObjectStatement(bucketName, objectPrefix), true}, - // write-only object policy is expected to allow ListMultipartUploadParts operation on an anonymous request (Test case 48). 
- {"s3:ListMultipartUploadParts", getWriteOnlyObjectStatement(bucketName, objectPrefix), true}, +func getAnonWriteOnlyBucketPolicy(bucketName string) *policy.Policy { + return &policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet( + policy.GetBucketLocationAction, + policy.ListBucketMultipartUploadsAction, + ), + policy.NewResourceSet(policy.NewResource(bucketName, "")), + condition.NewFunctions(), + )}, } - for i, testCase := range testCases { - actualResult := bucketPolicyActionMatch(testCase.action, testCase.statement) - if testCase.expectedResult != actualResult { - t.Errorf("Test %d: Expected the result to be `%v`, but instead found it to be `%v`", i+1, testCase.expectedResult, actualResult) - } +} + +func getAnonReadOnlyObjectPolicy(bucketName, prefix string) *policy.Policy { + return &policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.GetObjectAction), + policy.NewResourceSet(policy.NewResource(bucketName, prefix)), + condition.NewFunctions(), + )}, + } +} + +func getAnonWriteOnlyObjectPolicy(bucketName, prefix string) *policy.Policy { + return &policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet( + policy.AbortMultipartUploadAction, + policy.DeleteObjectAction, + policy.ListMultipartUploadPartsAction, + policy.PutObjectAction, + ), + policy.NewResourceSet(policy.NewResource(bucketName, prefix)), + condition.NewFunctions(), + )}, } } @@ -290,7 +140,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string bucketName: bucketName, bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))), - policyLen: maxAccessPolicySize + 1, + 
policyLen: maxBucketPolicySize + 1, accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, expectedRespStatus: http.StatusBadRequest, @@ -430,7 +280,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse, // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the // unsigned request goes through and its validated again. - ExecObjectLayerAPIAnonTest(t, obj, "PutBucketPolicyHandler", bucketName, "", instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement) + ExecObjectLayerAPIAnonTest(t, obj, "PutBucketPolicyHandler", bucketName, "", instanceType, apiRouter, anonReq, getAnonWriteOnlyBucketPolicy(bucketName)) // HTTP request for testing when `objectLayer` is set to `nil`. // There is no need to use an existing bucket and valid input for creating the request @@ -459,7 +309,7 @@ func TestGetBucketPolicyHandler(t *testing.T) { func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, credentials auth.Credentials, t *testing.T) { // template for constructing HTTP request body for PUT bucket policy. - bucketPolicyTemplate := `{"Version":"2012-10-17","Statement":[{"Action":["s3:GetBucketLocation","s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s/this*"],"Sid":""}]}` + bucketPolicyTemplate := `{"Version":"2012-10-17","Statement":[{"Action":["s3:GetBucketLocation","s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s"]},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s/this*"]}]}` // Writing bucket policy before running test on GetBucketPolicy. 
putTestPolicies := []struct { @@ -572,7 +422,16 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string if recV4.Code != testCase.expectedRespStatus { // Verify whether the bucket policy fetched is same as the one inserted. - if expectedBucketPolicyStr != string(bucketPolicyReadBuf) { + expectedPolicy, err := policy.ParseConfig(strings.NewReader(expectedBucketPolicyStr), testCase.bucketName) + if err != nil { + t.Fatalf("unexpected error. %v", err) + } + gotPolicy, err := policy.ParseConfig(bytes.NewReader(bucketPolicyReadBuf), testCase.bucketName) + if err != nil { + t.Fatalf("unexpected error. %v", err) + } + + if !reflect.DeepEqual(expectedPolicy, gotPolicy) { t.Errorf("Test %d: %s: Bucket policy differs from expected value.", i+1, instanceType) } } @@ -598,7 +457,16 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string } if recV2.Code == http.StatusOK { // Verify whether the bucket policy fetched is same as the one inserted. - if expectedBucketPolicyStr != string(bucketPolicyReadBuf) { + expectedPolicy, err := policy.ParseConfig(strings.NewReader(expectedBucketPolicyStr), testCase.bucketName) + if err != nil { + t.Fatalf("unexpected error. %v", err) + } + gotPolicy, err := policy.ParseConfig(bytes.NewReader(bucketPolicyReadBuf), testCase.bucketName) + if err != nil { + t.Fatalf("unexpected error. %v", err) + } + + if !reflect.DeepEqual(expectedPolicy, gotPolicy) { t.Errorf("Test %d: %s: Bucket policy differs from expected value.", i+1, instanceType) } } @@ -617,7 +485,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse, // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the // unsigned request goes through and its validated again. 
- ExecObjectLayerAPIAnonTest(t, obj, "GetBucketPolicyHandler", bucketName, "", instanceType, apiRouter, anonReq, getReadOnlyObjectStatement) + ExecObjectLayerAPIAnonTest(t, obj, "GetBucketPolicyHandler", bucketName, "", instanceType, apiRouter, anonReq, getAnonReadOnlyBucketPolicy(bucketName)) // HTTP request for testing when `objectLayer` is set to `nil`. // There is no need to use an existing bucket and valid input for creating the request @@ -820,7 +688,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse, // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the // unsigned request goes through and its validated again. - ExecObjectLayerAPIAnonTest(t, obj, "DeleteBucketPolicyHandler", bucketName, "", instanceType, apiRouter, anonReq, getReadOnlyObjectStatement) + ExecObjectLayerAPIAnonTest(t, obj, "DeleteBucketPolicyHandler", bucketName, "", instanceType, apiRouter, anonReq, getAnonWriteOnlyBucketPolicy(bucketName)) // HTTP request for testing when `objectLayer` is set to `nil`. // There is no need to use an existing bucket and valid input for creating the request @@ -838,174 +706,3 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str // `ExecObjectLayerAPINilTest` manages the operation. ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq) } - -// TestBucketPolicyConditionMatch - Tests to validate whether bucket policy conditions match. -func TestBucketPolicyConditionMatch(t *testing.T) { - // obtain the inner map[string]set.StringSet for policy.Statement.Conditions. - getInnerMap := func(key2, value string) map[string]set.StringSet { - innerMap := make(map[string]set.StringSet) - innerMap[key2] = set.CreateStringSet(value) - return innerMap - } - - // obtain policy.Statement with Conditions set. 
- getStatementWithCondition := func(key1, key2, value string) policy.Statement { - innerMap := getInnerMap(key2, value) - // to set policyStatment.Conditions . - conditions := make(policy.ConditionMap) - conditions[key1] = innerMap - // new policy statement. - statement := policy.Statement{} - // set the condition. - statement.Conditions = conditions - return statement - } - - testCases := []struct { - statementCondition policy.Statement - condition map[string]set.StringSet - - expectedMatch bool - }{ - - // Test case - 1. - // StringEquals condition matches. - { - - statementCondition: getStatementWithCondition("StringEquals", "s3:prefix", "Asia/"), - condition: getInnerMap("prefix", "Asia/"), - - expectedMatch: true, - }, - // Test case - 2. - // StringEquals condition doesn't match. - { - - statementCondition: getStatementWithCondition("StringEquals", "s3:prefix", "Asia/"), - condition: getInnerMap("prefix", "Africa/"), - - expectedMatch: false, - }, - // Test case - 3. - // StringEquals condition matches. - { - - statementCondition: getStatementWithCondition("StringEquals", "s3:max-keys", "Asia/"), - condition: getInnerMap("max-keys", "Asia/"), - - expectedMatch: true, - }, - // Test case - 4. - // StringEquals condition doesn't match. - { - - statementCondition: getStatementWithCondition("StringEquals", "s3:max-keys", "Asia/"), - condition: getInnerMap("max-keys", "Africa/"), - - expectedMatch: false, - }, - // Test case - 5. - // StringNotEquals condition matches. - { - - statementCondition: getStatementWithCondition("StringNotEquals", "s3:prefix", "Asia/"), - condition: getInnerMap("prefix", "Asia/"), - - expectedMatch: false, - }, - // Test case - 6. - // StringNotEquals condition doesn't match. - { - - statementCondition: getStatementWithCondition("StringNotEquals", "s3:prefix", "Asia/"), - condition: getInnerMap("prefix", "Africa/"), - - expectedMatch: true, - }, - // Test case - 7. - // StringNotEquals condition matches. 
- { - - statementCondition: getStatementWithCondition("StringNotEquals", "s3:max-keys", "Asia/"), - condition: getInnerMap("max-keys", "Asia/"), - - expectedMatch: false, - }, - // Test case - 8. - // StringNotEquals condition doesn't match. - { - - statementCondition: getStatementWithCondition("StringNotEquals", "s3:max-keys", "Asia/"), - condition: getInnerMap("max-keys", "Africa/"), - - expectedMatch: true, - }, - // Test case - 9. - // StringLike condition matches. - { - statementCondition: getStatementWithCondition("StringLike", "aws:Referer", "http://www.example.com/"), - condition: getInnerMap("referer", "http://www.example.com/"), - expectedMatch: true, - }, - // Test case - 10. - // StringLike condition doesn't match. - { - statementCondition: getStatementWithCondition("StringLike", "aws:Referer", "http://www.example.com/"), - condition: getInnerMap("referer", "www.somethingelse.com"), - expectedMatch: false, - }, - // Test case - 11. - // StringNotLike condition evaluates to false. - { - statementCondition: getStatementWithCondition("StringNotLike", "aws:Referer", "http://www.example.com/"), - condition: getInnerMap("referer", "http://www.example.com/"), - expectedMatch: false, - }, - // Test case - 12. - // StringNotLike condition evaluates to true. - { - statementCondition: getStatementWithCondition("StringNotLike", "aws:Referer", "http://www.example.com/"), - condition: getInnerMap("referer", "http://somethingelse.com/"), - expectedMatch: true, - }, - // Test case 13. - // IpAddress condition evaluates to true. - { - statementCondition: getStatementWithCondition("IpAddress", "aws:SourceIp", "54.240.143.0/24"), - condition: getInnerMap("ip", "54.240.143.2"), - expectedMatch: true, - }, - // Test case 14. - // IpAddress condition evaluates to false. - { - statementCondition: getStatementWithCondition("IpAddress", "aws:SourceIp", "54.240.143.0/24"), - condition: getInnerMap("ip", "127.240.143.224"), - expectedMatch: false, - }, - // Test case 15. 
- // NotIpAddress condition evaluates to true. - { - statementCondition: getStatementWithCondition("NotIpAddress", "aws:SourceIp", "54.240.143.0/24"), - condition: getInnerMap("ip", "54.240.144.188"), - expectedMatch: true, - }, - // Test case 16. - // NotIpAddress condition evaluates to false. - { - statementCondition: getStatementWithCondition("NotIpAddress", "aws:SourceIp", "54.240.143.0/24"), - condition: getInnerMap("ip", "54.240.143.243"), - expectedMatch: false, - }, - } - - for i, tc := range testCases { - t.Run(fmt.Sprintf("Case %d", i+1), func(t *testing.T) { - // call the function under test and assert the result with the expected result. - doesMatch := bucketPolicyConditionMatch(tc.condition, tc.statementCondition) - if tc.expectedMatch != doesMatch { - t.Errorf("Expected the match to be `%v`; got `%v` - %v %v.", - tc.expectedMatch, doesMatch, tc.condition, tc.statementCondition) - } - }) - } -} diff --git a/cmd/bucket-policy-parser.go b/cmd/bucket-policy-parser.go deleted file mode 100644 index 3d0bb9eb0..000000000 --- a/cmd/bucket-policy-parser.go +++ /dev/null @@ -1,295 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -// Package cmd This file implements AWS Access Policy Language parser in -// accordance with http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html -package cmd - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "sort" - "strings" - - "github.com/minio/minio-go/pkg/policy" - "github.com/minio/minio-go/pkg/set" -) - -var emptyBucketPolicy = policy.BucketAccessPolicy{} - -var conditionKeyActionMap = policy.ConditionKeyMap{ - "s3:prefix": set.CreateStringSet("s3:ListBucket", "s3:ListBucketMultipartUploads"), - "s3:max-keys": set.CreateStringSet("s3:ListBucket", "s3:ListBucketMultipartUploads", - "s3:ListMultipartUploadParts"), -} - -// supportedActionMap - lists all the actions supported by minio. -var supportedActionMap = set.CreateStringSet("*", "s3:*", "s3:GetObject", - "s3:ListBucket", "s3:PutObject", "s3:GetBucketLocation", "s3:DeleteObject", - "s3:AbortMultipartUpload", "s3:ListBucketMultipartUploads", "s3:ListMultipartUploadParts") - -// supported Conditions type. -var supportedConditionsType = set.CreateStringSet("StringEquals", "StringNotEquals", "StringLike", "StringNotLike", "IpAddress", "NotIpAddress") - -// Validate s3:prefix, s3:max-keys are present if not -// supported keys for the conditions. -var supportedConditionsKey = set.CreateStringSet("s3:prefix", "s3:max-keys", "aws:Referer", "aws:SourceIp") - -// supportedEffectMap - supported effects. -var supportedEffectMap = set.CreateStringSet("Allow", "Deny") - -// isValidActions - are actions valid. -func isValidActions(actions set.StringSet) (err error) { - // Statement actions cannot be empty. 
- if actions.IsEmpty() { - err = errors.New("Action list cannot be empty") - return err - } - if unsupportedActions := actions.Difference(supportedActionMap); !unsupportedActions.IsEmpty() { - err = fmt.Errorf("Unsupported actions found: ‘%#v’, please validate your policy document", - unsupportedActions) - return err - } - return nil -} - -// isValidEffect - is effect valid. -func isValidEffect(effect string) (err error) { - // Statement effect cannot be empty. - if effect == "" { - err = errors.New("Policy effect cannot be empty") - return err - } - if !supportedEffectMap.Contains(effect) { - err = errors.New("Unsupported Effect found: ‘" + effect + "’, please validate your policy document") - return err - } - return nil -} - -// isValidResources - are valid resources. -func isValidResources(resources set.StringSet) (err error) { - // Statement resources cannot be empty. - if resources.IsEmpty() { - err = errors.New("Resource list cannot be empty") - return err - } - for resource := range resources { - if !hasPrefix(resource, bucketARNPrefix) { - err = errors.New("Unsupported resource style found: ‘" + resource + "’, please validate your policy document") - return err - } - resourceSuffix := strings.SplitAfter(resource, bucketARNPrefix)[1] - if len(resourceSuffix) == 0 || hasPrefix(resourceSuffix, "/") { - err = errors.New("Invalid resource style found: ‘" + resource + "’, please validate your policy document") - return err - } - } - return nil -} - -// isValidPrincipals - are valid principals. -func isValidPrincipals(principal policy.User) (err error) { - if principal.AWS.IsEmpty() { - return errors.New("Principal cannot be empty") - } - if diff := principal.AWS.Difference(set.CreateStringSet("*")); !diff.IsEmpty() { - // Minio does not support or implement IAM, "*" is the only valid value. 
- // Amazon s3 doc on principal: - // http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Principal - err = fmt.Errorf("Unsupported principals found: ‘%#v’, please validate your policy document", - diff) - return err - } - return nil -} - -// isValidConditions - returns nil if the given conditions valid and -// corresponding error otherwise. -func isValidConditions(actions set.StringSet, conditions policy.ConditionMap) (err error) { - // Verify conditions should be valid. Validate if only - // supported condition keys are present and return error - // otherwise. - conditionKeyVal := make(map[string]set.StringSet) - for conditionType := range conditions { - if !supportedConditionsType.Contains(conditionType) { - err = fmt.Errorf("Unsupported condition type '%s', please validate your policy document", conditionType) - return err - } - for key, value := range conditions[conditionType] { - if !supportedConditionsKey.Contains(key) { - err = fmt.Errorf("Unsupported condition key '%s', please validate your policy document", conditionType) - return err - } - - compatibleActions := conditionKeyActionMap[key] - if !compatibleActions.IsEmpty() && - compatibleActions.Intersection(actions).IsEmpty() { - err = fmt.Errorf("Unsupported condition key %s for the given actions %s, "+ - "please validate your policy document", key, actions) - return err - } - - conditionVal, ok := conditionKeyVal[key] - if ok && !value.Intersection(conditionVal).IsEmpty() { - err = fmt.Errorf("Ambigious condition values for key '%s', please validate your policy document", key) - return err - } - conditionKeyVal[key] = value - } - } - return nil -} - -// List of actions for which prefixes are not allowed. -var invalidPrefixActions = set.StringSet{ - "s3:GetBucketLocation": {}, - "s3:ListBucket": {}, - "s3:ListBucketMultipartUploads": {}, - // Add actions which do not honor prefixes. -} - -// resourcePrefix - provides the prefix removing any wildcards. 
-func resourcePrefix(resource string) string { - if strings.HasSuffix(resource, "*") { - resource = strings.TrimSuffix(resource, "*") - } - return resource -} - -// checkBucketPolicyResources validates Resources in unmarshalled bucket policy structure. -// - Resources are validated against the given set of Actions. -// - -func checkBucketPolicyResources(bucket string, bucketPolicy policy.BucketAccessPolicy) APIErrorCode { - // Validate statements for special actions and collect resources - // for others to validate nesting. - var resourceMap = set.NewStringSet() - for _, statement := range bucketPolicy.Statements { - for action := range statement.Actions { - for resource := range statement.Resources { - resourcePrefix := strings.SplitAfter(resource, bucketARNPrefix)[1] - if _, ok := invalidPrefixActions[action]; ok { - // Resource prefix is not equal to bucket for - // prefix invalid actions, reject them. - if resourcePrefix != bucket { - return ErrMalformedPolicy - } - } else { - // For all other actions validate if resourcePrefix begins - // with bucket name, if not reject them. - if strings.Split(resourcePrefix, "/")[0] != bucket { - return ErrMalformedPolicy - } - // All valid resources collect them separately to verify nesting. - resourceMap.Add(resourcePrefix) - } - } - } - } - - var resources []string - for resource := range resourceMap { - resources = append(resources, resourcePrefix(resource)) - } - - // Sort strings as shorter first. - sort.Strings(resources) - - for len(resources) > 1 { - var resource string - resource, resources = resources[0], resources[1:] - // Loop through all resources, if one of them matches with - // previous shorter one, it means we have detected - // nesting. Reject such rules. - for _, otherResource := range resources { - // Common prefix reject such rules. - if hasPrefix(otherResource, resource) { - return ErrPolicyNesting - } - } - } - - // No errors found. 
- return ErrNone -} - -// parseBucketPolicy - parses and validates if bucket policy is of -// proper JSON and follows allowed restrictions with policy standards. -func parseBucketPolicy(bucketPolicyReader io.Reader, bktPolicy *policy.BucketAccessPolicy) (err error) { - // Parse bucket policy reader. - decoder := json.NewDecoder(bucketPolicyReader) - if err = decoder.Decode(bktPolicy); err != nil { - return err - } - - // Policy version cannot be empty. - if len(bktPolicy.Version) == 0 { - err = errors.New("Policy version cannot be empty") - return err - } - - // Policy statements cannot be empty. - if len(bktPolicy.Statements) == 0 { - err = errors.New("Policy statement cannot be empty") - return err - } - - // Loop through all policy statements and validate entries. - for _, statement := range bktPolicy.Statements { - // Statement effect should be valid. - if err := isValidEffect(statement.Effect); err != nil { - return err - } - // Statement principal should be supported format. - if err := isValidPrincipals(statement.Principal); err != nil { - return err - } - // Statement actions should be valid. - if err := isValidActions(statement.Actions); err != nil { - return err - } - // Statement resources should be valid. - if err := isValidResources(statement.Resources); err != nil { - return err - } - // Statement conditions should be valid. - if err := isValidConditions(statement.Actions, statement.Conditions); err != nil { - return err - } - } - - // Separate deny and allow statements, so that we can apply deny - // statements in the beginning followed by Allow statements. - var denyStatements []policy.Statement - var allowStatements []policy.Statement - for _, statement := range bktPolicy.Statements { - if statement.Effect == "Deny" { - denyStatements = append(denyStatements, statement) - continue - } - - // else if statement.Effect == "Allow" - allowStatements = append(allowStatements, statement) - } - - // Deny statements are enforced first once matched. 
- bktPolicy.Statements = append(denyStatements, allowStatements...) - - // Return successfully parsed policy structure. - return nil -} diff --git a/cmd/bucket-policy-parser_test.go b/cmd/bucket-policy-parser_test.go deleted file mode 100644 index 13c3c3101..000000000 --- a/cmd/bucket-policy-parser_test.go +++ /dev/null @@ -1,867 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package cmd - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" - "testing" - - "github.com/minio/minio-go/pkg/policy" - "github.com/minio/minio-go/pkg/set" -) - -// Common bucket actions for both read and write policies. -var ( - readWriteBucketActions = []string{ - "s3:GetBucketLocation", - "s3:ListBucket", - "s3:ListBucketMultipartUploads", - // Add more bucket level read-write actions here. - } - readWriteObjectActions = []string{ - "s3:AbortMultipartUpload", - "s3:DeleteObject", - "s3:GetObject", - "s3:ListMultipartUploadParts", - "s3:PutObject", - // Add more object level read-write actions here. - } -) - -// Write only actions. -var ( - writeOnlyBucketActions = []string{ - "s3:GetBucketLocation", - "s3:ListBucketMultipartUploads", - // Add more bucket level write actions here. - } - writeOnlyObjectActions = []string{ - "s3:AbortMultipartUpload", - "s3:DeleteObject", - "s3:ListMultipartUploadParts", - "s3:PutObject", - // Add more object level write actions here. 
- } -) - -// Read only actions. -var ( - readOnlyBucketActions = []string{ - "s3:GetBucketLocation", - "s3:ListBucket", - // Add more bucket level read actions here. - } - readOnlyObjectActions = []string{ - "s3:GetObject", - // Add more object level read actions here. - } -) - -// Obtain bucket statement for read-write bucketPolicy. -func getReadWriteObjectStatement(bucketName, objectPrefix string) policy.Statement { - objectResourceStatement := policy.Statement{} - objectResourceStatement.Effect = "Allow" - objectResourceStatement.Principal = policy.User{ - AWS: set.StringSet{"*": struct{}{}}, - } - objectResourceStatement.Resources = set.CreateStringSet([]string{fmt.Sprintf("%s%s", bucketARNPrefix, bucketName+"/"+objectPrefix+"*")}...) - objectResourceStatement.Actions = set.CreateStringSet(readWriteObjectActions...) - return objectResourceStatement -} - -// Obtain object statement for read-write bucketPolicy. -func getReadWriteBucketStatement(bucketName, objectPrefix string) policy.Statement { - bucketResourceStatement := policy.Statement{} - bucketResourceStatement.Effect = "Allow" - bucketResourceStatement.Principal = policy.User{ - AWS: set.StringSet{"*": struct{}{}}, - } - bucketResourceStatement.Resources = set.CreateStringSet([]string{fmt.Sprintf("%s%s", bucketARNPrefix, bucketName)}...) - bucketResourceStatement.Actions = set.CreateStringSet(readWriteBucketActions...) - return bucketResourceStatement -} - -// Obtain statements for read-write bucketPolicy. -func getReadWriteStatement(bucketName, objectPrefix string) []policy.Statement { - statements := []policy.Statement{} - // Save the read write policy. - statements = append(statements, getReadWriteBucketStatement(bucketName, objectPrefix), getReadWriteObjectStatement(bucketName, objectPrefix)) - return statements -} - -// Obtain bucket statement for read only bucketPolicy. 
-func getReadOnlyBucketStatement(bucketName, objectPrefix string) policy.Statement { - bucketResourceStatement := policy.Statement{} - bucketResourceStatement.Effect = "Allow" - bucketResourceStatement.Principal = policy.User{ - AWS: set.StringSet{"*": struct{}{}}, - } - bucketResourceStatement.Resources = set.CreateStringSet([]string{fmt.Sprintf("%s%s", bucketARNPrefix, bucketName)}...) - bucketResourceStatement.Actions = set.CreateStringSet(readOnlyBucketActions...) - return bucketResourceStatement -} - -// Obtain object statement for read only bucketPolicy. -func getReadOnlyObjectStatement(bucketName, objectPrefix string) policy.Statement { - objectResourceStatement := policy.Statement{} - objectResourceStatement.Effect = "Allow" - objectResourceStatement.Principal = policy.User{ - AWS: set.StringSet{"*": struct{}{}}, - } - objectResourceStatement.Resources = set.CreateStringSet([]string{fmt.Sprintf("%s%s", bucketARNPrefix, bucketName+"/"+objectPrefix+"*")}...) - objectResourceStatement.Actions = set.CreateStringSet(readOnlyObjectActions...) - return objectResourceStatement -} - -// Obtain statements for read only bucketPolicy. -func getReadOnlyStatement(bucketName, objectPrefix string) []policy.Statement { - statements := []policy.Statement{} - // Save the read only policy. - statements = append(statements, getReadOnlyBucketStatement(bucketName, objectPrefix), getReadOnlyObjectStatement(bucketName, objectPrefix)) - return statements -} - -// Obtain bucket statements for write only bucketPolicy. -func getWriteOnlyBucketStatement(bucketName, objectPrefix string) policy.Statement { - - bucketResourceStatement := policy.Statement{} - bucketResourceStatement.Effect = "Allow" - bucketResourceStatement.Principal = policy.User{ - AWS: set.StringSet{"*": struct{}{}}, - } - bucketResourceStatement.Resources = set.CreateStringSet([]string{fmt.Sprintf("%s%s", bucketARNPrefix, bucketName)}...) 
- bucketResourceStatement.Actions = set.CreateStringSet(writeOnlyBucketActions...) - return bucketResourceStatement -} - -// Obtain object statements for write only bucketPolicy. -func getWriteOnlyObjectStatement(bucketName, objectPrefix string) policy.Statement { - objectResourceStatement := policy.Statement{} - objectResourceStatement.Effect = "Allow" - objectResourceStatement.Principal = policy.User{ - AWS: set.StringSet{"*": struct{}{}}, - } - objectResourceStatement.Resources = set.CreateStringSet([]string{fmt.Sprintf("%s%s", bucketARNPrefix, bucketName+"/"+objectPrefix+"*")}...) - objectResourceStatement.Actions = set.CreateStringSet(writeOnlyObjectActions...) - return objectResourceStatement -} - -// Obtain statements for write only bucketPolicy. -func getWriteOnlyStatement(bucketName, objectPrefix string) []policy.Statement { - statements := []policy.Statement{} - // Write only policy. - // Save the write only policy. - statements = append(statements, getWriteOnlyBucketStatement(bucketName, objectPrefix), getWriteOnlyBucketStatement(bucketName, objectPrefix)) - return statements -} - -// Tests validate Action validator. -func TestIsValidActions(t *testing.T) { - testCases := []struct { - // input. - actions set.StringSet - // expected output. - err error - // flag indicating whether the test should pass. - shouldPass bool - }{ - // Inputs with unsupported Action. - // Test case - 1. - // "s3:ListObject" is an invalid Action. - {set.CreateStringSet([]string{"s3:GetObject", "s3:ListObject", "s3:RemoveObject"}...), - errors.New("Unsupported actions found: ‘set.StringSet{\"s3:RemoveObject\":struct {}{}, \"s3:ListObject\":struct {}{}}’, please validate your policy document"), false}, - // Test case - 2. - // Empty Actions. - {set.CreateStringSet([]string{}...), errors.New("Action list cannot be empty"), false}, - // Test case - 3. - // "s3:DeleteEverything"" is an invalid Action. 
- {set.CreateStringSet([]string{"s3:GetObject", "s3:ListBucket", "s3:PutObject", "s3:DeleteEverything"}...), - errors.New("Unsupported actions found: ‘set.StringSet{\"s3:DeleteEverything\":struct {}{}}’, please validate your policy document"), false}, - // Inputs with valid Action. - // Test Case - 4. - {set.CreateStringSet([]string{ - "s3:*", "*", "s3:GetObject", "s3:ListBucket", - "s3:PutObject", "s3:GetBucketLocation", "s3:DeleteObject", - "s3:AbortMultipartUpload", "s3:ListBucketMultipartUploads", - "s3:ListMultipartUploadParts"}...), nil, true}, - } - for i, testCase := range testCases { - err := isValidActions(testCase.actions) - if err != nil && testCase.shouldPass { - t.Errorf("Test %d: Expected to pass, but failed with: %s", i+1, err.Error()) - } - if err == nil && !testCase.shouldPass { - t.Errorf("Test %d: Expected to fail with \"%s\", but passed instead", i+1, testCase.err.Error()) - } - } -} - -// Tests validate Effect validator. -func TestIsValidEffect(t *testing.T) { - testCases := []struct { - // input. - effect string - // expected output. - err error - // flag indicating whether the test should pass. - shouldPass bool - }{ - // Inputs with unsupported Effect. - // Test case - 1. - {"", errors.New("Policy effect cannot be empty"), false}, - // Test case - 2. - {"DontAllow", errors.New("Unsupported Effect found: ‘DontAllow’, please validate your policy document"), false}, - // Test case - 3. - {"NeverAllow", errors.New("Unsupported Effect found: ‘NeverAllow’, please validate your policy document"), false}, - // Test case - 4. - {"AllowAlways", errors.New("Unsupported Effect found: ‘AllowAlways’, please validate your policy document"), false}, - - // Inputs with valid Effect. - // Test Case - 5. - {"Allow", nil, true}, - // Test Case - 6. 
- {"Deny", nil, true}, - } - for i, testCase := range testCases { - err := isValidEffect(testCase.effect) - if err != nil && testCase.shouldPass { - t.Errorf("Test %d: Expected to pass, but failed with: %s", i+1, err.Error()) - } - if err == nil && !testCase.shouldPass { - t.Errorf("Test %d: Expected to fail with \"%s\", but passed instead", i+1, testCase.err.Error()) - } - // Failed as expected, but does it fail for the expected reason. - if err != nil && !testCase.shouldPass { - if err.Error() != testCase.err.Error() { - t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\"", i+1, testCase.err.Error(), err.Error()) - } - } - } -} - -// Tests validate Resources validator. -func TestIsValidResources(t *testing.T) { - testCases := []struct { - // input. - resources []string - // expected output. - err error - // flag indicating whether the test should pass. - shouldPass bool - }{ - // Inputs with unsupported Action. - // Test case - 1. - // Empty Resources. - {[]string{}, errors.New("Resource list cannot be empty"), false}, - // Test case - 2. - // A valid resource should have prefix bucketARNPrefix. - {[]string{"my-resource"}, errors.New("Unsupported resource style found: ‘my-resource’, please validate your policy document"), false}, - // Test case - 3. - // A Valid resource should have bucket name followed by bucketARNPrefix. - {[]string{bucketARNPrefix}, errors.New("Invalid resource style found: ‘arn:aws:s3:::’, please validate your policy document"), false}, - // Test Case - 4. - // Valid resource shouldn't have slash('/') followed by bucketARNPrefix. - {[]string{bucketARNPrefix + "/"}, errors.New("Invalid resource style found: ‘arn:aws:s3:::/’, please validate your policy document"), false}, - - // Test cases with valid Resources. 
- {[]string{bucketARNPrefix + "my-bucket"}, nil, true}, - {[]string{bucketARNPrefix + "my-bucket/Asia/*"}, nil, true}, - {[]string{bucketARNPrefix + "my-bucket/Asia/India/*"}, nil, true}, - } - for i, testCase := range testCases { - err := isValidResources(set.CreateStringSet(testCase.resources...)) - if err != nil && testCase.shouldPass { - t.Errorf("Test %d: Expected to pass, but failed with: %s", i+1, err.Error()) - } - if err == nil && !testCase.shouldPass { - t.Errorf("Test %d: Expected to fail with \"%s\", but passed instead", i+1, testCase.err.Error()) - } - // Failed as expected, but does it fail for the expected reason. - if err != nil && !testCase.shouldPass { - if err.Error() != testCase.err.Error() { - t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\"", i+1, testCase.err.Error(), err.Error()) - } - } - } -} - -// Tests validate principals validator. -func TestIsValidPrincipals(t *testing.T) { - testCases := []struct { - // input. - principals []string - // expected output. - err error - // flag indicating whether the test should pass. - shouldPass bool - }{ - // Inputs with unsupported Principals. - // Test case - 1. - // Empty Principals list. - {[]string{}, errors.New("Principal cannot be empty"), false}, - // Test case - 2. - // "*" is the only valid principal. - {[]string{"my-principal"}, errors.New("Unsupported principals found: ‘set.StringSet{\"my-principal\":struct {}{}}’, please validate your policy document"), false}, - // Test case - 3. - {[]string{"*", "111122233"}, errors.New("Unsupported principals found: ‘set.StringSet{\"111122233\":struct {}{}}’, please validate your policy document"), false}, - // Test case - 4. - // Test case with valid principal value. 
- {[]string{"*"}, nil, true}, - } - for i, testCase := range testCases { - u := policy.User{ - AWS: set.CreateStringSet(testCase.principals...), - } - err := isValidPrincipals(u) - if err != nil && testCase.shouldPass { - t.Errorf("Test %d: Expected to pass, but failed with: %s", i+1, err.Error()) - } - if err == nil && !testCase.shouldPass { - t.Errorf("Test %d: Expected to fail with \"%s\", but passed instead", i+1, testCase.err.Error()) - } - // Failed as expected, but does it fail for the expected reason. - if err != nil && !testCase.shouldPass { - if err.Error() != testCase.err.Error() { - t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\"", i+1, testCase.err.Error(), err.Error()) - } - } - } -} - -// getEmptyConditionMap - returns a function that generates a -// condition key map for a given key. -func getEmptyConditionMap(conditionKey string) func() policy.ConditionMap { - emptyConditonGenerator := func() policy.ConditionMap { - emptyMap := make(policy.ConditionKeyMap) - conditions := make(policy.ConditionMap) - conditions[conditionKey] = emptyMap - return conditions - } - return emptyConditonGenerator -} - -// Tests validate policy.Statement condition validator. -func TestIsValidConditions(t *testing.T) { - // returns empty conditions map. - setEmptyConditions := func() policy.ConditionMap { - return make(policy.ConditionMap) - } - - // returns map with the "StringEquals" set to empty map. - setEmptyStringEquals := getEmptyConditionMap("StringEquals") - - // returns map with the "StringNotEquals" set to empty map. - setEmptyStringNotEquals := getEmptyConditionMap("StringNotEquals") - - // returns map with the "StringLike" set to empty map. - setEmptyStringLike := getEmptyConditionMap("StringLike") - - // returns map with the "StringNotLike" set to empty map. - setEmptyStringNotLike := getEmptyConditionMap("StringNotLike") - - // returns map with the "IpAddress" set to empty map. 
- setEmptyIPAddress := getEmptyConditionMap("IpAddress") - - // returns map with "NotIpAddress" set to empty map. - setEmptyNotIPAddress := getEmptyConditionMap("NotIpAddress") - - // Generate conditions. - generateConditions := func(key1, key2, value string) policy.ConditionMap { - innerMap := make(policy.ConditionKeyMap) - innerMap[key2] = set.CreateStringSet(value) - conditions := make(policy.ConditionMap) - conditions[key1] = innerMap - return conditions - } - - // generate ambigious conditions. - generateAmbigiousConditions := func() policy.ConditionMap { - prefixMap := make(policy.ConditionKeyMap) - prefixMap["s3:prefix"] = set.CreateStringSet("Asia/") - conditions := make(policy.ConditionMap) - conditions["StringEquals"] = prefixMap - conditions["StringNotEquals"] = prefixMap - return conditions - } - - // generate valid and non valid type in the condition map. - generateValidInvalidConditions := func() policy.ConditionMap { - innerMap := make(policy.ConditionKeyMap) - innerMap["s3:prefix"] = set.CreateStringSet("Asia/") - conditions := make(policy.ConditionMap) - conditions["StringEquals"] = innerMap - conditions["InvalidType"] = innerMap - return conditions - } - - // generate valid and invalid keys for valid types in the same condition map. - generateValidInvalidConditionKeys := func() policy.ConditionMap { - innerMapValid := make(policy.ConditionKeyMap) - innerMapValid["s3:prefix"] = set.CreateStringSet("Asia/") - innerMapInValid := make(map[string]set.StringSet) - innerMapInValid["s3:invalid"] = set.CreateStringSet("Asia/") - conditions := make(policy.ConditionMap) - conditions["StringEquals"] = innerMapValid - conditions["StringEquals"] = innerMapInValid - return conditions - } - - // List of Conditions used for test cases. 
- testConditions := []policy.ConditionMap{ - generateConditions("StringValues", "s3:max-keys", "100"), - generateConditions("StringEquals", "s3:Object", "100"), - generateAmbigiousConditions(), - generateValidInvalidConditions(), - generateValidInvalidConditionKeys(), - setEmptyConditions(), - setEmptyStringEquals(), - setEmptyStringNotEquals(), - setEmptyStringLike(), - setEmptyStringNotLike(), - setEmptyIPAddress(), - setEmptyNotIPAddress(), - generateConditions("StringEquals", "s3:prefix", "Asia/"), - generateConditions("StringEquals", "s3:max-keys", "100"), - generateConditions("StringNotEquals", "s3:prefix", "Asia/"), - generateConditions("StringNotEquals", "s3:max-keys", "100"), - } - - getObjectActionSet := set.CreateStringSet("s3:GetObject") - roBucketActionSet := set.CreateStringSet(readOnlyBucketActions...) - maxKeysConditionErr := fmt.Errorf("Unsupported condition key %s for the given actions %s, "+ - "please validate your policy document", "s3:max-keys", getObjectActionSet) - testCases := []struct { - inputActions set.StringSet - inputCondition policy.ConditionMap - // expected result. - expectedErr error - // flag indicating whether test should pass. - shouldPass bool - }{ - // Malformed conditions. - // Test case - 1. - // "StringValues" is an invalid type. - {roBucketActionSet, testConditions[0], fmt.Errorf("Unsupported condition type 'StringValues', " + - "please validate your policy document"), false}, - // Test case - 2. - // "s3:Object" is an invalid key. - {roBucketActionSet, testConditions[1], fmt.Errorf("Unsupported condition key " + - "'StringEquals', please validate your policy document"), false}, - // Test case - 3. - // Test case with Ambigious conditions set. - {roBucketActionSet, testConditions[2], fmt.Errorf("Ambigious condition values for key 's3:prefix', " + - "please validate your policy document"), false}, - // Test case - 4. - // Test case with valid and invalid condition types. 
- {roBucketActionSet, testConditions[3], fmt.Errorf("Unsupported condition type 'InvalidType', " + - "please validate your policy document"), false}, - // Test case - 5. - // Test case with valid and invalid condition keys. - {roBucketActionSet, testConditions[4], fmt.Errorf("Unsupported condition key 'StringEquals', " + - "please validate your policy document"), false}, - // Test cases with valid conditions. - // Test case - 6. - {roBucketActionSet, testConditions[5], nil, true}, - // Test case - 7. - {roBucketActionSet, testConditions[6], nil, true}, - // Test case - 8. - {roBucketActionSet, testConditions[7], nil, true}, - // Test case - 9. - {roBucketActionSet, testConditions[8], nil, true}, - // Test case - 10. - {roBucketActionSet, testConditions[9], nil, true}, - // Test case - 11. - {roBucketActionSet, testConditions[10], nil, true}, - // Test case - 12. - {roBucketActionSet, testConditions[11], nil, true}, - // Test case - 13. - {roBucketActionSet, testConditions[12], nil, true}, - // Test case - 11. - {roBucketActionSet, testConditions[13], nil, true}, - // Test case - 12. - {roBucketActionSet, testConditions[14], nil, true}, - // Test case - 13. - {getObjectActionSet, testConditions[15], maxKeysConditionErr, false}, - } - for i, testCase := range testCases { - actualErr := isValidConditions(testCase.inputActions, testCase.inputCondition) - if actualErr != nil && testCase.shouldPass { - t.Errorf("Test %d: Expected to pass, but failed with: %s", i+1, actualErr.Error()) - } - if actualErr == nil && !testCase.shouldPass { - t.Errorf("Test %d: Expected to fail with \"%s\", but passed instead", i+1, testCase.expectedErr.Error()) - } - // Failed as expected, but does it fail for the expected reason. 
- if actualErr != nil && !testCase.shouldPass { - if actualErr.Error() != testCase.expectedErr.Error() { - t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\"", i+1, testCase.expectedErr.Error(), actualErr.Error()) - } - } - } -} - -// Tests validate Policy Action and Resource fields. -func TestCheckbucketPolicyResources(t *testing.T) { - // constructing policy statement without invalidPrefixActions (check bucket-policy-parser.go). - setValidPrefixActions := func(statements []policy.Statement) []policy.Statement { - statements[0].Actions = set.CreateStringSet([]string{"s3:DeleteObject", "s3:PutObject"}...) - return statements - } - // contracting policy statement with recursive resources. - // should result in ErrMalformedPolicy - setRecurseResource := func(statements []policy.Statement) []policy.Statement { - statements[0].Resources = set.CreateStringSet([]string{"arn:aws:s3:::minio-bucket/Asia/*", "arn:aws:s3:::minio-bucket/Asia/India/*"}...) - return statements - } - - // constructing policy statement with lexically close characters. - // should not result in ErrMalformedPolicy - setResourceLexical := func(statements []policy.Statement) []policy.Statement { - statements[0].Resources = set.CreateStringSet([]string{"arn:aws:s3:::minio-bucket/op*", "arn:aws:s3:::minio-bucket/oo*"}...) - return statements - } - - // List of bucketPolicy used for tests. - bucketAccessPolicies := []policy.BucketAccessPolicy{ - // bucketPolicy - 1. - // Contains valid read only policy statement. - {Version: "1.0", Statements: getReadOnlyStatement("minio-bucket", "")}, - // bucketPolicy - 2. - // Contains valid read-write only policy statement. - {Version: "1.0", Statements: getReadWriteStatement("minio-bucket", "Asia/")}, - // bucketPolicy - 3. - // Contains valid write only policy statement. - {Version: "1.0", Statements: getWriteOnlyStatement("minio-bucket", "Asia/India/")}, - // bucketPolicy - 4. - // Contains invalidPrefixActions. 
- // Since resourcePrefix is not to the bucket-name, it return ErrMalformedPolicy. - {Version: "1.0", Statements: getReadOnlyStatement("minio-bucket-fail", "Asia/India/")}, - // bucketPolicy - 5. - // constructing policy statement without invalidPrefixActions (check bucket-policy-parser.go). - // but bucket part of the resource is not equal to the bucket name. - // this results in return of ErrMalformedPolicy. - {Version: "1.0", Statements: setValidPrefixActions(getWriteOnlyStatement("minio-bucket-fail", "Asia/India/"))}, - // bucketPolicy - 6. - // contracting policy statement with recursive resources. - // should result in ErrMalformedPolicy - {Version: "1.0", Statements: setRecurseResource(setValidPrefixActions(getWriteOnlyStatement("minio-bucket", "")))}, - // BucketPolciy - 7. - // constructing policy statement with non recursive but - // lexically close resources. - // should result in ErrNone. - {Version: "1.0", Statements: setResourceLexical(setValidPrefixActions(getWriteOnlyStatement("minio-bucket", "oo")))}, - } - - testCases := []struct { - inputPolicy policy.BucketAccessPolicy - // expected results. - apiErrCode APIErrorCode - // Flag indicating whether the test should pass. - shouldPass bool - }{ - // Test case - 1. - {bucketAccessPolicies[0], ErrNone, true}, - // Test case - 2. - {bucketAccessPolicies[1], ErrNone, true}, - // Test case - 3. - {bucketAccessPolicies[2], ErrNone, true}, - // Test case - 4. - // contains invalidPrefixActions (check bucket-policy-parser.go). - // Resource prefix will not be equal to the bucket name in this case. - {bucketAccessPolicies[3], ErrMalformedPolicy, false}, - // Test case - 5. - // actions contain invalidPrefixActions (check bucket-policy-parser.go). - // Resource prefix bucket part is not equal to the bucket name in this case. - {bucketAccessPolicies[4], ErrMalformedPolicy, false}, - // Test case - 6. - // contracting policy statement with recursive resources. - // should result in ErrPolicyNesting. 
- {bucketAccessPolicies[5], ErrPolicyNesting, false}, - // Test case - 7. - // constructing policy statement with lexically close - // characters. - // should result in ErrNone. - {bucketAccessPolicies[6], ErrNone, true}, - } - for i, testCase := range testCases { - apiErrCode := checkBucketPolicyResources("minio-bucket", testCase.inputPolicy) - if apiErrCode != ErrNone && testCase.shouldPass { - t.Errorf("Test %d: Expected to pass, but failed with Errocode %v", i+1, apiErrCode) - } - if apiErrCode == ErrNone && !testCase.shouldPass { - t.Errorf("Test %d: Expected to fail with ErrCode %v, but passed instead", i+1, testCase.apiErrCode) - } - // Failed as expected, but does it fail for the expected reason. - if apiErrCode != ErrNone && !testCase.shouldPass { - if testCase.apiErrCode != apiErrCode { - t.Errorf("Test %d: Expected to fail with error code %v, but instead failed with error code %v", i+1, testCase.apiErrCode, apiErrCode) - } - } - } -} - -// Tests validate parsing of BucketAccessPolicy. -func TestParseBucketPolicy(t *testing.T) { - // set Unsupported Actions. - setUnsupportedActions := func(statements []policy.Statement) []policy.Statement { - // "s3:DeleteEverything"" is an Unsupported Action. - statements[0].Actions = set.CreateStringSet([]string{"s3:GetObject", "s3:ListBucket", "s3:PutObject", "s3:DeleteEverything"}...) - return statements - } - // set unsupported Effect. - setUnsupportedEffect := func(statements []policy.Statement) []policy.Statement { - // Effect "Don't allow" is Unsupported. - statements[0].Effect = "DontAllow" - return statements - } - // set unsupported principals. - setUnsupportedPrincipals := func(statements []policy.Statement) []policy.Statement { - // "User1111"" is an Unsupported Principal. - statements[0].Principal = policy.User{ - AWS: set.CreateStringSet([]string{"*", "User1111"}...), - } - return statements - } - // set unsupported Resources. 
- setUnsupportedResources := func(statements []policy.Statement) []policy.Statement { - // "s3:DeleteEverything"" is an Unsupported Action. - statements[0].Resources = set.CreateStringSet([]string{"my-resource"}...) - return statements - } - // List of bucketPolicy used for test cases. - bucketAccesPolicies := []policy.BucketAccessPolicy{ - // bucketPolicy - 0. - // bucketPolicy statement empty. - {Version: "1.0"}, - // bucketPolicy - 1. - // bucketPolicy version empty. - {Version: "", Statements: []policy.Statement{}}, - // bucketPolicy - 2. - // Readonly bucketPolicy. - {Version: "1.0", Statements: getReadOnlyStatement("minio-bucket", "")}, - // bucketPolicy - 3. - // Read-Write bucket policy. - {Version: "1.0", Statements: getReadWriteStatement("minio-bucket", "Asia/")}, - // bucketPolicy - 4. - // Write only bucket policy. - {Version: "1.0", Statements: getWriteOnlyStatement("minio-bucket", "Asia/India/")}, - // bucketPolicy - 5. - // bucketPolicy statement contains unsupported action. - {Version: "1.0", Statements: setUnsupportedActions(getReadOnlyStatement("minio-bucket", ""))}, - // bucketPolicy - 6. - // bucketPolicy statement contains unsupported Effect. - {Version: "1.0", Statements: setUnsupportedEffect(getReadWriteStatement("minio-bucket", "Asia/"))}, - // bucketPolicy - 7. - // bucketPolicy statement contains unsupported Principal. - {Version: "1.0", Statements: setUnsupportedPrincipals(getWriteOnlyStatement("minio-bucket", "Asia/India/"))}, - // bucketPolicy - 8. - // bucketPolicy statement contains unsupported Resource. - {Version: "1.0", Statements: setUnsupportedResources(getWriteOnlyStatement("minio-bucket", "Asia/India/"))}, - } - - testCases := []struct { - inputPolicy policy.BucketAccessPolicy - // expected results. - expectedPolicy policy.BucketAccessPolicy - err error - // Flag indicating whether the test should pass. - shouldPass bool - }{ - // Test case - 1. - // bucketPolicy statement empty. 
- {bucketAccesPolicies[0], policy.BucketAccessPolicy{}, errors.New("Policy statement cannot be empty"), false}, - // Test case - 2. - // bucketPolicy version empty. - {bucketAccesPolicies[1], policy.BucketAccessPolicy{}, errors.New("Policy version cannot be empty"), false}, - // Test case - 3. - // Readonly bucketPolicy. - {bucketAccesPolicies[2], bucketAccesPolicies[2], nil, true}, - // Test case - 4. - // Read-Write bucket policy. - {bucketAccesPolicies[3], bucketAccesPolicies[3], nil, true}, - // Test case - 5. - // Write only bucket policy. - {bucketAccesPolicies[4], bucketAccesPolicies[4], nil, true}, - // Test case - 6. - // bucketPolicy statement contains unsupported action. - {bucketAccesPolicies[5], bucketAccesPolicies[5], fmt.Errorf("Unsupported actions found: ‘set.StringSet{\"s3:DeleteEverything\":struct {}{}}’, please validate your policy document"), false}, - // Test case - 7. - // bucketPolicy statement contains unsupported Effect. - {bucketAccesPolicies[6], bucketAccesPolicies[6], fmt.Errorf("Unsupported Effect found: ‘DontAllow’, please validate your policy document"), false}, - // Test case - 8. - // bucketPolicy statement contains unsupported Principal. - {bucketAccesPolicies[7], bucketAccesPolicies[7], fmt.Errorf("Unsupported principals found: ‘set.StringSet{\"User1111\":struct {}{}}’, please validate your policy document"), false}, - // Test case - 9. - // bucketPolicy statement contains unsupported Resource. 
- {bucketAccesPolicies[8], bucketAccesPolicies[8], fmt.Errorf("Unsupported resource style found: ‘my-resource’, please validate your policy document"), false}, - } - for i, testCase := range testCases { - var buffer bytes.Buffer - encoder := json.NewEncoder(&buffer) - err := encoder.Encode(testCase.inputPolicy) - if err != nil { - t.Fatalf("Test %d: Couldn't Marshal bucket policy %s", i+1, err) - } - - var actualAccessPolicy = policy.BucketAccessPolicy{} - err = parseBucketPolicy(&buffer, &actualAccessPolicy) - if err != nil && testCase.shouldPass { - t.Errorf("Test %d: Expected to pass, but failed with: %s", i+1, err.Error()) - } - if err == nil && !testCase.shouldPass { - t.Errorf("Test %d: Expected to fail with \"%s\", but passed instead", i+1, testCase.err.Error()) - } - // Failed as expected, but does it fail for the expected reason. - if err != nil && !testCase.shouldPass { - if err.Error() != testCase.err.Error() { - t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\"", i+1, testCase.err.Error(), err.Error()) - } - } - // Test passes as expected, but the output values are verified for correctness here. - if err == nil && testCase.shouldPass { - if !reflect.DeepEqual(testCase.expectedPolicy, actualAccessPolicy) { - t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1) - } - } - } -} - -func TestAWSRefererCondition(t *testing.T) { - resource := set.CreateStringSet([]string{ - fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/"+"Asia"+"*"), - }...) 
- - conditionsKeyMap := make(policy.ConditionKeyMap) - conditionsKeyMap.Add("aws:Referer", - set.CreateStringSet("www.example.com", - "http://www.example.com")) - - requestConditionMap := make(policy.ConditionKeyMap) - requestConditionMap["referer"] = set.CreateStringSet("www.example.com") - - testCases := []struct { - effect string - conditionKey string - match bool - }{ - { - effect: "Allow", - conditionKey: "StringLike", - match: true, - }, - { - effect: "Allow", - conditionKey: "StringNotLike", - match: false, - }, - { - effect: "Deny", - conditionKey: "StringLike", - match: true, - }, - { - effect: "Deny", - conditionKey: "StringNotLike", - match: false, - }, - } - - for i, test := range testCases { - conditions := make(policy.ConditionMap) - conditions[test.conditionKey] = conditionsKeyMap - - allowStatement := policy.Statement{ - Sid: "Testing AWS referer condition", - Effect: test.effect, - Principal: policy.User{ - AWS: set.CreateStringSet("*"), - }, - Resources: resource, - Conditions: conditions, - } - - if result := bucketPolicyConditionMatch(requestConditionMap, allowStatement); result != test.match { - t.Errorf("Test %d - Expected conditons to evaluate to %v but got %v", - i+1, test.match, result) - } - } -} - -func TestAWSSourceIPCondition(t *testing.T) { - resource := set.CreateStringSet([]string{ - fmt.Sprintf("%s%s", bucketARNPrefix, "minio-bucket"+"/"+"Asia"+"*"), - }...) - - conditionsKeyMap := make(policy.ConditionKeyMap) - // Test both IPv4 and IPv6 addresses. 
- conditionsKeyMap.Add("aws:SourceIp", - set.CreateStringSet("54.240.143.0/24", - "2001:DB8:1234:5678::/64")) - - requestConditionMap := make(policy.ConditionKeyMap) - requestConditionMap["ip"] = set.CreateStringSet("54.240.143.2") - - testCases := []struct { - effect string - conditionKey string - match bool - }{ - { - effect: "Allow", - conditionKey: "IpAddress", - match: true, - }, - { - effect: "Allow", - conditionKey: "NotIpAddress", - match: false, - }, - { - effect: "Deny", - conditionKey: "IpAddress", - match: true, - }, - { - effect: "Deny", - conditionKey: "NotIpAddress", - match: false, - }, - } - - for i, test := range testCases { - conditions := make(policy.ConditionMap) - conditions[test.conditionKey] = conditionsKeyMap - - allowStatement := policy.Statement{ - Sid: "Testing AWS referer condition", - Effect: test.effect, - Principal: policy.User{ - AWS: set.CreateStringSet("*"), - }, - Resources: resource, - Conditions: conditions, - } - - if result := bucketPolicyConditionMatch(requestConditionMap, allowStatement); result != test.match { - t.Errorf("Test %d - Expected conditons to evaluate to %v but got %v", - i+1, test.match, result) - } - } -} diff --git a/cmd/bucket-policy.go b/cmd/bucket-policy.go deleted file mode 100644 index ad1b978c9..000000000 --- a/cmd/bucket-policy.go +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package cmd - -import ( - "bytes" - "context" - "encoding/json" - "io" - "reflect" - "sync" - - "github.com/minio/minio-go/pkg/policy" - "github.com/minio/minio/cmd/logger" - "github.com/minio/minio/pkg/hash" -) - -const ( - // Static prefix to be used while constructing bucket ARN. - // refer to S3 docs for more info. - bucketARNPrefix = "arn:aws:s3:::" - - // Bucket policy config name. - bucketPolicyConfig = "policy.json" -) - -// Global bucket policies list, policies are enforced on each bucket looking -// through the policies here. -type bucketPolicies struct { - rwMutex *sync.RWMutex - - // Collection of 'bucket' policies. - bucketPolicyConfigs map[string]policy.BucketAccessPolicy -} - -// Fetch bucket policy for a given bucket. -func (bp bucketPolicies) GetBucketPolicy(bucket string) policy.BucketAccessPolicy { - bp.rwMutex.RLock() - defer bp.rwMutex.RUnlock() - return bp.bucketPolicyConfigs[bucket] -} - -// Set a new bucket policy for a bucket, this operation will overwrite -// any previous bucket policies for the bucket. -func (bp *bucketPolicies) SetBucketPolicy(bucket string, newpolicy policy.BucketAccessPolicy) error { - bp.rwMutex.Lock() - defer bp.rwMutex.Unlock() - - if reflect.DeepEqual(newpolicy, emptyBucketPolicy) { - return errInvalidArgument - } - bp.bucketPolicyConfigs[bucket] = newpolicy - - return nil -} - -// Delete bucket policy from struct for a given bucket. -func (bp *bucketPolicies) DeleteBucketPolicy(bucket string) error { - bp.rwMutex.Lock() - defer bp.rwMutex.Unlock() - delete(bp.bucketPolicyConfigs, bucket) - return nil -} - -// Intialize all bucket policies. -func initBucketPolicies(objAPI ObjectLayer) (*bucketPolicies, error) { - if objAPI == nil { - return nil, errInvalidArgument - } - - // List buckets to proceed loading all notification configuration. 
- buckets, err := objAPI.ListBuckets(context.Background()) - if err != nil { - return nil, err - } - - policies := make(map[string]policy.BucketAccessPolicy) - // Loads bucket policy. - for _, bucket := range buckets { - bp, pErr := ReadBucketPolicy(bucket.Name, objAPI) - if pErr != nil { - // net.Dial fails for rpc client or any - // other unexpected errors during net.Dial. - if !IsErrIgnored(pErr, errDiskNotFound) { - if !isErrBucketPolicyNotFound(pErr) { - return nil, pErr - } - } - // Continue to load other bucket policies if possible. - continue - } - policies[bucket.Name] = bp - } - - // Return all bucket policies. - return &bucketPolicies{ - rwMutex: &sync.RWMutex{}, - bucketPolicyConfigs: policies, - }, nil -} - -// readBucketPolicyJSON - reads bucket policy for an input bucket, returns BucketPolicyNotFound -// if bucket policy is not found. -func readBucketPolicyJSON(bucket string, objAPI ObjectLayer) (bucketPolicyReader io.Reader, err error) { - policyPath := pathJoin(bucketConfigPrefix, bucket, bucketPolicyConfig) - - var buffer bytes.Buffer - ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{BucketName: bucket}) - err = objAPI.GetObject(ctx, minioMetaBucket, policyPath, 0, -1, &buffer, "") - if err != nil { - if isErrObjectNotFound(err) || isErrIncompleteBody(err) { - return nil, PolicyNotFound{Bucket: bucket} - } - return nil, err - } - - return &buffer, nil -} - -// ReadBucketPolicy - reads bucket policy for an input bucket, returns BucketPolicyNotFound -// if bucket policy is not found. This function also parses the bucket policy into an object. -func ReadBucketPolicy(bucket string, objAPI ObjectLayer) (policy.BucketAccessPolicy, error) { - // Read bucket policy JSON. - bucketPolicyReader, err := readBucketPolicyJSON(bucket, objAPI) - if err != nil { - return emptyBucketPolicy, err - } - - // Parse the saved policy. 
- var bp policy.BucketAccessPolicy - if err = parseBucketPolicy(bucketPolicyReader, &bp); err != nil { - return emptyBucketPolicy, err - - } - return bp, nil -} - -// removeBucketPolicy - removes any previously written bucket policy. Returns BucketPolicyNotFound -// if no policies are found. -func removeBucketPolicy(ctx context.Context, bucket string, objAPI ObjectLayer) error { - policyPath := pathJoin(bucketConfigPrefix, bucket, bucketPolicyConfig) - err := objAPI.DeleteObject(ctx, minioMetaBucket, policyPath) - if err != nil { - if _, ok := err.(ObjectNotFound); ok { - return BucketPolicyNotFound{Bucket: bucket} - } - return err - } - return nil -} - -// writeBucketPolicy - save a bucket policy that is assumed to be validated. -func writeBucketPolicy(ctx context.Context, bucket string, objAPI ObjectLayer, bpy policy.BucketAccessPolicy) error { - buf, err := json.Marshal(bpy) - - if err != nil { - logger.LogIf(ctx, err) - return err - } - policyPath := pathJoin(bucketConfigPrefix, bucket, bucketPolicyConfig) - hashReader, err := hash.NewReader(bytes.NewReader(buf), int64(len(buf)), "", getSHA256Hash(buf)) - if err != nil { - logger.LogIf(ctx, err) - return err - } - - if _, err = objAPI.PutObject(ctx, minioMetaBucket, policyPath, hashReader, nil); err != nil { - return err - } - return nil -} - -// persistAndNotifyBucketPolicyChange - takes a policyChange argument, -// persists it to storage, and notify nodes in the cluster about the -// change. In-memory state is updated in response to the notification. 
-func persistAndNotifyBucketPolicyChange(ctx context.Context, bucket string, isRemove bool, bktPolicy policy.BucketAccessPolicy, objAPI ObjectLayer) error { - if isRemove { - err := removeBucketPolicy(ctx, bucket, objAPI) - if err != nil { - return err - } - } else { - if reflect.DeepEqual(bktPolicy, emptyBucketPolicy) { - return errInvalidArgument - } - if err := writeBucketPolicy(ctx, bucket, objAPI, bktPolicy); err != nil { - return err - } - } - - return nil -} diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index 9ccb61e19..72a22ebc1 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -24,16 +24,15 @@ import ( "io/ioutil" "os" "path" - "reflect" "sort" "sync" "time" - "github.com/minio/minio-go/pkg/policy" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/lock" "github.com/minio/minio/pkg/madmin" + "github.com/minio/minio/pkg/policy" ) // FSObjects - Implements fs object layer. @@ -60,9 +59,6 @@ type FSObjects struct { // To manage the appendRoutine go-routines nsMutex *nsLockMap - - // Variable represents bucket policies in memory. - bucketPolicies *bucketPolicies } // Represents the background append file. @@ -139,15 +135,14 @@ func NewFSObjectLayer(fsPath string) (ObjectLayer, error) { // or cause changes on backend format. fs.fsFormatRlk = rlk - // Initialize and load bucket policies. - fs.bucketPolicies, err = initBucketPolicies(fs) - if err != nil { - return nil, fmt.Errorf("Unable to load all bucket policies. %s", err) - } - // Initialize notification system. if err = globalNotificationSys.Init(fs); err != nil { - return nil, fmt.Errorf("Unable to initialize event notification. %s", err) + return nil, fmt.Errorf("Unable to initialize notification system. %v", err) + } + + // Initialize policy system. + if err = globalPolicySys.Init(fs); err != nil { + return nil, fmt.Errorf("Unable to initialize policy system. 
%v", err) } go fs.cleanupStaleMultipartUploads(ctx, globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh) @@ -1058,22 +1053,18 @@ func (fs *FSObjects) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) } // SetBucketPolicy sets policy on bucket -func (fs *FSObjects) SetBucketPolicy(ctx context.Context, bucket string, policy policy.BucketAccessPolicy) error { - return persistAndNotifyBucketPolicyChange(ctx, bucket, false, policy, fs) +func (fs *FSObjects) SetBucketPolicy(ctx context.Context, bucket string, policy *policy.Policy) error { + return savePolicyConfig(fs, bucket, policy) } // GetBucketPolicy will get policy on bucket -func (fs *FSObjects) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) { - policy := fs.bucketPolicies.GetBucketPolicy(bucket) - if reflect.DeepEqual(policy, emptyBucketPolicy) { - return ReadBucketPolicy(bucket, fs) - } - return policy, nil +func (fs *FSObjects) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) { + return GetPolicyConfig(fs, bucket) } // DeleteBucketPolicy deletes all policies on bucket func (fs *FSObjects) DeleteBucketPolicy(ctx context.Context, bucket string) error { - return persistAndNotifyBucketPolicyChange(ctx, bucket, true, emptyBucketPolicy, fs) + return removePolicyConfig(ctx, fs, bucket) } // ListObjectsV2 lists all blobs in bucket filtered by prefix @@ -1093,19 +1084,6 @@ func (fs *FSObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continua return listObjectsV2Info, err } -// RefreshBucketPolicy refreshes cache policy with what's on disk. 
-func (fs *FSObjects) RefreshBucketPolicy(ctx context.Context, bucket string) error { - policy, err := ReadBucketPolicy(bucket, fs) - - if err != nil { - if reflect.DeepEqual(policy, emptyBucketPolicy) { - return fs.bucketPolicies.DeleteBucketPolicy(bucket) - } - return err - } - return fs.bucketPolicies.SetBucketPolicy(bucket, policy) -} - // IsNotificationSupported returns whether bucket notification is applicable for this layer. func (fs *FSObjects) IsNotificationSupported() bool { return true diff --git a/cmd/gateway-common.go b/cmd/gateway-common.go index 2d15abc6f..2e35f5187 100644 --- a/cmd/gateway-common.go +++ b/cmd/gateway-common.go @@ -281,7 +281,7 @@ func ErrorRespToObjectError(err error, params ...string) error { case "BucketNotEmpty": err = BucketNotEmpty{} case "NoSuchBucketPolicy": - err = PolicyNotFound{} + err = BucketPolicyNotFound{} case "InvalidBucketName": err = BucketNameInvalid{Bucket: bucket} case "NoSuchBucket": diff --git a/cmd/gateway-main.go b/cmd/gateway-main.go index dec5c95ba..c4b969ee1 100644 --- a/cmd/gateway-main.go +++ b/cmd/gateway-main.go @@ -167,9 +167,12 @@ func StartGateway(ctx *cli.Context, gw Gateway) { initNSLock(false) // Enable local namespace lock. - // Initialize notification system. + // Create new notification system. globalNotificationSys, err = NewNotificationSys(globalServerConfig, EndpointList{}) - logger.FatalIf(err, "Unable to initialize notification system.") + logger.FatalIf(err, "Unable to create new notification system.") + + // Create new policy system. 
+ globalPolicySys = NewPolicySys() newObject, err := gw.NewGatewayLayer(globalServerConfig.GetCredential()) logger.FatalIf(err, "Unable to initialize gateway layer") diff --git a/cmd/gateway-unsupported.go b/cmd/gateway-unsupported.go index 5e2d5a14f..710e819a4 100644 --- a/cmd/gateway-unsupported.go +++ b/cmd/gateway-unsupported.go @@ -20,10 +20,10 @@ import ( "context" "time" - "github.com/minio/minio-go/pkg/policy" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/madmin" + "github.com/minio/minio/pkg/policy" ) // GatewayUnsupported list of unsupported call stubs for gateway. @@ -72,15 +72,15 @@ func (a GatewayUnsupported) CompleteMultipartUpload(ctx context.Context, bucket } // SetBucketPolicy sets policy on bucket -func (a GatewayUnsupported) SetBucketPolicy(ctx context.Context, bucket string, policyInfo policy.BucketAccessPolicy) error { +func (a GatewayUnsupported) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error { logger.LogIf(ctx, NotImplemented{}) return NotImplemented{} } // GetBucketPolicy will get policy on bucket -func (a GatewayUnsupported) GetBucketPolicy(ctx context.Context, bucket string) (bal policy.BucketAccessPolicy, err error) { +func (a GatewayUnsupported) GetBucketPolicy(ctx context.Context, bucket string) (bucketPolicy *policy.Policy, err error) { logger.LogIf(ctx, NotImplemented{}) - return bal, NotImplemented{} + return nil, NotImplemented{} } // DeleteBucketPolicy deletes all policies on bucket diff --git a/cmd/gateway/azure/gateway-azure.go b/cmd/gateway/azure/gateway-azure.go index 7c8f4b1b7..c56c1b882 100644 --- a/cmd/gateway/azure/gateway-azure.go +++ b/cmd/gateway/azure/gateway-azure.go @@ -34,10 +34,12 @@ import ( "github.com/Azure/azure-sdk-for-go/storage" humanize "github.com/dustin/go-humanize" "github.com/minio/cli" - "github.com/minio/minio-go/pkg/policy" + miniogopolicy "github.com/minio/minio-go/pkg/policy" 
"github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/hash" + "github.com/minio/minio/pkg/policy" + "github.com/minio/minio/pkg/policy/condition" sha256 "github.com/minio/sha256-simd" minio "github.com/minio/minio/cmd" @@ -1020,10 +1022,16 @@ func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje // storage.ContainerAccessTypePrivate - none in minio terminology // As the common denominator for minio and azure is readonly and none, we support // these two policies at the bucket level. -func (a *azureObjects) SetBucketPolicy(ctx context.Context, bucket string, policyInfo policy.BucketAccessPolicy) error { - var policies []minio.BucketAccessPolicy +func (a *azureObjects) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error { + policyInfo, err := minio.PolicyToBucketAccessPolicy(bucketPolicy) + if err != nil { + // This should not happen. + logger.LogIf(ctx, err) + return azureToObjectError(err, bucket) + } - for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket, "") { + var policies []minio.BucketAccessPolicy + for prefix, policy := range miniogopolicy.GetPolicies(policyInfo.Statements, bucket, "") { policies = append(policies, minio.BucketAccessPolicy{ Prefix: prefix, Policy: policy, @@ -1038,7 +1046,7 @@ func (a *azureObjects) SetBucketPolicy(ctx context.Context, bucket string, polic logger.LogIf(ctx, minio.NotImplemented{}) return minio.NotImplemented{} } - if policies[0].Policy != policy.BucketPolicyReadOnly { + if policies[0].Policy != miniogopolicy.BucketPolicyReadOnly { logger.LogIf(ctx, minio.NotImplemented{}) return minio.NotImplemented{} } @@ -1047,31 +1055,47 @@ func (a *azureObjects) SetBucketPolicy(ctx context.Context, bucket string, polic AccessPolicies: nil, } container := a.client.GetContainerReference(bucket) - err := container.SetPermissions(perm, nil) + err = container.SetPermissions(perm, nil) logger.LogIf(ctx, err) return 
azureToObjectError(err, bucket) } // GetBucketPolicy - Get the container ACL and convert it to canonical []bucketAccessPolicy -func (a *azureObjects) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) { - policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"} +func (a *azureObjects) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) { container := a.client.GetContainerReference(bucket) perm, err := container.GetPermissions(nil) if err != nil { logger.LogIf(ctx, err) - return policy.BucketAccessPolicy{}, azureToObjectError(err, bucket) + return nil, azureToObjectError(err, bucket) } - switch perm.AccessType { - case storage.ContainerAccessTypePrivate: - logger.LogIf(ctx, minio.PolicyNotFound{Bucket: bucket}) - return policy.BucketAccessPolicy{}, minio.PolicyNotFound{Bucket: bucket} - case storage.ContainerAccessTypeContainer: - policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "") - default: + + if perm.AccessType == storage.ContainerAccessTypePrivate { + logger.LogIf(ctx, minio.BucketPolicyNotFound{Bucket: bucket}) + return nil, minio.BucketPolicyNotFound{Bucket: bucket} + } else if perm.AccessType != storage.ContainerAccessTypeContainer { logger.LogIf(ctx, minio.NotImplemented{}) - return policy.BucketAccessPolicy{}, azureToObjectError(minio.NotImplemented{}) + return nil, azureToObjectError(minio.NotImplemented{}) } - return policyInfo, nil + + return &policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{ + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet( + policy.GetBucketLocationAction, + policy.ListBucketAction, + policy.GetObjectAction, + ), + policy.NewResourceSet( + policy.NewResource(bucket, ""), + policy.NewResource(bucket, "*"), + ), + condition.NewFunctions(), + ), + }, + }, nil } // DeleteBucketPolicy - Set the container ACL to "private" diff --git 
a/cmd/gateway/b2/gateway-b2.go b/cmd/gateway/b2/gateway-b2.go index e87d12d35..2a02159cb 100644 --- a/cmd/gateway/b2/gateway-b2.go +++ b/cmd/gateway/b2/gateway-b2.go @@ -30,10 +30,12 @@ import ( b2 "github.com/minio/blazer/base" "github.com/minio/cli" - "github.com/minio/minio-go/pkg/policy" + miniogopolicy "github.com/minio/minio-go/pkg/policy" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/auth" h2 "github.com/minio/minio/pkg/hash" + "github.com/minio/minio/pkg/policy" + "github.com/minio/minio/pkg/policy/condition" minio "github.com/minio/minio/cmd" ) @@ -722,10 +724,15 @@ func (l *b2Objects) CompleteMultipartUpload(ctx context.Context, bucket string, // bucketType.AllPublic - bucketTypeReadOnly means that anybody can download the files is the bucket; // bucketType.AllPrivate - bucketTypePrivate means that you need an authorization token to download them. // Default is AllPrivate for all buckets. -func (l *b2Objects) SetBucketPolicy(ctx context.Context, bucket string, policyInfo policy.BucketAccessPolicy) error { - var policies []minio.BucketAccessPolicy +func (l *b2Objects) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error { + policyInfo, err := minio.PolicyToBucketAccessPolicy(bucketPolicy) + if err != nil { + // This should not happen. 
+ return b2ToObjectError(err, bucket) + } - for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket, "") { + var policies []minio.BucketAccessPolicy + for prefix, policy := range miniogopolicy.GetPolicies(policyInfo.Statements, bucket, "") { policies = append(policies, minio.BucketAccessPolicy{ Prefix: prefix, Policy: policy, @@ -740,7 +747,7 @@ func (l *b2Objects) SetBucketPolicy(ctx context.Context, bucket string, policyIn logger.LogIf(ctx, minio.NotImplemented{}) return minio.NotImplemented{} } - if policies[0].Policy != policy.BucketPolicyReadOnly { + if policies[0].Policy != miniogopolicy.BucketPolicyReadOnly { logger.LogIf(ctx, minio.NotImplemented{}) return minio.NotImplemented{} } @@ -756,21 +763,39 @@ func (l *b2Objects) SetBucketPolicy(ctx context.Context, bucket string, policyIn // GetBucketPolicy, returns the current bucketType from B2 backend and convert // it into S3 compatible bucket policy info. -func (l *b2Objects) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) { - policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"} +func (l *b2Objects) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) { bkt, err := l.Bucket(ctx, bucket) if err != nil { - return policyInfo, err - } - if bkt.Type == bucketTypeReadOnly { - policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "") - return policyInfo, nil + return nil, err } + // bkt.Type can also be snapshot, but it is only allowed through B2 browser console, // just return back as policy not found for all cases. // CreateBucket always sets the value to allPrivate by default. 
- logger.LogIf(ctx, minio.PolicyNotFound{Bucket: bucket}) - return policy.BucketAccessPolicy{}, minio.PolicyNotFound{Bucket: bucket} + if bkt.Type != bucketTypeReadOnly { + logger.LogIf(ctx, minio.BucketPolicyNotFound{Bucket: bucket}) + return nil, minio.BucketPolicyNotFound{Bucket: bucket} + } + + return &policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{ + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet( + policy.GetBucketLocationAction, + policy.ListBucketAction, + policy.GetObjectAction, + ), + policy.NewResourceSet( + policy.NewResource(bucket, ""), + policy.NewResource(bucket, "*"), + ), + condition.NewFunctions(), + ), + }, + }, nil } // DeleteBucketPolicy - resets the bucketType of bucket on B2 to 'allPrivate'. diff --git a/cmd/gateway/gcs/gateway-gcs.go b/cmd/gateway/gcs/gateway-gcs.go index e64f99dd9..3c2e5c693 100644 --- a/cmd/gateway/gcs/gateway-gcs.go +++ b/cmd/gateway/gcs/gateway-gcs.go @@ -33,10 +33,12 @@ import ( "cloud.google.com/go/storage" humanize "github.com/dustin/go-humanize" "github.com/minio/cli" - "github.com/minio/minio-go/pkg/policy" + miniogopolicy "github.com/minio/minio-go/pkg/policy" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/hash" + "github.com/minio/minio/pkg/policy" + "github.com/minio/minio/pkg/policy/condition" "google.golang.org/api/googleapi" "google.golang.org/api/iterator" @@ -1132,10 +1134,16 @@ func (l *gcsGateway) CompleteMultipartUpload(ctx context.Context, bucket string, } // SetBucketPolicy - Set policy on bucket -func (l *gcsGateway) SetBucketPolicy(ctx context.Context, bucket string, policyInfo policy.BucketAccessPolicy) error { - var policies []minio.BucketAccessPolicy +func (l *gcsGateway) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error { + policyInfo, err := minio.PolicyToBucketAccessPolicy(bucketPolicy) + if err != nil { + // This should not happen. 
+ logger.LogIf(ctx, err) + return gcsToObjectError(err, bucket) + } - for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket, "") { + var policies []minio.BucketAccessPolicy + for prefix, policy := range miniogopolicy.GetPolicies(policyInfo.Statements, bucket, "") { policies = append(policies, minio.BucketAccessPolicy{ Prefix: prefix, Policy: policy, @@ -1154,7 +1162,7 @@ func (l *gcsGateway) SetBucketPolicy(ctx context.Context, bucket string, policyI } acl := l.client.Bucket(bucket).ACL() - if policies[0].Policy == policy.BucketPolicyNone { + if policies[0].Policy == miniogopolicy.BucketPolicyNone { if err := acl.Delete(l.ctx, storage.AllUsers); err != nil { logger.LogIf(ctx, err) return gcsToObjectError(err, bucket) @@ -1164,9 +1172,9 @@ func (l *gcsGateway) SetBucketPolicy(ctx context.Context, bucket string, policyI var role storage.ACLRole switch policies[0].Policy { - case policy.BucketPolicyReadOnly: + case miniogopolicy.BucketPolicyReadOnly: role = storage.RoleReader - case policy.BucketPolicyWriteOnly: + case miniogopolicy.BucketPolicyWriteOnly: role = storage.RoleWriter default: logger.LogIf(ctx, minio.NotImplemented{}) @@ -1182,30 +1190,63 @@ func (l *gcsGateway) SetBucketPolicy(ctx context.Context, bucket string, policyI } // GetBucketPolicy - Get policy on bucket -func (l *gcsGateway) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) { +func (l *gcsGateway) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) { rules, err := l.client.Bucket(bucket).ACL().List(l.ctx) if err != nil { logger.LogIf(ctx, err) - return policy.BucketAccessPolicy{}, gcsToObjectError(err, bucket) + return nil, gcsToObjectError(err, bucket) } - policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"} + + var readOnly, writeOnly bool for _, r := range rules { if r.Entity != storage.AllUsers || r.Role == storage.RoleOwner { continue } + switch r.Role { case storage.RoleReader: - 
policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "") + readOnly = true case storage.RoleWriter: - policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyWriteOnly, bucket, "") + writeOnly = true } } - // Return NoSuchBucketPolicy error, when policy is not set - if len(policyInfo.Statements) == 0 { - logger.LogIf(ctx, minio.PolicyNotFound{}) - return policy.BucketAccessPolicy{}, gcsToObjectError(minio.PolicyNotFound{}, bucket) + + actionSet := policy.NewActionSet() + if readOnly { + actionSet.Add(policy.GetBucketLocationAction) + actionSet.Add(policy.ListBucketAction) + actionSet.Add(policy.GetObjectAction) } - return policyInfo, nil + if writeOnly { + actionSet.Add(policy.GetBucketLocationAction) + actionSet.Add(policy.ListBucketMultipartUploadsAction) + actionSet.Add(policy.AbortMultipartUploadAction) + actionSet.Add(policy.DeleteObjectAction) + actionSet.Add(policy.ListMultipartUploadPartsAction) + actionSet.Add(policy.PutObjectAction) + } + + // Return NoSuchBucketPolicy error, when policy is not set + if len(actionSet) == 0 { + logger.LogIf(ctx, minio.BucketPolicyNotFound{}) + return nil, gcsToObjectError(minio.BucketPolicyNotFound{}, bucket) + } + + return &policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{ + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + actionSet, + policy.NewResourceSet( + policy.NewResource(bucket, ""), + policy.NewResource(bucket, "*"), + ), + condition.NewFunctions(), + ), + }, + }, nil } // DeleteBucketPolicy - Delete all policies on bucket diff --git a/cmd/gateway/nas/gateway-nas.go b/cmd/gateway/nas/gateway-nas.go index a3b5d72fe..9a5c67be4 100644 --- a/cmd/gateway/nas/gateway-nas.go +++ b/cmd/gateway/nas/gateway-nas.go @@ -20,9 +20,9 @@ import ( "context" "github.com/minio/cli" - "github.com/minio/minio-go/pkg/policy" minio "github.com/minio/minio/cmd" "github.com/minio/minio/pkg/auth" + 
"github.com/minio/minio/pkg/policy" ) const ( @@ -132,6 +132,6 @@ func (l *nasObjects) IsNotificationSupported() bool { } // GetBucketPolicy will get policy on bucket -func (l *nasObjects) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) { - return minio.ReadBucketPolicy(bucket, l) +func (l *nasObjects) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) { + return minio.GetPolicyConfig(l, bucket) } diff --git a/cmd/gateway/oss/gateway-oss.go b/cmd/gateway/oss/gateway-oss.go index b7bd9100e..7b554a122 100644 --- a/cmd/gateway/oss/gateway-oss.go +++ b/cmd/gateway/oss/gateway-oss.go @@ -30,11 +30,13 @@ import ( "github.com/dustin/go-humanize" "github.com/minio/cli" - "github.com/minio/minio-go/pkg/policy" + miniogopolicy "github.com/minio/minio-go/pkg/policy" minio "github.com/minio/minio/cmd" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/hash" + "github.com/minio/minio/pkg/policy" + "github.com/minio/minio/pkg/policy/condition" ) const ( @@ -962,8 +964,14 @@ func (l *ossObjects) CompleteMultipartUpload(ctx context.Context, bucket, object // oss.ACLPublicReadWrite: readwrite in minio terminology // oss.ACLPublicRead: readonly in minio terminology // oss.ACLPrivate: none in minio terminology -func (l *ossObjects) SetBucketPolicy(ctx context.Context, bucket string, policyInfo policy.BucketAccessPolicy) error { - bucketPolicies := policy.GetPolicies(policyInfo.Statements, bucket, "") +func (l *ossObjects) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error { + policyInfo, err := minio.PolicyToBucketAccessPolicy(bucketPolicy) + if err != nil { + // This should not happen. 
+ return ossToObjectError(err, bucket) + } + + bucketPolicies := miniogopolicy.GetPolicies(policyInfo.Statements, bucket, "") if len(bucketPolicies) != 1 { logger.LogIf(ctx, minio.NotImplemented{}) return minio.NotImplemented{} @@ -978,11 +986,11 @@ func (l *ossObjects) SetBucketPolicy(ctx context.Context, bucket string, policyI var acl oss.ACLType switch bucketPolicy { - case policy.BucketPolicyNone: + case miniogopolicy.BucketPolicyNone: acl = oss.ACLPrivate - case policy.BucketPolicyReadOnly: + case miniogopolicy.BucketPolicyReadOnly: acl = oss.ACLPublicRead - case policy.BucketPolicyReadWrite: + case miniogopolicy.BucketPolicyReadWrite: acl = oss.ACLPublicReadWrite default: logger.LogIf(ctx, minio.NotImplemented{}) @@ -1000,29 +1008,60 @@ func (l *ossObjects) SetBucketPolicy(ctx context.Context, bucket string, policyI } // GetBucketPolicy will get policy on bucket. -func (l *ossObjects) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) { +func (l *ossObjects) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) { result, err := l.Client.GetBucketACL(bucket) if err != nil { logger.LogIf(ctx, err) - return policy.BucketAccessPolicy{}, ossToObjectError(err) + return nil, ossToObjectError(err) } - policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"} + var readOnly, readWrite bool switch result.ACL { case string(oss.ACLPrivate): // By default, all buckets starts with a "private" policy. 
- logger.LogIf(ctx, minio.PolicyNotFound{}) - return policy.BucketAccessPolicy{}, ossToObjectError(minio.PolicyNotFound{}, bucket) + logger.LogIf(ctx, minio.BucketPolicyNotFound{}) + return nil, ossToObjectError(minio.BucketPolicyNotFound{}, bucket) case string(oss.ACLPublicRead): - policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "") + readOnly = true case string(oss.ACLPublicReadWrite): - policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadWrite, bucket, "") + readWrite = true default: logger.LogIf(ctx, minio.NotImplemented{}) - return policy.BucketAccessPolicy{}, minio.NotImplemented{} + return nil, minio.NotImplemented{} } - return policyInfo, nil + actionSet := policy.NewActionSet() + if readOnly { + actionSet.Add(policy.GetBucketLocationAction) + actionSet.Add(policy.ListBucketAction) + actionSet.Add(policy.GetObjectAction) + } + if readWrite { + actionSet.Add(policy.GetBucketLocationAction) + actionSet.Add(policy.ListBucketAction) + actionSet.Add(policy.GetObjectAction) + actionSet.Add(policy.ListBucketMultipartUploadsAction) + actionSet.Add(policy.AbortMultipartUploadAction) + actionSet.Add(policy.DeleteObjectAction) + actionSet.Add(policy.ListMultipartUploadPartsAction) + actionSet.Add(policy.PutObjectAction) + } + + return &policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{ + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + actionSet, + policy.NewResourceSet( + policy.NewResource(bucket, ""), + policy.NewResource(bucket, "*"), + ), + condition.NewFunctions(), + ), + }, + }, nil } // DeleteBucketPolicy deletes all policies on bucket. 
diff --git a/cmd/gateway/s3/gateway-s3.go b/cmd/gateway/s3/gateway-s3.go index 1de1a26bc..97fe90f52 100644 --- a/cmd/gateway/s3/gateway-s3.go +++ b/cmd/gateway/s3/gateway-s3.go @@ -20,14 +20,15 @@ import ( "context" "encoding/json" "io" + "strings" "github.com/minio/cli" miniogo "github.com/minio/minio-go" - "github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/s3utils" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/hash" + "github.com/minio/minio/pkg/policy" minio "github.com/minio/minio/cmd" ) @@ -428,31 +429,32 @@ func (l *s3Objects) CompleteMultipartUpload(ctx context.Context, bucket string, } // SetBucketPolicy sets policy on bucket -func (l *s3Objects) SetBucketPolicy(ctx context.Context, bucket string, policyInfo policy.BucketAccessPolicy) error { - data, err := json.Marshal(&policyInfo) +func (l *s3Objects) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error { + data, err := json.Marshal(bucketPolicy) if err != nil { - return err + // This should not happen. 
+ logger.LogIf(ctx, err) + return minio.ErrorRespToObjectError(err, bucket) } + if err := l.Client.SetBucketPolicy(bucket, string(data)); err != nil { logger.LogIf(ctx, err) - return minio.ErrorRespToObjectError(err, bucket, "") + return minio.ErrorRespToObjectError(err, bucket) } return nil } // GetBucketPolicy will get policy on bucket -func (l *s3Objects) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) { +func (l *s3Objects) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) { data, err := l.Client.GetBucketPolicy(bucket) if err != nil { logger.LogIf(ctx, err) - return policy.BucketAccessPolicy{}, minio.ErrorRespToObjectError(err, bucket, "") + return nil, minio.ErrorRespToObjectError(err, bucket) } - var policyInfo policy.BucketAccessPolicy - if err = json.Unmarshal([]byte(data), &policyInfo); err != nil { - return policyInfo, err - } - return policyInfo, nil + + bucketPolicy, err := policy.ParseConfig(strings.NewReader(data), bucket) + return bucketPolicy, minio.ErrorRespToObjectError(err, bucket) } // DeleteBucketPolicy deletes all policies on bucket diff --git a/cmd/gateway/s3/gateway-s3_test.go b/cmd/gateway/s3/gateway-s3_test.go index d361f883e..a787bc918 100644 --- a/cmd/gateway/s3/gateway-s3_test.go +++ b/cmd/gateway/s3/gateway-s3_test.go @@ -52,7 +52,7 @@ func TestS3ToObjectError(t *testing.T) { }, { inputErr: errResponse("NoSuchBucketPolicy"), - expectedErr: minio.PolicyNotFound{}, + expectedErr: minio.BucketPolicyNotFound{}, }, { inputErr: errResponse("NoSuchBucket"), diff --git a/cmd/globals.go b/cmd/globals.go index 6d8a33c40..7d34d8296 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -120,6 +120,7 @@ var ( globalMinioHost = "" globalNotificationSys *NotificationSys + globalPolicySys *PolicySys // CA root certificates, a nil value means system certs pool will be used globalRootCAs *x509.CertPool diff --git a/cmd/notification.go b/cmd/notification.go index 56466ceff..d5dbaa465 100644 
--- a/cmd/notification.go +++ b/cmd/notification.go @@ -21,6 +21,7 @@ import ( "context" "encoding/json" "encoding/xml" + "errors" "fmt" "net/url" "path" @@ -30,6 +31,7 @@ import ( "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/hash" xnet "github.com/minio/minio/pkg/net" + "github.com/minio/minio/pkg/policy" ) // NotificationSys - notification system. @@ -75,15 +77,33 @@ func (sys *NotificationSys) DeleteBucket(bucketName string) map[xnet.Host]error return errors } -// UpdateBucketPolicy - calls UpdateBucketPolicy RPC call on all peers. -func (sys *NotificationSys) UpdateBucketPolicy(bucketName string) map[xnet.Host]error { +// SetBucketPolicy - calls SetBucketPolicy RPC call on all peers. +func (sys *NotificationSys) SetBucketPolicy(bucketName string, bucketPolicy *policy.Policy) map[xnet.Host]error { errors := make(map[xnet.Host]error) var wg sync.WaitGroup for addr, client := range sys.peerRPCClientMap { wg.Add(1) go func(addr xnet.Host, client *PeerRPCClient) { defer wg.Done() - if err := client.UpdateBucketPolicy(bucketName); err != nil { + if err := client.SetBucketPolicy(bucketName, bucketPolicy); err != nil { + errors[addr] = err + } + }(addr, client) + } + wg.Wait() + + return errors +} + +// RemoveBucketPolicy - calls RemoveBucketPolicy RPC call on all peers. 
+func (sys *NotificationSys) RemoveBucketPolicy(bucketName string) map[xnet.Host]error { + errors := make(map[xnet.Host]error) + var wg sync.WaitGroup + for addr, client := range sys.peerRPCClientMap { + wg.Add(1) + go func(addr xnet.Host, client *PeerRPCClient) { + defer wg.Done() + if err := client.RemoveBucketPolicy(bucketName); err != nil { errors[addr] = err } }(addr, client) @@ -182,7 +202,7 @@ func (sys *NotificationSys) initListeners(ctx context.Context, objAPI ObjectLaye defer objLock.Unlock() reader, e := readConfig(ctx, objAPI, configFile) - if e != nil && !IsErrIgnored(e, errDiskNotFound, errNoSuchNotifications) { + if e != nil && !IsErrIgnored(e, errDiskNotFound, errConfigNotFound) { return e } @@ -433,7 +453,7 @@ func (args eventArgs) ToEvent() event.Event { Bucket: event.Bucket{ Name: args.BucketName, OwnerIdentity: event.Identity{creds.AccessKey}, - ARN: bucketARNPrefix + args.BucketName, + ARN: policy.ResourceARNPrefix + args.BucketName, }, Object: event.Object{ Key: url.QueryEscape(args.Object.Name), @@ -483,6 +503,8 @@ func saveConfig(objAPI ObjectLayer, configFile string, data []byte) error { return err } +var errConfigNotFound = errors.New("config file not found") + func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) (*bytes.Buffer, error) { var buffer bytes.Buffer // Read entire content by setting size to -1 @@ -490,8 +512,9 @@ func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) (*by if err != nil { // Ignore if err is ObjectNotFound or IncompleteBody when bucket is not configured with notification if isErrObjectNotFound(err) || isErrIncompleteBody(err) { - return nil, errNoSuchNotifications + return nil, errConfigNotFound } + logger.GetReqInfo(ctx).AppendTags("configFile", configFile) logger.LogIf(ctx, err) return nil, err @@ -510,6 +533,10 @@ func readNotificationConfig(ctx context.Context, objAPI ObjectLayer, bucketName configFile := path.Join(bucketConfigPrefix, bucketName, 
bucketNotificationConfig) reader, err := readConfig(ctx, objAPI, configFile) if err != nil { + if err == errConfigNotFound { + err = errNoSuchNotifications + } + return nil, err } @@ -551,7 +578,7 @@ func SaveListener(objAPI ObjectLayer, bucketName string, eventNames []event.Name defer objLock.Unlock() reader, err := readConfig(ctx, objAPI, configFile) - if err != nil && !IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) { + if err != nil && !IsErrIgnored(err, errDiskNotFound, errConfigNotFound) { return err } @@ -602,7 +629,7 @@ func RemoveListener(objAPI ObjectLayer, bucketName string, targetID event.Target defer objLock.Unlock() reader, err := readConfig(ctx, objAPI, configFile) - if err != nil && !IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) { + if err != nil && !IsErrIgnored(err, errDiskNotFound, errConfigNotFound) { return err } diff --git a/cmd/object-api-common.go b/cmd/object-api-common.go index f8e79e90c..81e1f0e02 100644 --- a/cmd/object-api-common.go +++ b/cmd/object-api-common.go @@ -85,7 +85,7 @@ func dirObjectInfo(bucket, object string, size int64, metadata map[string]string func deleteBucketMetadata(ctx context.Context, bucket string, objAPI ObjectLayer) { // Delete bucket access policy, if present - ignore any errors. - removeBucketPolicy(ctx, bucket, objAPI) + removePolicyConfig(ctx, objAPI, bucket) // Delete notification config, if present - ignore any errors. removeNotificationConfig(ctx, objAPI, bucket) diff --git a/cmd/object-api-errors.go b/cmd/object-api-errors.go index 37936e03f..077135a7a 100644 --- a/cmd/object-api-errors.go +++ b/cmd/object-api-errors.go @@ -366,13 +366,6 @@ func (e PolicyNesting) Error() string { return "New bucket policy conflicts with an existing policy. Please try again with new prefix." 
} -// PolicyNotFound - policy not found -type PolicyNotFound GenericError - -func (e PolicyNotFound) Error() string { - return "Policy not found" -} - // UnsupportedMetadata - unsupported metadata type UnsupportedMetadata struct{} @@ -396,15 +389,6 @@ func isErrIncompleteBody(err error) bool { return false } -// isErrBucketPolicyNotFound - Check if error type is BucketPolicyNotFound. -func isErrBucketPolicyNotFound(err error) bool { - switch err.(type) { - case PolicyNotFound: - return true - } - return false -} - // isErrObjectNotFound - Check if error type is ObjectNotFound. func isErrObjectNotFound(err error) bool { switch err.(type) { diff --git a/cmd/object-api-interface.go b/cmd/object-api-interface.go index b9912398c..dd3fbc1ba 100644 --- a/cmd/object-api-interface.go +++ b/cmd/object-api-interface.go @@ -21,9 +21,9 @@ import ( "io" "time" - "github.com/minio/minio-go/pkg/policy" "github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/madmin" + "github.com/minio/minio/pkg/policy" ) // ObjectLayer implements primitives for object API layer. 
@@ -70,9 +70,8 @@ type ObjectLayer interface { ClearLocks(context.Context, []VolumeLockInfo) error // Policy operations - SetBucketPolicy(context.Context, string, policy.BucketAccessPolicy) error - GetBucketPolicy(context.Context, string) (policy.BucketAccessPolicy, error) - RefreshBucketPolicy(context.Context, string) error + SetBucketPolicy(context.Context, string, *policy.Policy) error + GetBucketPolicy(context.Context, string) (*policy.Policy, error) DeleteBucketPolicy(context.Context, string) error // Supported operations check diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index 4013ea1c1..7b7f4f85f 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -34,9 +34,9 @@ import ( "github.com/gorilla/mux" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/event" - "github.com/minio/minio/pkg/handlers" "github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/ioutil" + "github.com/minio/minio/pkg/policy" sha256 "github.com/minio/sha256-simd" "github.com/minio/sio" ) @@ -60,23 +60,6 @@ func setHeadGetRespHeaders(w http.ResponseWriter, reqParams url.Values) { } } -// errAllowableNotFound - For an anon user, return 404 if have ListBucket, 403 otherwise -// this is in keeping with the permissions sections of the docs of both: -// HEAD Object: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html -// GET Object: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html -func errAllowableObjectNotFound(ctx context.Context, bucket string, r *http.Request) APIErrorCode { - if getRequestAuthType(r) == authTypeAnonymous { - // We care about the bucket as a whole, not a particular resource. 
- resource := "/" + bucket - sourceIP := handlers.GetSourceIP(r) - if s3Error := enforceBucketPolicy(ctx, bucket, "s3:ListBucket", resource, - r.Referer(), sourceIP, r.URL.Query()); s3Error != ErrNone { - return ErrAccessDenied - } - } - return ErrNoSuchKey -} - // GetObjectHandler - GET Object // ---------- // This implementation of the GET operation retrieves object. To use GET, @@ -96,7 +79,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req return } - if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:GetObject", globalServerConfig.GetRegion()); s3Error != ErrNone { + if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } @@ -109,9 +92,21 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req objInfo, err := getObjectInfo(ctx, bucket, object) if err != nil { apiErr := toAPIErrorCode(err) - if apiErr == ErrNoSuchKey { - apiErr = errAllowableObjectNotFound(ctx, bucket, r) + if apiErr == ErrNoSuchKey && getRequestAuthType(r) == authTypeAnonymous { + // As per "Permission" section in https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html + // If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission. + // * If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code 404 ("no such key") error. 
+ // * if you don’t have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 ("access denied") error.` + if !globalPolicySys.IsAllowed(policy.Args{ + Action: policy.ListBucketAction, + BucketName: bucket, + ConditionValues: getConditionValues(r, ""), + IsOwner: false, + }) { + apiErr = ErrAccessDenied + } } + writeErrorResponse(w, apiErr, r.URL) return } @@ -232,7 +227,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re return } - if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:GetObject", globalServerConfig.GetRegion()); s3Error != ErrNone { + if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone { writeErrorResponseHeadersOnly(w, s3Error) return } @@ -245,9 +240,21 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re objInfo, err := getObjectInfo(ctx, bucket, object) if err != nil { apiErr := toAPIErrorCode(err) - if apiErr == ErrNoSuchKey { - apiErr = errAllowableObjectNotFound(ctx, bucket, r) + if apiErr == ErrNoSuchKey && getRequestAuthType(r) == authTypeAnonymous { + // As per "Permission" section in https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html + // If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission. + // * If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code 404 ("no such key") error. 
+ // * if you don’t have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 ("access denied") error.` + if !globalPolicySys.IsAllowed(policy.Args{ + Action: policy.ListBucketAction, + BucketName: bucket, + ConditionValues: getConditionValues(r, ""), + IsOwner: false, + }) { + apiErr = ErrAccessDenied + } } + writeErrorResponseHeadersOnly(w, apiErr) return } @@ -340,7 +347,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re return } - if s3Error := checkRequestAuthType(ctx, r, dstBucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone { + if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, dstBucket, dstObject); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } @@ -666,10 +673,14 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req writeErrorResponse(w, ErrAccessDenied, r.URL) return case authTypeAnonymous: - // http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html - sourceIP := handlers.GetSourceIP(r) - if s3Err = enforceBucketPolicy(ctx, bucket, "s3:PutObject", r.URL.Path, r.Referer(), sourceIP, r.URL.Query()); s3Err != ErrNone { - writeErrorResponse(w, s3Err, r.URL) + if !globalPolicySys.IsAllowed(policy.Args{ + Action: policy.PutObjectAction, + BucketName: bucket, + ConditionValues: getConditionValues(r, ""), + IsOwner: false, + ObjectName: object, + }) { + writeErrorResponse(w, ErrAccessDenied, r.URL) return } case authTypeStreamingSigned: @@ -781,7 +792,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r return } - if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone { + if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, bucket, object); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } @@ -867,7 +878,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w 
http.ResponseWriter, r *htt return } - if s3Error := checkRequestAuthType(ctx, r, dstBucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone { + if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, dstBucket, dstObject); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } @@ -1133,10 +1144,14 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http writeErrorResponse(w, ErrAccessDenied, r.URL) return case authTypeAnonymous: - // http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html - if s3Error := enforceBucketPolicy(ctx, bucket, "s3:PutObject", r.URL.Path, - r.Referer(), handlers.GetSourceIP(r), r.URL.Query()); s3Error != ErrNone { - writeErrorResponse(w, s3Error, r.URL) + if !globalPolicySys.IsAllowed(policy.Args{ + Action: policy.PutObjectAction, + BucketName: bucket, + ConditionValues: getConditionValues(r, ""), + IsOwner: false, + ObjectName: object, + }) { + writeErrorResponse(w, ErrAccessDenied, r.URL) return } case authTypeStreamingSigned: @@ -1262,7 +1277,8 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter, if api.CacheAPI() != nil { abortMultipartUpload = api.CacheAPI().AbortMultipartUpload } - if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:AbortMultipartUpload", globalServerConfig.GetRegion()); s3Error != ErrNone { + + if s3Error := checkRequestAuthType(ctx, r, policy.AbortMultipartUploadAction, bucket, object); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } @@ -1297,7 +1313,7 @@ func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *ht return } - if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:ListMultipartUploadParts", globalServerConfig.GetRegion()); s3Error != ErrNone { + if s3Error := checkRequestAuthType(ctx, r, policy.ListMultipartUploadPartsAction, bucket, object); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } @@ -1337,7 +1353,7 @@ func (api 
objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite return } - if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone { + if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, bucket, object); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } @@ -1446,7 +1462,7 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http. return } - if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:DeleteObject", globalServerConfig.GetRegion()); s3Error != ErrNone { + if s3Error := checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object); s3Error != ErrNone { writeErrorResponse(w, s3Error, r.URL) return } diff --git a/cmd/object-handlers_test.go b/cmd/object-handlers_test.go index 68780ad51..4dd12b74e 100644 --- a/cmd/object-handlers_test.go +++ b/cmd/object-handlers_test.go @@ -172,7 +172,7 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string, // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse, // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the // unsigned request goes through and its validated again. - ExecObjectLayerAPIAnonTest(t, obj, "TestAPIHeadObjectHandler", bucketName, objectName, instanceType, apiRouter, anonReq, getReadOnlyObjectStatement) + ExecObjectLayerAPIAnonTest(t, obj, "TestAPIHeadObjectHandler", bucketName, objectName, instanceType, apiRouter, anonReq, getAnonReadOnlyObjectPolicy(bucketName, objectName)) // HTTP request for testing when `objectLayer` is set to `nil`. 
// There is no need to use an existing bucket and valid input for creating the request @@ -445,7 +445,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse, // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the // unsigned request goes through and its validated again. - ExecObjectLayerAPIAnonTest(t, obj, "TestAPIGetObjectHandler", bucketName, objectName, instanceType, apiRouter, anonReq, getReadOnlyObjectStatement) + ExecObjectLayerAPIAnonTest(t, obj, "TestAPIGetObjectHandler", bucketName, objectName, instanceType, apiRouter, anonReq, getAnonReadOnlyObjectPolicy(bucketName, objectName)) // HTTP request for testing when `objectLayer` is set to `nil`. // There is no need to use an existing bucket and valid input for creating the request @@ -1001,7 +1001,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse, // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the // unsigned request goes through and its validated again. - ExecObjectLayerAPIAnonTest(t, obj, "TestAPIPutObjectHandler", bucketName, objectName, instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement) + ExecObjectLayerAPIAnonTest(t, obj, "TestAPIPutObjectHandler", bucketName, objectName, instanceType, apiRouter, anonReq, getAnonWriteOnlyObjectPolicy(bucketName, objectName)) // HTTP request to test the case of `objectLayer` being set to `nil`. 
// There is no need to use an existing bucket or valid input for creating the request, @@ -1847,7 +1847,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse, // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the // unsigned request goes through and its validated again. - ExecObjectLayerAPIAnonTest(t, obj, "TestAPICopyObjectHandler", bucketName, newCopyAnonObject, instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement) + ExecObjectLayerAPIAnonTest(t, obj, "TestAPICopyObjectHandler", bucketName, newCopyAnonObject, instanceType, apiRouter, anonReq, getAnonWriteOnlyObjectPolicy(bucketName, newCopyAnonObject)) // HTTP request to test the case of `objectLayer` being set to `nil`. // There is no need to use an existing bucket or valid input for creating the request, @@ -1998,7 +1998,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse, // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the // unsigned request goes through and its validated again. - ExecObjectLayerAPIAnonTest(t, obj, "TestAPINewMultipartHandler", bucketName, objectName, instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement) + ExecObjectLayerAPIAnonTest(t, obj, "TestAPINewMultipartHandler", bucketName, objectName, instanceType, apiRouter, anonReq, getAnonWriteOnlyObjectPolicy(bucketName, objectName)) // HTTP request to test the case of `objectLayer` being set to `nil`. 
// There is no need to use an existing bucket or valid input for creating the request, @@ -2409,7 +2409,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the // unsigned request goes through and its validated again. ExecObjectLayerAPIAnonTest(t, obj, "TestAPICompleteMultipartHandler", bucketName, objectName, instanceType, - apiRouter, anonReq, getWriteOnlyObjectStatement) + apiRouter, anonReq, getAnonWriteOnlyObjectPolicy(bucketName, objectName)) // HTTP request to test the case of `objectLayer` being set to `nil`. // There is no need to use an existing bucket or valid input for creating the request, @@ -2572,7 +2572,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the // unsigned request goes through and its validated again. ExecObjectLayerAPIAnonTest(t, obj, "TestAPIAbortMultipartHandler", bucketName, objectName, instanceType, - apiRouter, anonReq, getWriteOnlyObjectStatement) + apiRouter, anonReq, getAnonWriteOnlyObjectPolicy(bucketName, objectName)) // HTTP request to test the case of `objectLayer` being set to `nil`. // There is no need to use an existing bucket or valid input for creating the request, @@ -2734,7 +2734,7 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse, // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the // unsigned request goes through and its validated again. 
- ExecObjectLayerAPIAnonTest(t, obj, "TestAPIDeleteObjectHandler", bucketName, anonObjectName, instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement) + ExecObjectLayerAPIAnonTest(t, obj, "TestAPIDeleteObjectHandler", bucketName, anonObjectName, instanceType, apiRouter, anonReq, getAnonWriteOnlyObjectPolicy(bucketName, anonObjectName)) // HTTP request to test the case of `objectLayer` being set to `nil`. // There is no need to use an existing bucket or valid input for creating the request, @@ -3205,7 +3205,7 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse, // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the // unsigned request goes through and its validated again. - ExecObjectLayerAPIAnonTest(t, obj, "TestAPIPutObjectPartHandler", bucketName, testObject, instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement) + ExecObjectLayerAPIAnonTest(t, obj, "TestAPIPutObjectPartHandler", bucketName, testObject, instanceType, apiRouter, anonReq, getAnonWriteOnlyObjectPolicy(bucketName, testObject)) // HTTP request for testing when `ObjectLayer` is set to `nil`. // There is no need to use an existing bucket and valid input for creating the request @@ -3508,7 +3508,7 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str // ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse, // sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the // unsigned request goes through and its validated again. 
- ExecObjectLayerAPIAnonTest(t, obj, "TestAPIListObjectPartsHandler", bucketName, testObject, instanceType, apiRouter, anonReq, getWriteOnlyObjectStatement) + ExecObjectLayerAPIAnonTest(t, obj, "TestAPIListObjectPartsHandler", bucketName, testObject, instanceType, apiRouter, anonReq, getAnonWriteOnlyObjectPolicy(bucketName, testObject)) // HTTP request for testing when `objectLayer` is set to `nil`. // There is no need to use an existing bucket and valid input for creating the request diff --git a/cmd/peer-rpc.go b/cmd/peer-rpc.go index 4be111ebc..c7f1ebed9 100644 --- a/cmd/peer-rpc.go +++ b/cmd/peer-rpc.go @@ -25,6 +25,7 @@ import ( "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/event" xnet "github.com/minio/minio/pkg/net" + "github.com/minio/minio/pkg/policy" ) const s3Path = "/s3/remote" @@ -43,23 +44,33 @@ type DeleteBucketArgs struct { // DeleteBucket - handles delete bucket RPC call which removes all values of given bucket in global NotificationSys object. func (receiver *PeerRPCReceiver) DeleteBucket(args *DeleteBucketArgs, reply *AuthRPCArgs) error { globalNotificationSys.RemoveNotification(args.BucketName) + globalPolicySys.Remove(args.BucketName) return nil } -// UpdateBucketPolicyArgs - update bucket policy RPC arguments. -type UpdateBucketPolicyArgs struct { +// SetBucketPolicyArgs - set bucket policy RPC arguments. +type SetBucketPolicyArgs struct { + AuthRPCArgs + BucketName string + Policy policy.Policy +} + +// SetBucketPolicy - handles set bucket policy RPC call which adds bucket policy to globalPolicySys. +func (receiver *PeerRPCReceiver) SetBucketPolicy(args *SetBucketPolicyArgs, reply *AuthRPCArgs) error { + globalPolicySys.Set(args.BucketName, args.Policy) + return nil +} + +// RemoveBucketPolicyArgs - delete bucket policy RPC arguments. 
+type RemoveBucketPolicyArgs struct { AuthRPCArgs BucketName string } -// UpdateBucketPolicy - handles update bucket policy RPC call which sets bucket policies to given bucket in global BucketPolicies object. -func (receiver *PeerRPCReceiver) UpdateBucketPolicy(args *UpdateBucketPolicyArgs, reply *AuthRPCArgs) error { - objectAPI := newObjectLayerFn() - if objectAPI == nil { - // If the object layer is just coming up then it will load the policy from the disk. - return nil - } - return objectAPI.RefreshBucketPolicy(context.Background(), args.BucketName) +// RemoveBucketPolicy - handles delete bucket policy RPC call which removes bucket policy to globalPolicySys. +func (receiver *PeerRPCReceiver) RemoveBucketPolicy(args *RemoveBucketPolicyArgs, reply *AuthRPCArgs) error { + globalPolicySys.Remove(args.BucketName) + return nil } // PutBucketNotificationArgs - put bucket notification RPC arguments. @@ -195,13 +206,23 @@ func (rpcClient *PeerRPCClient) DeleteBucket(bucketName string) error { return rpcClient.Call("Peer.DeleteBucket", &args, &reply) } -// UpdateBucketPolicy - calls update bucket policy RPC. -func (rpcClient *PeerRPCClient) UpdateBucketPolicy(bucketName string) error { - args := UpdateBucketPolicyArgs{ +// SetBucketPolicy - calls set bucket policy RPC. +func (rpcClient *PeerRPCClient) SetBucketPolicy(bucketName string, bucketPolicy *policy.Policy) error { + args := SetBucketPolicyArgs{ + BucketName: bucketName, + Policy: *bucketPolicy, + } + reply := AuthRPCReply{} + return rpcClient.Call("Peer.SetBucketPolicy", &args, &reply) +} + +// RemoveBucketPolicy - calls remove bucket policy RPC. +func (rpcClient *PeerRPCClient) RemoveBucketPolicy(bucketName string) error { + args := RemoveBucketPolicyArgs{ BucketName: bucketName, } reply := AuthRPCReply{} - return rpcClient.Call("Peer.UpdateBucketPolicy", &args, &reply) + return rpcClient.Call("Peer.RemoveBucketPolicy", &args, &reply) } // PutBucketNotification - calls put bukcet notification RPC. 
diff --git a/cmd/policy.go b/cmd/policy.go new file mode 100644 index 000000000..afb0fa27f --- /dev/null +++ b/cmd/policy.go @@ -0,0 +1,218 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "context" + "encoding/json" + "net/http" + "path" + "sync" + + miniogopolicy "github.com/minio/minio-go/pkg/policy" + "github.com/minio/minio/pkg/handlers" + "github.com/minio/minio/pkg/policy" +) + +// PolicySys - policy system. +type PolicySys struct { + sync.RWMutex + bucketPolicyMap map[string]policy.Policy +} + +// Set - sets policy to given bucket name. If policy is empty, existing policy is removed. +func (sys *PolicySys) Set(bucketName string, policy policy.Policy) { + sys.Lock() + defer sys.Unlock() + + if policy.IsEmpty() { + delete(sys.bucketPolicyMap, bucketName) + } else { + sys.bucketPolicyMap[bucketName] = policy + } +} + +// Remove - removes policy for given bucket name. +func (sys *PolicySys) Remove(bucketName string) { + sys.Lock() + defer sys.Unlock() + + delete(sys.bucketPolicyMap, bucketName) +} + +// IsAllowed - checks given policy args is allowed to continue the Rest API. +func (sys *PolicySys) IsAllowed(args policy.Args) bool { + sys.RLock() + defer sys.RUnlock() + + // If policy is available for given bucket, check the policy. 
+ if p, found := sys.bucketPolicyMap[args.BucketName]; found { + return p.IsAllowed(args) + } + + // As policy is not available for given bucket name, returns IsOwner i.e. + // operation is allowed only for owner. + return args.IsOwner +} + +// Init - initializes policy system from policy.json of all buckets. +func (sys *PolicySys) Init(objAPI ObjectLayer) error { + if objAPI == nil { + return errInvalidArgument + } + + buckets, err := objAPI.ListBuckets(context.Background()) + if err != nil { + return err + } + + for _, bucket := range buckets { + config, err := GetPolicyConfig(objAPI, bucket.Name) + if err != nil { + if _, ok := err.(BucketPolicyNotFound); !ok { + return err + } + } else { + sys.Set(bucket.Name, *config) + } + } + + return nil +} + +// NewPolicySys - creates new policy system. +func NewPolicySys() *PolicySys { + return &PolicySys{ + bucketPolicyMap: make(map[string]policy.Policy), + } +} + +func getConditionValues(request *http.Request, locationConstraint string) map[string][]string { + args := make(map[string][]string) + + for key, values := range request.Header { + if existingValues, found := args[key]; found { + args[key] = append(existingValues, values...) + } else { + args[key] = values + } + } + + for key, values := range request.URL.Query() { + if existingValues, found := args[key]; found { + args[key] = append(existingValues, values...) + } else { + args[key] = values + } + } + + args["SourceIp"] = []string{handlers.GetSourceIP(request)} + + if locationConstraint != "" { + args["LocationConstraint"] = []string{locationConstraint} + } + + return args +} + +// GetPolicyConfig - get policy config for given bucket name. +func GetPolicyConfig(objAPI ObjectLayer, bucketName string) (*policy.Policy, error) { + // Construct path to policy.json for the given bucket. 
+ configFile := path.Join(bucketConfigPrefix, bucketName, bucketPolicyConfig) + + reader, err := readConfig(context.Background(), objAPI, configFile) + if err != nil { + if err == errConfigNotFound { + err = BucketPolicyNotFound{Bucket: bucketName} + } + + return nil, err + } + + bucketPolicy, err := policy.ParseConfig(reader, bucketName) + if err != nil { + return nil, err + } + + return bucketPolicy, nil +} + +func savePolicyConfig(objAPI ObjectLayer, bucketName string, bucketPolicy *policy.Policy) error { + data, err := json.Marshal(bucketPolicy) + if err != nil { + return err + } + + // Construct path to policy.json for the given bucket. + configFile := path.Join(bucketConfigPrefix, bucketName, bucketPolicyConfig) + + return saveConfig(objAPI, configFile, data) +} + +func removePolicyConfig(ctx context.Context, objAPI ObjectLayer, bucketName string) error { + // Construct path to policy.json for the given bucket. + configFile := path.Join(bucketConfigPrefix, bucketName, bucketPolicyConfig) + + if err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile); err != nil { + if _, ok := err.(ObjectNotFound); ok { + return BucketPolicyNotFound{Bucket: bucketName} + } + + return err + } + + return nil +} + +// PolicyToBucketAccessPolicy - converts policy.Policy to minio-go/policy.BucketAccessPolicy. +func PolicyToBucketAccessPolicy(bucketPolicy *policy.Policy) (*miniogopolicy.BucketAccessPolicy, error) { + // Return empty BucketAccessPolicy for empty bucket policy. + if bucketPolicy == nil { + return &miniogopolicy.BucketAccessPolicy{Version: policy.DefaultVersion}, nil + } + + data, err := json.Marshal(bucketPolicy) + if err != nil { + // This should not happen because bucketPolicy is valid to convert to JSON data. + return nil, err + } + + var policyInfo miniogopolicy.BucketAccessPolicy + if err = json.Unmarshal(data, &policyInfo); err != nil { + // This should not happen because data is valid to JSON data. 
+ return nil, err + } + + return &policyInfo, nil +} + +// BucketAccessPolicyToPolicy - converts minio-go/policy.BucketAccessPolicy to policy.Policy. +func BucketAccessPolicyToPolicy(policyInfo *miniogopolicy.BucketAccessPolicy) (*policy.Policy, error) { + data, err := json.Marshal(policyInfo) + if err != nil { + // This should not happen because policyInfo is valid to convert to JSON data. + return nil, err + } + + var bucketPolicy policy.Policy + if err = json.Unmarshal(data, &bucketPolicy); err != nil { + // This should not happen because data is valid to JSON data. + return nil, err + } + + return &bucketPolicy, nil +} diff --git a/cmd/policy_test.go b/cmd/policy_test.go new file mode 100644 index 000000000..1bb259c1e --- /dev/null +++ b/cmd/policy_test.go @@ -0,0 +1,424 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package cmd + +import ( + "reflect" + "testing" + + miniogopolicy "github.com/minio/minio-go/pkg/policy" + "github.com/minio/minio-go/pkg/set" + "github.com/minio/minio/pkg/policy" + "github.com/minio/minio/pkg/policy/condition" +) + +func TestPolicySysSet(t *testing.T) { + case1PolicySys := NewPolicySys() + case1Policy := policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{ + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.PutObjectAction), + policy.NewResourceSet(policy.NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + }, + } + case1Result := NewPolicySys() + case1Result.bucketPolicyMap["mybucket"] = case1Policy + + case2PolicySys := NewPolicySys() + case2PolicySys.bucketPolicyMap["mybucket"] = case1Policy + case2Policy := policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{ + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.GetObjectAction), + policy.NewResourceSet(policy.NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + }, + } + case2Result := NewPolicySys() + case2Result.bucketPolicyMap["mybucket"] = case2Policy + + case3PolicySys := NewPolicySys() + case3PolicySys.bucketPolicyMap["mybucket"] = case2Policy + case3Policy := policy.Policy{ + ID: "MyPolicyForMyBucket", + Version: policy.DefaultVersion, + } + case3Result := NewPolicySys() + + testCases := []struct { + policySys *PolicySys + bucketName string + bucketPolicy policy.Policy + expectedResult *PolicySys + }{ + {case1PolicySys, "mybucket", case1Policy, case1Result}, + {case2PolicySys, "mybucket", case2Policy, case2Result}, + {case3PolicySys, "mybucket", case3Policy, case3Result}, + } + + for i, testCase := range testCases { + result := testCase.policySys + result.Set(testCase.bucketName, testCase.bucketPolicy) + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: 
expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestPolicySysRemove(t *testing.T) { + case1Policy := policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{ + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.PutObjectAction), + policy.NewResourceSet(policy.NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + }, + } + case1PolicySys := NewPolicySys() + case1PolicySys.bucketPolicyMap["mybucket"] = case1Policy + case1Result := NewPolicySys() + + case2Policy := policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{ + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.GetObjectAction), + policy.NewResourceSet(policy.NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + }, + } + case2PolicySys := NewPolicySys() + case2PolicySys.bucketPolicyMap["mybucket"] = case2Policy + case2Result := NewPolicySys() + case2Result.bucketPolicyMap["mybucket"] = case2Policy + + case3PolicySys := NewPolicySys() + case3Result := NewPolicySys() + + testCases := []struct { + policySys *PolicySys + bucketName string + expectedResult *PolicySys + }{ + {case1PolicySys, "mybucket", case1Result}, + {case2PolicySys, "yourbucket", case2Result}, + {case3PolicySys, "mybucket", case3Result}, + } + + for i, testCase := range testCases { + result := testCase.policySys + result.Remove(testCase.bucketName) + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestPolicySysIsAllowed(t *testing.T) { + policySys := NewPolicySys() + policySys.Set("mybucket", policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{ + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.GetBucketLocationAction), + 
policy.NewResourceSet(policy.NewResource("mybucket", "")), + condition.NewFunctions(), + ), + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.PutObjectAction), + policy.NewResourceSet(policy.NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + }, + }) + + anonGetBucketLocationArgs := policy.Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: policy.GetBucketLocationAction, + BucketName: "mybucket", + ConditionValues: map[string][]string{}, + } + + anonPutObjectActionArgs := policy.Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: policy.PutObjectAction, + BucketName: "mybucket", + ConditionValues: map[string][]string{ + "x-amz-copy-source": {"mybucket/myobject"}, + "SourceIp": {"192.168.1.10"}, + }, + ObjectName: "myobject", + } + + anonGetObjectActionArgs := policy.Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: policy.GetObjectAction, + BucketName: "mybucket", + ConditionValues: map[string][]string{}, + ObjectName: "myobject", + } + + getBucketLocationArgs := policy.Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: policy.GetBucketLocationAction, + BucketName: "mybucket", + ConditionValues: map[string][]string{}, + IsOwner: true, + } + + putObjectActionArgs := policy.Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: policy.PutObjectAction, + BucketName: "mybucket", + ConditionValues: map[string][]string{ + "x-amz-copy-source": {"mybucket/myobject"}, + "SourceIp": {"192.168.1.10"}, + }, + IsOwner: true, + ObjectName: "myobject", + } + + getObjectActionArgs := policy.Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: policy.GetObjectAction, + BucketName: "mybucket", + ConditionValues: map[string][]string{}, + IsOwner: true, + ObjectName: "myobject", + } + + yourbucketAnonGetObjectActionArgs := policy.Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: policy.GetObjectAction, + BucketName: "yourbucket", + ConditionValues: map[string][]string{}, + ObjectName: 
"yourobject", + } + + yourbucketGetObjectActionArgs := policy.Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: policy.GetObjectAction, + BucketName: "yourbucket", + ConditionValues: map[string][]string{}, + IsOwner: true, + ObjectName: "yourobject", + } + + testCases := []struct { + policySys *PolicySys + args policy.Args + expectedResult bool + }{ + {policySys, anonGetBucketLocationArgs, true}, + {policySys, anonPutObjectActionArgs, true}, + {policySys, anonGetObjectActionArgs, false}, + {policySys, getBucketLocationArgs, true}, + {policySys, putObjectActionArgs, true}, + {policySys, getObjectActionArgs, true}, + {policySys, yourbucketAnonGetObjectActionArgs, false}, + {policySys, yourbucketGetObjectActionArgs, true}, + } + + for i, testCase := range testCases { + result := testCase.policySys.IsAllowed(testCase.args) + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func getReadOnlyStatement(bucketName, prefix string) []miniogopolicy.Statement { + return []miniogopolicy.Statement{ + { + Effect: string(policy.Allow), + Principal: miniogopolicy.User{AWS: set.CreateStringSet("*")}, + Resources: set.CreateStringSet(policy.NewResource(bucketName, "").String()), + Actions: set.CreateStringSet("s3:GetBucketLocation", "s3:ListBucket"), + }, + { + Effect: string(policy.Allow), + Principal: miniogopolicy.User{AWS: set.CreateStringSet("*")}, + Resources: set.CreateStringSet(policy.NewResource(bucketName, prefix).String()), + Actions: set.CreateStringSet("s3:GetObject"), + }, + } +} + +func TestPolicyToBucketAccessPolicy(t *testing.T) { + case1Policy := &policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{ + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.GetBucketLocationAction, policy.ListBucketAction), + policy.NewResourceSet(policy.NewResource("mybucket", "")), + condition.NewFunctions(), + ), + 
policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.GetObjectAction), + policy.NewResourceSet(policy.NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + }, + } + + case1Result := &miniogopolicy.BucketAccessPolicy{ + Version: policy.DefaultVersion, + Statements: getReadOnlyStatement("mybucket", "/myobject*"), + } + + case2Policy := &policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{}, + } + + case2Result := &miniogopolicy.BucketAccessPolicy{ + Version: policy.DefaultVersion, + Statements: []miniogopolicy.Statement{}, + } + + case3Policy := &policy.Policy{ + Version: "12-10-2012", + Statements: []policy.Statement{ + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.PutObjectAction), + policy.NewResourceSet(policy.NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + }, + } + + testCases := []struct { + bucketPolicy *policy.Policy + expectedResult *miniogopolicy.BucketAccessPolicy + expectErr bool + }{ + {case1Policy, case1Result, false}, + {case2Policy, case2Result, false}, + {case3Policy, nil, true}, + } + + for i, testCase := range testCases { + result, err := PolicyToBucketAccessPolicy(testCase.bucketPolicy) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %+v, got: %+v\n", i+1, testCase.expectedResult, result) + } + } + } +} + +func TestBucketAccessPolicyToPolicy(t *testing.T) { + case1PolicyInfo := &miniogopolicy.BucketAccessPolicy{ + Version: policy.DefaultVersion, + Statements: getReadOnlyStatement("mybucket", "/myobject*"), + } + + case1Result := &policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{ + policy.NewStatement( + 
policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.GetBucketLocationAction, policy.ListBucketAction), + policy.NewResourceSet(policy.NewResource("mybucket", "")), + condition.NewFunctions(), + ), + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.GetObjectAction), + policy.NewResourceSet(policy.NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + }, + } + + case2PolicyInfo := &miniogopolicy.BucketAccessPolicy{ + Version: policy.DefaultVersion, + Statements: []miniogopolicy.Statement{}, + } + + case2Result := &policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{}, + } + + case3PolicyInfo := &miniogopolicy.BucketAccessPolicy{ + Version: "12-10-2012", + Statements: getReadOnlyStatement("mybucket", "/myobject*"), + } + + testCases := []struct { + policyInfo *miniogopolicy.BucketAccessPolicy + expectedResult *policy.Policy + expectErr bool + }{ + {case1PolicyInfo, case1Result, false}, + {case2PolicyInfo, case2Result, false}, + {case3PolicyInfo, nil, true}, + } + + for i, testCase := range testCases { + result, err := BucketAccessPolicyToPolicy(testCase.policyInfo) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %+v, got: %+v\n", i+1, testCase.expectedResult, result) + } + } + } +} diff --git a/cmd/server-main.go b/cmd/server-main.go index e405761d1..4168169ff 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -239,9 +239,12 @@ func serverMain(ctx *cli.Context) { handler, err = configureServerHandler(globalEndpoints) logger.FatalIf(err, "Unable to configure one of server's RPC services.") - // Initialize notification system. + // Create new notification system. 
globalNotificationSys, err = NewNotificationSys(globalServerConfig, globalEndpoints) - logger.FatalIf(err, "Unable to initialize notification system.") + logger.FatalIf(err, "Unable to create new notification system.") + + // Create new policy system. + globalPolicySys = NewPolicySys() // Initialize Admin Peers inter-node communication only in distributed setup. initGlobalAdminPeers(globalEndpoints) diff --git a/cmd/server_test.go b/cmd/server_test.go index b6f930fa1..9ca777325 100644 --- a/cmd/server_test.go +++ b/cmd/server_test.go @@ -36,6 +36,7 @@ import ( "time" humanize "github.com/dustin/go-humanize" + "github.com/minio/minio/pkg/policy" ) // API suite container common to both FS and XL. @@ -319,7 +320,7 @@ func (s *TestSuiteCommon) TestBucketSQSNotificationAMQP(c *check) { // Deletes the policy and verifies the deletion by fetching it back. func (s *TestSuiteCommon) TestBucketPolicy(c *check) { // Sample bucket policy. - bucketPolicyBuf := `{"Version":"2012-10-17","Statement":[{"Action":["s3:GetBucketLocation","s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s/this*"],"Sid":""}]}` + bucketPolicyBuf := `{"Version":"2012-10-17","Statement":[{"Action":["s3:GetBucketLocation","s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s"]},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s/this*"]}]}` // generate a random bucket Name. bucketName := getRandomBucketName() @@ -361,7 +362,11 @@ func (s *TestSuiteCommon) TestBucketPolicy(c *check) { bucketPolicyReadBuf, err := ioutil.ReadAll(response.Body) c.Assert(err, nil) // Verify if downloaded policy matches with previousy uploaded. 
- c.Assert(bytes.Equal([]byte(bucketPolicyStr), bucketPolicyReadBuf), true) + expectedPolicy, err := policy.ParseConfig(strings.NewReader(bucketPolicyStr), bucketName) + c.Assert(err, nil) + gotPolicy, err := policy.ParseConfig(bytes.NewReader(bucketPolicyReadBuf), bucketName) + c.Assert(err, nil) + c.Assert(reflect.DeepEqual(expectedPolicy, gotPolicy), true) // Delete policy. request, err = newTestSignedRequest("DELETE", getDeletePolicyURL(s.endPoint, bucketName), 0, nil, diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index 211d90b3f..4cbe980f7 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -53,12 +53,12 @@ import ( "github.com/fatih/color" "github.com/gorilla/mux" - "github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/s3signer" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/bpool" "github.com/minio/minio/pkg/hash" + "github.com/minio/minio/pkg/policy" ) // Tests should initNSLock only once. @@ -354,9 +354,12 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer { globalMinioAddr = getEndpointsLocalAddr(testServer.Disks) globalNotificationSys, err = NewNotificationSys(globalServerConfig, testServer.Disks) if err != nil { - t.Fatalf("Unable to initialize queue configuration") + t.Fatalf("Unable to create new notification system. %v", err) } + // Create new policy system. + globalPolicySys = NewPolicySys() + return testServer } @@ -1715,17 +1718,14 @@ func newTestObjectLayer(endpoints EndpointList) (newObject ObjectLayer, err erro return xl.storageDisks } - // Initialize and load bucket policies. - xl.bucketPolicies, err = initBucketPolicies(xl) - if err != nil { - return nil, err - } - - // Initialize a new event notifier. + // Create new notification system. if globalNotificationSys, err = NewNotificationSys(globalServerConfig, endpoints); err != nil { return nil, err } + // Create new policy system. 
+ globalPolicySys = NewPolicySys() + return xl, nil } @@ -1821,7 +1821,7 @@ func prepareTestBackend(instanceType string) (ObjectLayer, []string, error) { // STEP 2: Set the policy to allow the unsigned request, use the policyFunc to obtain the relevant statement and call // the handler again to verify its success. func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketName, objectName, instanceType string, apiRouter http.Handler, - anonReq *http.Request, policyFunc func(string, string) policy.Statement) { + anonReq *http.Request, bucketPolicy *policy.Policy) { anonTestStr := "Anonymous HTTP request test" unknownSignTestStr := "Unknown HTTP signature test" @@ -1863,7 +1863,8 @@ func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketN // HEAD HTTTP request doesn't contain response body. if anonReq.Method != "HEAD" { // read the response body. - actualContent, err := ioutil.ReadAll(rec.Body) + var actualContent []byte + actualContent, err = ioutil.ReadAll(rec.Body) if err != nil { t.Fatal(failTestStr(anonTestStr, fmt.Sprintf("Failed parsing response body: %v", err))) } @@ -1872,13 +1873,13 @@ func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketN t.Fatal(failTestStr(anonTestStr, "error response content differs from expected value")) } } - // Set write only policy on bucket to allow anonymous HTTP request for the operation under test. - // request to go through. - bp := policy.BucketAccessPolicy{ - Version: "1.0", - Statements: []policy.Statement{policyFunc(bucketName, "")}, + + if err := obj.SetBucketPolicy(context.Background(), bucketName, bucketPolicy); err != nil { + t.Fatalf("unexpected error. %v", err) } - obj.SetBucketPolicy(context.Background(), bucketName, bp) + globalPolicySys.Set(bucketName, *bucketPolicy) + defer globalPolicySys.Remove(bucketName) + // now call the handler again with the unsigned/anonymous request, it should be accepted. 
rec = httptest.NewRecorder() diff --git a/cmd/web-handlers.go b/cmd/web-handlers.go index 0040c4823..b6827a885 100644 --- a/cmd/web-handlers.go +++ b/cmd/web-handlers.go @@ -33,12 +33,13 @@ import ( humanize "github.com/dustin/go-humanize" "github.com/gorilla/mux" "github.com/gorilla/rpc/v2/json2" - "github.com/minio/minio-go/pkg/policy" + miniogopolicy "github.com/minio/minio-go/pkg/policy" "github.com/minio/minio/browser" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/hash" + "github.com/minio/minio/pkg/policy" ) // WebGenericArgs - empty struct for calls that don't accept arguments @@ -155,15 +156,24 @@ func (web *webAPIHandlers) DeleteBucket(r *http.Request, args *RemoveBucketArgs, return toJSONError(errAuthentication) } + ctx := context.Background() + deleteBucket := objectAPI.DeleteBucket if web.CacheAPI() != nil { deleteBucket = web.CacheAPI().DeleteBucket } - err := deleteBucket(context.Background(), args.BucketName) - if err != nil { + + if err := deleteBucket(ctx, args.BucketName); err != nil { return toJSONError(err, args.BucketName) } + globalNotificationSys.RemoveNotification(args.BucketName) + globalPolicySys.Remove(args.BucketName) + for addr, err := range globalNotificationSys.DeleteBucket(args.BucketName) { + logger.GetReqInfo(ctx).AppendTags("remotePeer", addr.Name) + logger.LogIf(ctx, err) + } + reply.UIVersion = browser.UIVersion return nil } @@ -249,26 +259,37 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r if web.CacheAPI() != nil { listObjects = web.CacheAPI().ListObjects } - prefix := args.Prefix + "test" // To test if GetObject/PutObject with the specified prefix is allowed. 
- readable := isBucketActionAllowed("s3:GetObject", args.BucketName, prefix, objectAPI) - writable := isBucketActionAllowed("s3:PutObject", args.BucketName, prefix, objectAPI) - authErr := webRequestAuthenticate(r) - switch { - case authErr == errAuthentication: - return toJSONError(authErr) - case authErr == nil: - break - case readable && writable: - reply.Writable = true - break - case readable: - break - case writable: - reply.Writable = true - return nil - default: - return errAuthentication + + // Check if anonymous (non-owner) has access to download objects. + readable := globalPolicySys.IsAllowed(policy.Args{ + Action: policy.GetObjectAction, + BucketName: args.BucketName, + ConditionValues: getConditionValues(r, ""), + IsOwner: false, + ObjectName: args.Prefix + "/", + }) + // Check if anonymous (non-owner) has access to upload objects. + writable := globalPolicySys.IsAllowed(policy.Args{ + Action: policy.PutObjectAction, + BucketName: args.BucketName, + ConditionValues: getConditionValues(r, ""), + IsOwner: false, + ObjectName: args.Prefix + "/", + }) + + if authErr := webRequestAuthenticate(r); authErr != nil { + if authErr == errAuthentication { + return toJSONError(authErr) + } + + // Error out anonymous (non-owner) has no access download or upload objects. 
+ if !readable && !writable { + return errAuthentication + } + + reply.Writable = writable } + lo, err := listObjects(context.Background(), args.BucketName, args.Prefix, args.Marker, slashSeparator, 1000) if err != nil { return &json2.Error{Message: err.Error()} @@ -556,14 +577,23 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) { bucket := vars["bucket"] object := vars["object"] - authErr := webRequestAuthenticate(r) - if authErr == errAuthentication { - writeWebErrorResponse(w, errAuthentication) - return - } - if authErr != nil && !isBucketActionAllowed("s3:PutObject", bucket, object, objectAPI) { - writeWebErrorResponse(w, errAuthentication) - return + if authErr := webRequestAuthenticate(r); authErr != nil { + if authErr == errAuthentication { + writeWebErrorResponse(w, errAuthentication) + return + } + + // Check if anonymous (non-owner) has access to upload objects. + if !globalPolicySys.IsAllowed(policy.Args{ + Action: policy.PutObjectAction, + BucketName: bucket, + ConditionValues: getConditionValues(r, ""), + IsOwner: false, + ObjectName: object, + }) { + writeWebErrorResponse(w, errAuthentication) + return + } } // Require Content-Length to be set in the request @@ -614,9 +644,18 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) { object := vars["object"] token := r.URL.Query().Get("token") - if !isAuthTokenValid(token) && !isBucketActionAllowed("s3:GetObject", bucket, object, objectAPI) { - writeWebErrorResponse(w, errAuthentication) - return + if !isAuthTokenValid(token) { + // Check if anonymous (non-owner) has access to download objects. 
+ if !globalPolicySys.IsAllowed(policy.Args{ + Action: policy.GetObjectAction, + BucketName: bucket, + ConditionValues: getConditionValues(r, ""), + IsOwner: false, + ObjectName: object, + }) { + writeWebErrorResponse(w, errAuthentication) + return + } } getObject := objectAPI.GetObject @@ -669,7 +708,14 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) { token := r.URL.Query().Get("token") if !isAuthTokenValid(token) { for _, object := range args.Objects { - if !isBucketActionAllowed("s3:GetObject", args.BucketName, pathJoin(args.Prefix, object), objectAPI) { + // Check if anonymous (non-owner) has access to download objects. + if !globalPolicySys.IsAllowed(policy.Args{ + Action: policy.GetObjectAction, + BucketName: args.BucketName, + ConditionValues: getConditionValues(r, ""), + IsOwner: false, + ObjectName: pathJoin(args.Prefix, object), + }) { writeWebErrorResponse(w, errAuthentication) return } @@ -742,8 +788,8 @@ type GetBucketPolicyArgs struct { // GetBucketPolicyRep - get bucket policy reply. type GetBucketPolicyRep struct { - UIVersion string `json:"uiVersion"` - Policy policy.BucketPolicy `json:"policy"` + UIVersion string `json:"uiVersion"` + Policy miniogopolicy.BucketPolicy `json:"policy"` } // GetBucketPolicy - get bucket policy for the requested prefix. @@ -757,16 +803,21 @@ func (web *webAPIHandlers) GetBucketPolicy(r *http.Request, args *GetBucketPolic return toJSONError(errAuthentication) } - var policyInfo, err = objectAPI.GetBucketPolicy(context.Background(), args.BucketName) + bucketPolicy, err := objectAPI.GetBucketPolicy(context.Background(), args.BucketName) if err != nil { - _, ok := err.(BucketPolicyNotFound) - if !ok { + if _, ok := err.(BucketPolicyNotFound); !ok { return toJSONError(err, args.BucketName) } } + policyInfo, err := PolicyToBucketAccessPolicy(bucketPolicy) + if err != nil { + // This should not happen. 
+ return toJSONError(err, args.BucketName) + } + reply.UIVersion = browser.UIVersion - reply.Policy = policy.GetPolicy(policyInfo.Statements, args.BucketName, args.Prefix) + reply.Policy = miniogopolicy.GetPolicy(policyInfo.Statements, args.BucketName, args.Prefix) return nil } @@ -778,9 +829,9 @@ type ListAllBucketPoliciesArgs struct { // BucketAccessPolicy - Collection of canned bucket policy at a given prefix. type BucketAccessPolicy struct { - Bucket string `json:"bucket"` - Prefix string `json:"prefix"` - Policy policy.BucketPolicy `json:"policy"` + Bucket string `json:"bucket"` + Prefix string `json:"prefix"` + Policy miniogopolicy.BucketPolicy `json:"policy"` } // ListAllBucketPoliciesRep - get all bucket policy reply. @@ -789,7 +840,7 @@ type ListAllBucketPoliciesRep struct { Policies []BucketAccessPolicy `json:"policies"` } -// GetllBucketPolicy - get all bucket policy. +// ListAllBucketPolicies - get all bucket policy. func (web *webAPIHandlers) ListAllBucketPolicies(r *http.Request, args *ListAllBucketPoliciesArgs, reply *ListAllBucketPoliciesRep) error { objectAPI := web.ObjectAPI() if objectAPI == nil { @@ -799,15 +850,22 @@ func (web *webAPIHandlers) ListAllBucketPolicies(r *http.Request, args *ListAllB if !isHTTPRequestValid(r) { return toJSONError(errAuthentication) } - var policyInfo, err = objectAPI.GetBucketPolicy(context.Background(), args.BucketName) + + bucketPolicy, err := objectAPI.GetBucketPolicy(context.Background(), args.BucketName) if err != nil { - _, ok := err.(PolicyNotFound) - if !ok { + if _, ok := err.(BucketPolicyNotFound); !ok { return toJSONError(err, args.BucketName) } } + + policyInfo, err := PolicyToBucketAccessPolicy(bucketPolicy) + if err != nil { + // This should not happen. 
+ return toJSONError(err, args.BucketName) + } + reply.UIVersion = browser.UIVersion - for prefix, policy := range policy.GetPolicies(policyInfo.Statements, args.BucketName, "") { + for prefix, policy := range miniogopolicy.GetPolicies(policyInfo.Statements, args.BucketName, "") { bucketName, objectPrefix := urlPath2BucketObjectName(prefix) objectPrefix = strings.TrimSuffix(objectPrefix, "*") reply.Policies = append(reply.Policies, BucketAccessPolicy{ @@ -816,18 +874,19 @@ func (web *webAPIHandlers) ListAllBucketPolicies(r *http.Request, args *ListAllB Policy: policy, }) } + return nil } -// SetBucketPolicyArgs - set bucket policy args. -type SetBucketPolicyArgs struct { +// SetBucketPolicyWebArgs - set bucket policy args. +type SetBucketPolicyWebArgs struct { BucketName string `json:"bucketName"` Prefix string `json:"prefix"` Policy string `json:"policy"` } // SetBucketPolicy - set bucket policy. -func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolicyArgs, reply *WebGenericRep) error { +func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolicyWebArgs, reply *WebGenericRep) error { objectAPI := web.ObjectAPI() reply.UIVersion = browser.UIVersion @@ -839,52 +898,56 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic return toJSONError(errAuthentication) } - bucketP := policy.BucketPolicy(args.Policy) - if !bucketP.IsValidBucketPolicy() { + policyType := miniogopolicy.BucketPolicy(args.Policy) + if !policyType.IsValidBucketPolicy() { return &json2.Error{ Message: "Invalid policy type " + args.Policy, } } - var policyInfo, err = objectAPI.GetBucketPolicy(context.Background(), args.BucketName) + ctx := context.Background() + + bucketPolicy, err := objectAPI.GetBucketPolicy(ctx, args.BucketName) if err != nil { - if _, ok := err.(PolicyNotFound); !ok { + if _, ok := err.(BucketPolicyNotFound); !ok { return toJSONError(err, args.BucketName) } - policyInfo = 
policy.BucketAccessPolicy{Version: "2012-10-17"} } - policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, bucketP, args.BucketName, args.Prefix) + policyInfo, err := PolicyToBucketAccessPolicy(bucketPolicy) + if err != nil { + // This should not happen. + return toJSONError(err, args.BucketName) + } + + policyInfo.Statements = miniogopolicy.SetPolicy(policyInfo.Statements, policyType, args.BucketName, args.Prefix) if len(policyInfo.Statements) == 0 { - if err = objectAPI.DeleteBucketPolicy(context.Background(), args.BucketName); err != nil { + if err = objectAPI.DeleteBucketPolicy(ctx, args.BucketName); err != nil { return toJSONError(err, args.BucketName) } + + globalPolicySys.Remove(args.BucketName) return nil } - _, err = json.Marshal(policyInfo) + bucketPolicy, err = BucketAccessPolicyToPolicy(policyInfo) if err != nil { - return toJSONError(err) - } - - // Parse check bucket policy. - if s3Error := checkBucketPolicyResources(args.BucketName, policyInfo); s3Error != ErrNone { - apiErr := getAPIError(s3Error) - var err error - if apiErr.Code == "XMinioPolicyNesting" { - err = PolicyNesting{} - } else { - err = fmt.Errorf(apiErr.Description) - } + // This should not happen. return toJSONError(err, args.BucketName) } // Parse validate and save bucket policy. 
- if err := objectAPI.SetBucketPolicy(context.Background(), args.BucketName, policyInfo); err != nil { + if err := objectAPI.SetBucketPolicy(ctx, args.BucketName, bucketPolicy); err != nil { return toJSONError(err, args.BucketName) } + globalPolicySys.Set(args.BucketName, *bucketPolicy) + for addr, err := range globalNotificationSys.SetBucketPolicy(args.BucketName, bucketPolicy) { + logger.GetReqInfo(ctx).AppendTags("remotePeer", addr.Name) + logger.LogIf(ctx, err) + } + return nil } diff --git a/cmd/web-handlers_test.go b/cmd/web-handlers_test.go index 4be9fa98f..102447a13 100644 --- a/cmd/web-handlers_test.go +++ b/cmd/web-handlers_test.go @@ -37,9 +37,10 @@ import ( jwtgo "github.com/dgrijalva/jwt-go" humanize "github.com/dustin/go-humanize" - "github.com/minio/minio-go/pkg/policy" - "github.com/minio/minio-go/pkg/set" + miniogopolicy "github.com/minio/minio-go/pkg/policy" "github.com/minio/minio/pkg/hash" + "github.com/minio/minio/pkg/policy" + "github.com/minio/minio/pkg/policy/condition" ) // Implement a dummy flush writer. @@ -555,12 +556,22 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa t.Fatalf("Expected error `%s`", err) } - policy := policy.BucketAccessPolicy{ - Version: "1.0", - Statements: []policy.Statement{getReadOnlyObjectStatement(bucketName, "")}, + bucketPolicy := &policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.GetObjectAction), + policy.NewResourceSet(policy.NewResource(bucketName, "*")), + condition.NewFunctions(), + )}, } - obj.SetBucketPolicy(context.Background(), bucketName, policy) + if err = obj.SetBucketPolicy(context.Background(), bucketName, bucketPolicy); err != nil { + t.Fatalf("unexpected error. %v", err) + } + globalPolicySys.Set(bucketName, *bucketPolicy) + defer globalPolicySys.Remove(bucketName) // Unauthenticated ListObjects with READ bucket policy should succeed. 
err, reply = test("") @@ -929,12 +940,22 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler t.Fatalf("Expected the response status to be 403, but instead found `%d`", code) } - bp := policy.BucketAccessPolicy{ - Version: "1.0", - Statements: []policy.Statement{getWriteOnlyObjectStatement(bucketName, "")}, + bucketPolicy := &policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.PutObjectAction), + policy.NewResourceSet(policy.NewResource(bucketName, "*")), + condition.NewFunctions(), + )}, } - obj.SetBucketPolicy(context.Background(), bucketName, bp) + if err := obj.SetBucketPolicy(context.Background(), bucketName, bucketPolicy); err != nil { + t.Fatalf("unexpected error. %v", err) + } + globalPolicySys.Set(bucketName, *bucketPolicy) + defer globalPolicySys.Remove(bucketName) // Unauthenticated upload with WRITE policy should succeed. code = test("", true) @@ -1036,12 +1057,22 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl t.Fatalf("Expected the response status to be 403, but instead found `%d`", code) } - bp := policy.BucketAccessPolicy{ - Version: "1.0", - Statements: []policy.Statement{getReadOnlyObjectStatement(bucketName, "")}, + bucketPolicy := &policy.Policy{ + Version: policy.DefaultVersion, + Statements: []policy.Statement{policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.GetObjectAction), + policy.NewResourceSet(policy.NewResource(bucketName, "*")), + condition.NewFunctions(), + )}, } - obj.SetBucketPolicy(context.Background(), bucketName, bp) + if err := obj.SetBucketPolicy(context.Background(), bucketName, bucketPolicy); err != nil { + t.Fatalf("unexpected error. 
%v", err) + } + globalPolicySys.Set(bucketName, *bucketPolicy) + defer globalPolicySys.Remove(bucketName) // Unauthenticated download with READ policy should succeed. code, bodyContent = test("") @@ -1260,43 +1291,40 @@ func testWebGetBucketPolicyHandler(obj ObjectLayer, instanceType string, t TestE rec := httptest.NewRecorder() bucketName := getRandomBucketName() - if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil { + if err = obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil { t.Fatal("Unexpected error: ", err) } - policyVal := policy.BucketAccessPolicy{ - Version: "2012-10-17", + bucketPolicy := &policy.Policy{ + Version: policy.DefaultVersion, Statements: []policy.Statement{ - { - Actions: set.CreateStringSet("s3:GetBucketLocation", "s3:ListBucket"), - Effect: "Allow", - Principal: policy.User{ - AWS: set.CreateStringSet("*"), - }, - Resources: set.CreateStringSet(bucketARNPrefix + bucketName), - Sid: "", - }, - { - Actions: set.CreateStringSet("s3:GetObject"), - Effect: "Allow", - Principal: policy.User{ - AWS: set.CreateStringSet("*"), - }, - Resources: set.CreateStringSet(bucketARNPrefix + bucketName + "/*"), - Sid: "", - }, + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.GetBucketLocationAction, policy.ListBucketAction), + policy.NewResourceSet(policy.NewResource(bucketName, "")), + condition.NewFunctions(), + ), + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.GetObjectAction), + policy.NewResourceSet(policy.NewResource(bucketName, "*")), + condition.NewFunctions(), + ), }, } - if err := writeBucketPolicy(context.Background(), bucketName, obj, policyVal); err != nil { + + if err = savePolicyConfig(obj, bucketName, bucketPolicy); err != nil { t.Fatal("Unexpected error: ", err) } testCases := []struct { bucketName string prefix string - expectedResult policy.BucketPolicy + expectedResult 
miniogopolicy.BucketPolicy }{ - {bucketName, "", policy.BucketPolicyReadOnly}, + {bucketName, "", miniogopolicy.BucketPolicyReadOnly}, } for i, testCase := range testCases { @@ -1338,57 +1366,63 @@ func testWebListAllBucketPoliciesHandler(obj ObjectLayer, instanceType string, t rec := httptest.NewRecorder() bucketName := getRandomBucketName() - if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil { + if err = obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil { t.Fatal("Unexpected error: ", err) } - stringEqualsConditions := policy.ConditionMap{} - stringEqualsConditions["StringEquals"] = make(policy.ConditionKeyMap) - stringEqualsConditions["StringEquals"].Add("s3:prefix", set.CreateStringSet("hello")) + func1, err := condition.NewStringEqualsFunc(condition.S3Prefix, "hello") + if err != nil { + t.Fatalf("Unable to create string equals condition function. %v", err) + } - policyVal := policy.BucketAccessPolicy{ - Version: "2012-10-17", + bucketPolicy := &policy.Policy{ + Version: policy.DefaultVersion, Statements: []policy.Statement{ - { - Actions: set.CreateStringSet("s3:GetBucketLocation"), - Effect: "Allow", - Principal: policy.User{AWS: set.CreateStringSet("*")}, - Resources: set.CreateStringSet(bucketARNPrefix + bucketName), - Sid: "", - }, - { - Actions: set.CreateStringSet("s3:ListBucket"), - Conditions: stringEqualsConditions, - Effect: "Allow", - Principal: policy.User{AWS: set.CreateStringSet("*")}, - Resources: set.CreateStringSet(bucketARNPrefix + bucketName), - Sid: "", - }, - { - Actions: set.CreateStringSet("s3:ListBucketMultipartUploads"), - Effect: "Allow", - Principal: policy.User{AWS: set.CreateStringSet("*")}, - Resources: set.CreateStringSet(bucketARNPrefix + bucketName), - Sid: "", - }, - { - Actions: set.CreateStringSet("s3:AbortMultipartUpload", "s3:DeleteObject", - "s3:GetObject", "s3:ListMultipartUploadParts", "s3:PutObject"), - Effect: "Allow", - Principal: policy.User{AWS: 
set.CreateStringSet("*")}, - Resources: set.CreateStringSet(bucketARNPrefix + bucketName + "/hello*"), - Sid: "", - }, + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.GetBucketLocationAction), + policy.NewResourceSet(policy.NewResource(bucketName, "")), + condition.NewFunctions(), + ), + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.ListBucketAction), + policy.NewResourceSet(policy.NewResource(bucketName, "")), + condition.NewFunctions(func1), + ), + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet(policy.ListBucketMultipartUploadsAction), + policy.NewResourceSet(policy.NewResource(bucketName, "")), + condition.NewFunctions(), + ), + policy.NewStatement( + policy.Allow, + policy.NewPrincipal("*"), + policy.NewActionSet( + policy.AbortMultipartUploadAction, + policy.DeleteObjectAction, + policy.GetObjectAction, + policy.ListMultipartUploadPartsAction, + policy.PutObjectAction, + ), + policy.NewResourceSet(policy.NewResource(bucketName, "hello*")), + condition.NewFunctions(), + ), }, } - if err := writeBucketPolicy(context.Background(), bucketName, obj, policyVal); err != nil { + + if err = savePolicyConfig(obj, bucketName, bucketPolicy); err != nil { t.Fatal("Unexpected error: ", err) } testCaseResult1 := []BucketAccessPolicy{{ Bucket: bucketName, Prefix: "hello", - Policy: policy.BucketPolicyReadWrite, + Policy: miniogopolicy.BucketPolicyReadWrite, }} testCases := []struct { bucketName string @@ -1460,7 +1494,7 @@ func testWebSetBucketPolicyHandler(obj ObjectLayer, instanceType string, t TestE } for i, testCase := range testCases { - args := &SetBucketPolicyArgs{BucketName: testCase.bucketName, Prefix: testCase.prefix, Policy: testCase.policy} + args := &SetBucketPolicyWebArgs{BucketName: testCase.bucketName, Prefix: testCase.prefix, Policy: testCase.policy} reply := &WebGenericRep{} // Call SetBucketPolicy RPC req, err := 
newTestWebRPCRequest("Web.SetBucketPolicy", authorization, args) @@ -1719,7 +1753,7 @@ func TestWebObjectLayerFaultyDisks(t *testing.T) { {"ListBuckets", AuthRPCArgs{Version: globalRPCAPIVersion}, ListBucketsRep{}}, {"ListObjects", ListObjectsArgs{BucketName: bucketName, Prefix: ""}, ListObjectsRep{}}, {"GetBucketPolicy", GetBucketPolicyArgs{BucketName: bucketName, Prefix: ""}, GetBucketPolicyRep{}}, - {"SetBucketPolicy", SetBucketPolicyArgs{BucketName: bucketName, Prefix: "", Policy: "none"}, WebGenericRep{}}, + {"SetBucketPolicy", SetBucketPolicyWebArgs{BucketName: bucketName, Prefix: "", Policy: "none"}, WebGenericRep{}}, } for _, rpcCall := range webRPCs { diff --git a/cmd/xl-sets.go b/cmd/xl-sets.go index 8600d53f6..630b59853 100644 --- a/cmd/xl-sets.go +++ b/cmd/xl-sets.go @@ -21,17 +21,16 @@ import ( "fmt" "hash/crc32" "io" - "reflect" "sort" "strings" "sync" "time" - "github.com/minio/minio-go/pkg/policy" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/bpool" "github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/madmin" + "github.com/minio/minio/pkg/policy" "github.com/minio/minio/pkg/sync/errgroup" ) @@ -77,9 +76,6 @@ type xlSets struct { // Distribution algorithm of choice. distributionAlgo string - // Variable represents bucket policies in memory. - bucketPolicies *bucketPolicies - // Pack level listObjects pool management. listPool *treeWalkPool } @@ -263,16 +259,14 @@ func newXLSets(endpoints EndpointList, format *formatXLV3, setCount int, drivesP // Connect disks right away. s.connectDisks() - // Initialize and load bucket policies. - var err error - s.bucketPolicies, err = initBucketPolicies(s) - if err != nil { - return nil, err + // Initialize notification system. + if err := globalNotificationSys.Init(s); err != nil { + return nil, fmt.Errorf("Unable to initialize notification system. %v", err) } - // Initialize notification system. 
- if err = globalNotificationSys.Init(s); err != nil { - return nil, fmt.Errorf("Unable to initialize event notification. %s", err) + // Initialize policy system. + if err := globalPolicySys.Init(s); err != nil { + return nil, fmt.Errorf("Unable to initialize policy system. %v", err) } // Start the disk monitoring and connect routine. @@ -473,35 +467,18 @@ func (s *xlSets) ListObjectsV2(ctx context.Context, bucket, prefix, continuation } // SetBucketPolicy persist the new policy on the bucket. -func (s *xlSets) SetBucketPolicy(ctx context.Context, bucket string, policy policy.BucketAccessPolicy) error { - return persistAndNotifyBucketPolicyChange(ctx, bucket, false, policy, s) +func (s *xlSets) SetBucketPolicy(ctx context.Context, bucket string, policy *policy.Policy) error { + return savePolicyConfig(s, bucket, policy) } // GetBucketPolicy will return a policy on a bucket -func (s *xlSets) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) { - // fetch bucket policy from cache. 
- bpolicy := s.bucketPolicies.GetBucketPolicy(bucket) - if reflect.DeepEqual(bpolicy, emptyBucketPolicy) { - return ReadBucketPolicy(bucket, s) - } - return bpolicy, nil +func (s *xlSets) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) { + return GetPolicyConfig(s, bucket) } // DeleteBucketPolicy deletes all policies on bucket func (s *xlSets) DeleteBucketPolicy(ctx context.Context, bucket string) error { - return persistAndNotifyBucketPolicyChange(ctx, bucket, true, emptyBucketPolicy, s) -} - -// RefreshBucketPolicy refreshes policy cache from disk -func (s *xlSets) RefreshBucketPolicy(ctx context.Context, bucket string) error { - policy, err := ReadBucketPolicy(bucket, s) - if err != nil { - if reflect.DeepEqual(policy, emptyBucketPolicy) { - return s.bucketPolicies.DeleteBucketPolicy(bucket) - } - return err - } - return s.bucketPolicies.SetBucketPolicy(bucket, policy) + return removePolicyConfig(ctx, s, bucket) } // IsNotificationSupported returns whether bucket notification is applicable for this layer. diff --git a/cmd/xl-v1-bucket.go b/cmd/xl-v1-bucket.go index f1321d295..e447d9e94 100644 --- a/cmd/xl-v1-bucket.go +++ b/cmd/xl-v1-bucket.go @@ -18,12 +18,11 @@ package cmd import ( "context" - "reflect" "sort" "sync" - "github.com/minio/minio-go/pkg/policy" "github.com/minio/minio/cmd/logger" + "github.com/minio/minio/pkg/policy" ) // list all errors that can be ignore in a bucket operation. 
@@ -280,36 +279,18 @@ func (xl xlObjects) DeleteBucket(ctx context.Context, bucket string) error { } // SetBucketPolicy sets policy on bucket -func (xl xlObjects) SetBucketPolicy(ctx context.Context, bucket string, policy policy.BucketAccessPolicy) error { - return persistAndNotifyBucketPolicyChange(ctx, bucket, false, policy, xl) +func (xl xlObjects) SetBucketPolicy(ctx context.Context, bucket string, policy *policy.Policy) error { + return savePolicyConfig(xl, bucket, policy) } // GetBucketPolicy will get policy on bucket -func (xl xlObjects) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) { - // fetch bucket policy from cache. - bpolicy := xl.bucketPolicies.GetBucketPolicy(bucket) - if reflect.DeepEqual(bpolicy, emptyBucketPolicy) { - return ReadBucketPolicy(bucket, xl) - } - return bpolicy, nil +func (xl xlObjects) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) { + return GetPolicyConfig(xl, bucket) } // DeleteBucketPolicy deletes all policies on bucket func (xl xlObjects) DeleteBucketPolicy(ctx context.Context, bucket string) error { - return persistAndNotifyBucketPolicyChange(ctx, bucket, true, emptyBucketPolicy, xl) -} - -// RefreshBucketPolicy refreshes policy cache from disk -func (xl xlObjects) RefreshBucketPolicy(ctx context.Context, bucket string) error { - policy, err := ReadBucketPolicy(bucket, xl) - - if err != nil { - if reflect.DeepEqual(policy, emptyBucketPolicy) { - return xl.bucketPolicies.DeleteBucketPolicy(bucket) - } - return err - } - return xl.bucketPolicies.SetBucketPolicy(bucket, policy) + return removePolicyConfig(ctx, xl, bucket) } // IsNotificationSupported returns whether bucket notification is applicable for this layer. diff --git a/cmd/xl-v1.go b/cmd/xl-v1.go index 53bb50a28..01cc3c882 100644 --- a/cmd/xl-v1.go +++ b/cmd/xl-v1.go @@ -43,9 +43,6 @@ type xlObjects struct { // Byte pools used for temporary i/o buffers. 
bp *bpool.BytePoolCap - // Variable represents bucket policies in memory. - bucketPolicies *bucketPolicies - // TODO: Deprecated only kept here for tests, should be removed in future. storageDisks []StorageAPI diff --git a/pkg/policy/action.go b/pkg/policy/action.go new file mode 100644 index 000000000..6141ad4d6 --- /dev/null +++ b/pkg/policy/action.go @@ -0,0 +1,267 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package policy + +import ( + "encoding/json" + "fmt" + + "github.com/minio/minio/pkg/policy/condition" +) + +// Action - policy action. +// Refer https://docs.aws.amazon.com/IAM/latest/UserGuide/list_s3.html +// for more information about available actions. +type Action string + +const ( + // AbortMultipartUploadAction - AbortMultipartUpload Rest API action. + AbortMultipartUploadAction Action = "s3:AbortMultipartUpload" + + // CreateBucketAction - CreateBucket Rest API action. + CreateBucketAction = "s3:CreateBucket" + + // DeleteBucketAction - DeleteBucket Rest API action. + DeleteBucketAction = "s3:DeleteBucket" + + // DeleteBucketPolicyAction - DeleteBucketPolicy Rest API action. + DeleteBucketPolicyAction = "s3:DeleteBucketPolicy" + + // DeleteObjectAction - DeleteObject Rest API action. + DeleteObjectAction = "s3:DeleteObject" + + // GetBucketLocationAction - GetBucketLocation Rest API action. 
+ GetBucketLocationAction = "s3:GetBucketLocation" + + // GetBucketNotificationAction - GetBucketNotification Rest API action. + GetBucketNotificationAction = "s3:GetBucketNotification" + + // GetBucketPolicyAction - GetBucketPolicy Rest API action. + GetBucketPolicyAction = "s3:GetBucketPolicy" + + // GetObjectAction - GetObject Rest API action. + GetObjectAction = "s3:GetObject" + + // HeadBucketAction - HeadBucket Rest API action. This action is unused in minio. + HeadBucketAction = "s3:HeadBucket" + + // ListAllMyBucketsAction - ListAllMyBuckets (List buckets) Rest API action. + ListAllMyBucketsAction = "s3:ListAllMyBuckets" + + // ListBucketAction - ListBucket Rest API action. + ListBucketAction = "s3:ListBucket" + + // ListBucketMultipartUploadsAction - ListMultipartUploads Rest API action. + ListBucketMultipartUploadsAction = "s3:ListBucketMultipartUploads" + + // ListenBucketNotificationAction - ListenBucketNotification Rest API action. + // This is Minio extension. + ListenBucketNotificationAction = "s3:ListenBucketNotification" + + // ListMultipartUploadPartsAction - ListParts Rest API action. + ListMultipartUploadPartsAction = "s3:ListMultipartUploadParts" + + // ListObjectsAction - ListObjects Rest API action exactly same behavior as ListBucketAction. + ListObjectsAction = "s3:ListObjects" + + // PutBucketNotificationAction - PutObjectNotification Rest API action. + PutBucketNotificationAction = "s3:PutBucketNotification" + + // PutBucketPolicyAction - PutBucketPolicy Rest API action. + PutBucketPolicyAction = "s3:PutBucketPolicy" + + // PutObjectAction - PutObject Rest API action. + PutObjectAction = "s3:PutObject" +) + +// isObjectAction - returns whether action is object type or not. 
+func (action Action) isObjectAction() bool { + switch action { + case AbortMultipartUploadAction, DeleteObjectAction, GetObjectAction: + fallthrough + case ListMultipartUploadPartsAction, PutObjectAction: + return true + } + + return false +} + +// IsValid - checks if action is valid or not. +func (action Action) IsValid() bool { + switch action { + case AbortMultipartUploadAction, CreateBucketAction, DeleteBucketAction: + fallthrough + case DeleteBucketPolicyAction, DeleteObjectAction, GetBucketLocationAction: + fallthrough + case GetBucketNotificationAction, GetBucketPolicyAction, GetObjectAction: + fallthrough + case HeadBucketAction, ListAllMyBucketsAction, ListBucketAction: + fallthrough + case ListBucketMultipartUploadsAction, ListenBucketNotificationAction: + fallthrough + case ListMultipartUploadPartsAction, ListObjectsAction, PutBucketNotificationAction: + fallthrough + case PutBucketPolicyAction, PutObjectAction: + return true + } + + return false +} + +// MarshalJSON - encodes Action to JSON data. +func (action Action) MarshalJSON() ([]byte, error) { + if action.IsValid() { + return json.Marshal(string(action)) + } + + return nil, fmt.Errorf("invalid action '%v'", action) +} + +// UnmarshalJSON - decodes JSON data to Action. +func (action *Action) UnmarshalJSON(data []byte) error { + var s string + + if err := json.Unmarshal(data, &s); err != nil { + return err + } + + a := Action(s) + if !a.IsValid() { + return fmt.Errorf("invalid action '%v'", s) + } + + *action = a + + return nil +} + +func parseAction(s string) (Action, error) { + action := Action(s) + + if action.IsValid() { + return action, nil + } + + return action, fmt.Errorf("unsupported action '%v'", s) +} + +// actionConditionKeyMap - holds mapping of supported condition key for an action. 
+var actionConditionKeyMap = map[Action]condition.KeySet{ + AbortMultipartUploadAction: condition.NewKeySet( + condition.AWSReferer, + condition.AWSSourceIP, + ), + + CreateBucketAction: condition.NewKeySet( + condition.AWSReferer, + condition.AWSSourceIP, + ), + + DeleteBucketPolicyAction: condition.NewKeySet( + condition.AWSReferer, + condition.AWSSourceIP, + ), + + DeleteObjectAction: condition.NewKeySet( + condition.AWSReferer, + condition.AWSSourceIP, + ), + + GetBucketLocationAction: condition.NewKeySet( + condition.AWSReferer, + condition.AWSSourceIP, + ), + + GetBucketNotificationAction: condition.NewKeySet( + condition.AWSReferer, + condition.AWSSourceIP, + ), + + GetBucketPolicyAction: condition.NewKeySet( + condition.AWSReferer, + condition.AWSSourceIP, + ), + + GetObjectAction: condition.NewKeySet( + condition.S3XAmzServerSideEncryption, + condition.S3XAmzServerSideEncryptionAwsKMSKeyID, + condition.S3XAmzStorageClass, + condition.AWSReferer, + condition.AWSSourceIP, + ), + + HeadBucketAction: condition.NewKeySet( + condition.AWSReferer, + condition.AWSSourceIP, + ), + + ListAllMyBucketsAction: condition.NewKeySet( + condition.AWSReferer, + condition.AWSSourceIP, + ), + + ListBucketAction: condition.NewKeySet( + condition.S3Prefix, + condition.S3Delimiter, + condition.S3MaxKeys, + condition.AWSReferer, + condition.AWSSourceIP, + ), + + ListBucketMultipartUploadsAction: condition.NewKeySet( + condition.AWSReferer, + condition.AWSSourceIP, + ), + + ListenBucketNotificationAction: condition.NewKeySet( + condition.AWSReferer, + condition.AWSSourceIP, + ), + + ListMultipartUploadPartsAction: condition.NewKeySet( + condition.AWSReferer, + condition.AWSSourceIP, + ), + + ListObjectsAction: condition.NewKeySet( + condition.S3Prefix, + condition.S3Delimiter, + condition.S3MaxKeys, + condition.AWSReferer, + condition.AWSSourceIP, + ), + + PutBucketNotificationAction: condition.NewKeySet( + condition.AWSReferer, + condition.AWSSourceIP, + ), + + 
PutBucketPolicyAction: condition.NewKeySet( + condition.AWSReferer, + condition.AWSSourceIP, + ), + + PutObjectAction: condition.NewKeySet( + condition.S3XAmzCopySource, + condition.S3XAmzServerSideEncryption, + condition.S3XAmzServerSideEncryptionAwsKMSKeyID, + condition.S3XAmzMetadataDirective, + condition.S3XAmzStorageClass, + condition.AWSReferer, + condition.AWSSourceIP, + ), +} diff --git a/pkg/policy/action_test.go b/pkg/policy/action_test.go new file mode 100644 index 000000000..53d05a904 --- /dev/null +++ b/pkg/policy/action_test.go @@ -0,0 +1,116 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package policy + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestActionIsObjectAction(t *testing.T) { + testCases := []struct { + action Action + expectedResult bool + }{ + {AbortMultipartUploadAction, true}, + {DeleteObjectAction, true}, + {GetObjectAction, true}, + {ListMultipartUploadPartsAction, true}, + {PutObjectAction, true}, + {CreateBucketAction, false}, + } + + for i, testCase := range testCases { + result := testCase.action.isObjectAction() + + if testCase.expectedResult != result { + t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestActionIsValid(t *testing.T) { + testCases := []struct { + action Action + expectedResult bool + }{ + {AbortMultipartUploadAction, true}, + {Action("foo"), false}, + } + + for i, testCase := range testCases { + result := testCase.action.IsValid() + + if testCase.expectedResult != result { + t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestActionMarshalJSON(t *testing.T) { + testCases := []struct { + action Action + expectedResult []byte + expectErr bool + }{ + {PutObjectAction, []byte(`"s3:PutObject"`), false}, + {Action("foo"), nil, true}, + } + + for i, testCase := range testCases { + result, err := json.Marshal(testCase.action) + expectErr := (err != nil) + + if testCase.expectErr != expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } + } +} + +func TestActionUnmarshalJSON(t *testing.T) { + testCases := []struct { + data []byte + expectedResult Action + expectErr bool + }{ + {[]byte(`"s3:PutObject"`), PutObjectAction, false}, + {[]byte(`"foo"`), Action(""), true}, + } + + for i, testCase := range testCases { + var result Action + err := 
json.Unmarshal(testCase.data, &result) + expectErr := (err != nil) + + if testCase.expectErr != expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if testCase.expectedResult != result { + t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } + } +} diff --git a/pkg/policy/actionset.go b/pkg/policy/actionset.go new file mode 100644 index 000000000..a0762173b --- /dev/null +++ b/pkg/policy/actionset.go @@ -0,0 +1,114 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package policy + +import ( + "encoding/json" + "fmt" + "sort" + + "github.com/minio/minio-go/pkg/set" +) + +// ActionSet - set of actions. +type ActionSet map[Action]struct{} + +// Add - add action to the set. +func (actionSet ActionSet) Add(action Action) { + actionSet[action] = struct{}{} +} + +// Contains - checks given action exists in the action set. +func (actionSet ActionSet) Contains(action Action) bool { + _, found := actionSet[action] + return found +} + +// Intersection - returns actions available in both ActionSet. +func (actionSet ActionSet) Intersection(sset ActionSet) ActionSet { + nset := NewActionSet() + for k := range actionSet { + if _, ok := sset[k]; ok { + nset.Add(k) + } + } + + return nset +} + +// MarshalJSON - encodes ActionSet to JSON data. 
+func (actionSet ActionSet) MarshalJSON() ([]byte, error) { + if len(actionSet) == 0 { + return nil, fmt.Errorf("empty action set") + } + + return json.Marshal(actionSet.ToSlice()) +} + +func (actionSet ActionSet) String() string { + actions := []string{} + for action := range actionSet { + actions = append(actions, string(action)) + } + sort.Strings(actions) + + return fmt.Sprintf("%v", actions) +} + +// ToSlice - returns slice of actions from the action set. +func (actionSet ActionSet) ToSlice() []Action { + actions := []Action{} + for action := range actionSet { + actions = append(actions, action) + } + + return actions +} + +// UnmarshalJSON - decodes JSON data to ActionSet. +func (actionSet *ActionSet) UnmarshalJSON(data []byte) error { + var sset set.StringSet + if err := json.Unmarshal(data, &sset); err != nil { + return err + } + + if len(sset) == 0 { + return fmt.Errorf("empty action set") + } + + *actionSet = make(ActionSet) + for _, s := range sset.ToSlice() { + action, err := parseAction(s) + if err != nil { + return err + } + + actionSet.Add(action) + } + + return nil +} + +// NewActionSet - creates new action set. +func NewActionSet(actions ...Action) ActionSet { + actionSet := make(ActionSet) + for _, action := range actions { + actionSet.Add(action) + } + + return actionSet +} diff --git a/pkg/policy/actionset_test.go b/pkg/policy/actionset_test.go new file mode 100644 index 000000000..56d80e8aa --- /dev/null +++ b/pkg/policy/actionset_test.go @@ -0,0 +1,158 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package policy + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestActionSetAdd(t *testing.T) { + testCases := []struct { + set ActionSet + action Action + expectedResult ActionSet + }{ + {NewActionSet(), PutObjectAction, NewActionSet(PutObjectAction)}, + {NewActionSet(PutObjectAction), PutObjectAction, NewActionSet(PutObjectAction)}, + } + + for i, testCase := range testCases { + testCase.set.Add(testCase.action) + + if !reflect.DeepEqual(testCase.expectedResult, testCase.set) { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, testCase.set) + } + } +} + +func TestActionSetContains(t *testing.T) { + testCases := []struct { + set ActionSet + action Action + expectedResult bool + }{ + {NewActionSet(PutObjectAction), PutObjectAction, true}, + {NewActionSet(PutObjectAction, GetObjectAction), PutObjectAction, true}, + {NewActionSet(PutObjectAction, GetObjectAction), AbortMultipartUploadAction, false}, + } + + for i, testCase := range testCases { + result := testCase.set.Contains(testCase.action) + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestActionSetIntersection(t *testing.T) { + testCases := []struct { + set ActionSet + setToIntersect ActionSet + expectedResult ActionSet + }{ + {NewActionSet(), NewActionSet(PutObjectAction), NewActionSet()}, + {NewActionSet(PutObjectAction), NewActionSet(), NewActionSet()}, + {NewActionSet(PutObjectAction), NewActionSet(PutObjectAction, GetObjectAction), 
NewActionSet(PutObjectAction)}, + } + + for i, testCase := range testCases { + result := testCase.set.Intersection(testCase.setToIntersect) + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, testCase.set) + } + } +} + +func TestActionSetMarshalJSON(t *testing.T) { + testCases := []struct { + actionSet ActionSet + expectedResult []byte + expectErr bool + }{ + {NewActionSet(PutObjectAction), []byte(`["s3:PutObject"]`), false}, + {NewActionSet(), nil, true}, + } + + for i, testCase := range testCases { + result, err := json.Marshal(testCase.actionSet) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, string(testCase.expectedResult), string(result)) + } + } + } +} + +func TestActionSetToSlice(t *testing.T) { + testCases := []struct { + actionSet ActionSet + expectedResult []Action + }{ + {NewActionSet(PutObjectAction), []Action{PutObjectAction}}, + {NewActionSet(), []Action{}}, + } + + for i, testCase := range testCases { + result := testCase.actionSet.ToSlice() + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestActionSetUnmarshalJSON(t *testing.T) { + testCases := []struct { + data []byte + expectedResult ActionSet + expectErr bool + }{ + {[]byte(`"s3:PutObject"`), NewActionSet(PutObjectAction), false}, + {[]byte(`["s3:PutObject"]`), NewActionSet(PutObjectAction), false}, + {[]byte(`["s3:PutObject", "s3:GetObject"]`), NewActionSet(PutObjectAction, GetObjectAction), false}, + {[]byte(`["s3:PutObject", "s3:GetObject", "s3:PutObject"]`), NewActionSet(PutObjectAction, GetObjectAction), false}, + 
{[]byte(`[]`), NewActionSet(), true}, // Empty array. + {[]byte(`"foo"`), nil, true}, // Invalid action. + {[]byte(`["s3:PutObject", "foo"]`), nil, true}, // Invalid action. + } + + for i, testCase := range testCases { + result := make(ActionSet) + err := json.Unmarshal(testCase.data, &result) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } + } +} diff --git a/pkg/policy/condition/func.go b/pkg/policy/condition/func.go new file mode 100644 index 000000000..2be59672f --- /dev/null +++ b/pkg/policy/condition/func.go @@ -0,0 +1,168 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package condition + +import ( + "encoding/json" + "fmt" + "sort" +) + +// Function - condition function interface. +type Function interface { + // evaluate() - evaluates this condition function with given values. + evaluate(values map[string][]string) bool + + // key() - returns condition key used in this function. + key() Key + + // name() - returns condition name of this function. + name() name + + // String() - returns string representation of function. 
+ String() string + + // toMap - returns map representation of this function. + toMap() map[Key]ValueSet +} + +// Functions - list of functions. +type Functions []Function + +// Evaluate - evaluates all functions with given values map. Each function is evaluated +// sequencely and next function is called only if current function succeeds. +func (functions Functions) Evaluate(values map[string][]string) bool { + for _, f := range functions { + if !f.evaluate(values) { + return false + } + } + + return true +} + +// Keys - returns list of keys used in all functions. +func (functions Functions) Keys() KeySet { + keySet := NewKeySet() + + for _, f := range functions { + keySet.Add(f.key()) + } + + return keySet +} + +// MarshalJSON - encodes Functions to JSON data. +func (functions Functions) MarshalJSON() ([]byte, error) { + nm := make(map[name]map[Key]ValueSet) + + for _, f := range functions { + nm[f.name()] = f.toMap() + } + + return json.Marshal(nm) +} + +func (functions Functions) String() string { + funcStrings := []string{} + for _, f := range functions { + s := fmt.Sprintf("%v", f) + funcStrings = append(funcStrings, s) + } + sort.Strings(funcStrings) + + return fmt.Sprintf("%v", funcStrings) +} + +// UnmarshalJSON - decodes JSON data to Functions. +func (functions *Functions) UnmarshalJSON(data []byte) error { + // As string kind, int kind then json.Unmarshaler is checked at + // https://github.com/golang/go/blob/master/src/encoding/json/decode.go#L618 + // UnmarshalJSON() is not called for types extending string + // see https://play.golang.org/p/HrSsKksHvrS, better way to do is + // https://play.golang.org/p/y9ElWpBgVAB + // + // Due to this issue, name and Key types cannot be used as map keys below. 
+ nm := make(map[string]map[string]ValueSet) + if err := json.Unmarshal(data, &nm); err != nil { + return err + } + + if len(nm) == 0 { + return fmt.Errorf("condition must not be empty") + } + + funcs := []Function{} + for nameString, args := range nm { + n, err := parseName(nameString) + if err != nil { + return err + } + + for keyString, values := range args { + key, err := parseKey(keyString) + if err != nil { + return err + } + + var f Function + switch n { + case stringEquals: + if f, err = newStringEqualsFunc(key, values); err != nil { + return err + } + case stringNotEquals: + if f, err = newStringNotEqualsFunc(key, values); err != nil { + return err + } + case stringLike: + if f, err = newStringLikeFunc(key, values); err != nil { + return err + } + case stringNotLike: + if f, err = newStringNotLikeFunc(key, values); err != nil { + return err + } + case ipAddress: + if f, err = newIPAddressFunc(key, values); err != nil { + return err + } + case notIPAddress: + if f, err = newNotIPAddressFunc(key, values); err != nil { + return err + } + case null: + if f, err = newNullFunc(key, values); err != nil { + return err + } + default: + return fmt.Errorf("%v is not handled", n) + } + + funcs = append(funcs, f) + } + } + + *functions = funcs + + return nil +} + +// NewFunctions - returns new Functions with given function list. +func NewFunctions(functions ...Function) Functions { + return Functions(functions) +} diff --git a/pkg/policy/condition/func_test.go b/pkg/policy/condition/func_test.go new file mode 100644 index 000000000..56df25b60 --- /dev/null +++ b/pkg/policy/condition/func_test.go @@ -0,0 +1,298 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package condition + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestFunctionsEvaluate(t *testing.T) { + func1, err := newNullFunc(S3XAmzCopySource, NewValueSet(NewBoolValue(true))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + func2, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + func3, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + func4, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject*"))) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case1Function := NewFunctions(func1, func2, func3, func4) + + testCases := []struct { + functions Functions + values map[string][]string + expectedResult bool + }{ + {case1Function, map[string][]string{ + "x-amz-copy-source": {"mybucket/myobject"}, + "SourceIp": {"192.168.1.10"}, + }, true}, + {case1Function, map[string][]string{ + "x-amz-copy-source": {"mybucket/myobject"}, + "SourceIp": {"192.168.1.10"}, + "Refer": {"http://example.org/"}, + }, true}, + {case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, false}, + {case1Function, map[string][]string{"SourceIp": {"192.168.1.10"}}, false}, + {case1Function, map[string][]string{ + "x-amz-copy-source": {"mybucket/yourobject"}, + "SourceIp": {"192.168.1.10"}, + }, false}, + {case1Function, map[string][]string{ + "x-amz-copy-source": {"mybucket/myobject"}, + "SourceIp": {"192.168.2.10"}, + }, false}, + {case1Function, map[string][]string{ + "x-amz-copy-source": {"mybucket/myobject"}, + "Refer": {"http://example.org/"}, + }, false}, + } + + for i, testCase := range testCases { + result := testCase.functions.Evaluate(testCase.values) + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestFunctionsKeys(t *testing.T) { + func1, err := newNullFunc(S3XAmzCopySource, NewValueSet(NewBoolValue(true))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + func2, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + func3, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + func4, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject*"))) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + testCases := []struct { + functions Functions + expectedResult KeySet + }{ + {NewFunctions(func1, func2, func3, func4), NewKeySet(S3XAmzCopySource, AWSSourceIP)}, + } + + for i, testCase := range testCases { + result := testCase.functions.Keys() + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestFunctionsMarshalJSON(t *testing.T) { + func1, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + func2, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + func3, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + func4, err := newNotIPAddressFunc(AWSSourceIP, + NewValueSet(NewStringValue("10.1.10.0/24"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + func5, err := newStringNotLikeFunc(S3XAmzStorageClass, NewValueSet(NewStringValue("STANDARD"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + func6, err := newNullFunc(S3XAmzServerSideEncryptionAwsKMSKeyID, NewValueSet(NewBoolValue(true))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + func7, err := newIPAddressFunc(AWSSourceIP, + NewValueSet(NewStringValue("192.168.1.0/24"))) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case1Result := []byte(`{"IpAddress":{"aws:SourceIp":["192.168.1.0/24"]},"NotIpAddress":{"aws:SourceIp":["10.1.10.0/24"]},"Null":{"s3:x-amz-server-side-encryption-aws-kms-key-id":[true]},"StringEquals":{"s3:x-amz-copy-source":["mybucket/myobject"]},"StringLike":{"s3:x-amz-metadata-directive":["REPL*"]},"StringNotEquals":{"s3:x-amz-server-side-encryption":["AES256"]},"StringNotLike":{"s3:x-amz-storage-class":["STANDARD"]}}`) + + case2Result := []byte(`{"Null":{"s3:x-amz-server-side-encryption-aws-kms-key-id":[true]}}`) + + testCases := []struct { + functions Functions + expectedResult []byte + expectErr bool + }{ + {NewFunctions(func1, func2, func3, func4, func5, func6, func7), case1Result, false}, + {NewFunctions(func6), case2Result, false}, + {NewFunctions(), []byte(`{}`), false}, + {nil, []byte(`{}`), false}, + } + + for i, testCase := range testCases { + result, err := json.Marshal(testCase.functions) + expectErr := (err != nil) + + if testCase.expectErr != expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v", i+1, string(testCase.expectedResult), string(result)) + } + } + } +} + +func TestFunctionsUnmarshalJSON(t *testing.T) { + case1Data := []byte(`{ + "StringLike": { + "s3:x-amz-metadata-directive": "REPL*" + }, + "StringEquals": { + "s3:x-amz-copy-source": "mybucket/myobject" + }, + "StringNotEquals": { + "s3:x-amz-server-side-encryption": "AES256" + }, + "NotIpAddress": { + "aws:SourceIp": [ + "10.1.10.0/24", + "10.10.1.0/24" + ] + }, + "StringNotLike": { + "s3:x-amz-storage-class": "STANDARD" + }, + "Null": { + "s3:x-amz-server-side-encryption-aws-kms-key-id": true + }, + "IpAddress": { + "aws:SourceIp": [ + "192.168.1.0/24", + "192.168.2.0/24" + ] + } +}`) + func1, err := newStringLikeFunc(S3XAmzMetadataDirective, 
NewValueSet(NewStringValue("REPL*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + func2, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + func3, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + func4, err := newNotIPAddressFunc(AWSSourceIP, + NewValueSet(NewStringValue("10.1.10.0/24"), NewStringValue("10.10.1.0/24"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + func5, err := newStringNotLikeFunc(S3XAmzStorageClass, NewValueSet(NewStringValue("STANDARD"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + func6, err := newNullFunc(S3XAmzServerSideEncryptionAwsKMSKeyID, NewValueSet(NewBoolValue(true))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + func7, err := newIPAddressFunc(AWSSourceIP, + NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("192.168.2.0/24"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Data := []byte(`{ + "Null": { + "s3:x-amz-server-side-encryption-aws-kms-key-id": true + }, + "Null": { + "s3:x-amz-server-side-encryption-aws-kms-key-id": "true" + } +}`) + + case3Data := []byte(`{}`) + + testCases := []struct { + data []byte + expectedResult Functions + expectErr bool + }{ + {case1Data, NewFunctions(func1, func2, func3, func4, func5, func6, func7), false}, + // duplicate condition error. + {case2Data, NewFunctions(func6), false}, + // empty condition error. 
+ {case3Data, nil, true}, + } + + for i, testCase := range testCases { + result := new(Functions) + err := json.Unmarshal(testCase.data, result) + expectErr := (err != nil) + + if testCase.expectErr != expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if (*result).String() != testCase.expectedResult.String() { + t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, *result) + } + } + } +} diff --git a/pkg/policy/condition/ipaddressfunc.go b/pkg/policy/condition/ipaddressfunc.go new file mode 100644 index 000000000..2d108bfa4 --- /dev/null +++ b/pkg/policy/condition/ipaddressfunc.go @@ -0,0 +1,180 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package condition + +import ( + "fmt" + "net" + "sort" +) + +func toIPAddressFuncString(n name, key Key, values []*net.IPNet) string { + valueStrings := []string{} + for _, value := range values { + valueStrings = append(valueStrings, value.String()) + } + sort.Strings(valueStrings) + + return fmt.Sprintf("%v:%v:%v", n, key, valueStrings) +} + +// ipAddressFunc - IP address function. It checks whether value by Key in given +// values is in IP network. Here Key must be AWSSourceIP. +// For example, +// - if values = [192.168.1.0/24], at evaluate() it returns whether IP address +// in value map for AWSSourceIP falls in the network 192.168.1.10/24. 
+type ipAddressFunc struct { + k Key + values []*net.IPNet +} + +// evaluate() - evaluates to check whether IP address in values map for AWSSourceIP +// falls in one of network or not. +func (f ipAddressFunc) evaluate(values map[string][]string) bool { + IPs := []net.IP{} + for _, s := range values[f.k.Name()] { + IP := net.ParseIP(s) + if IP == nil { + panic(fmt.Errorf("invalid IP address '%v'", s)) + } + + IPs = append(IPs, IP) + } + + for _, IP := range IPs { + for _, IPNet := range f.values { + if IPNet.Contains(IP) { + return true + } + } + } + + return false +} + +// key() - returns condition key which is used by this condition function. +// Key is always AWSSourceIP. +func (f ipAddressFunc) key() Key { + return f.k +} + +// name() - returns "IpAddress" condition name. +func (f ipAddressFunc) name() name { + return ipAddress +} + +func (f ipAddressFunc) String() string { + return toIPAddressFuncString(ipAddress, f.k, f.values) +} + +// toMap - returns map representation of this function. +func (f ipAddressFunc) toMap() map[Key]ValueSet { + if !f.k.IsValid() { + return nil + } + + values := NewValueSet() + for _, value := range f.values { + values.Add(NewStringValue(value.String())) + } + + return map[Key]ValueSet{ + f.k: values, + } +} + +// notIPAddressFunc - Not IP address function. It checks whether value by Key in given +// values is NOT in IP network. Here Key must be AWSSourceIP. +// For example, +// - if values = [192.168.1.0/24], at evaluate() it returns whether IP address +// in value map for AWSSourceIP does not fall in the network 192.168.1.10/24. +type notIPAddressFunc struct { + ipAddressFunc +} + +// evaluate() - evaluates to check whether IP address in values map for AWSSourceIP +// does not fall in one of network. +func (f notIPAddressFunc) evaluate(values map[string][]string) bool { + return !f.ipAddressFunc.evaluate(values) +} + +// name() - returns "NotIpAddress" condition name. 
+func (f notIPAddressFunc) name() name { + return notIPAddress +} + +func (f notIPAddressFunc) String() string { + return toIPAddressFuncString(notIPAddress, f.ipAddressFunc.k, f.ipAddressFunc.values) +} + +func valuesToIPNets(n name, values ValueSet) ([]*net.IPNet, error) { + IPNets := []*net.IPNet{} + for v := range values { + s, err := v.GetString() + if err != nil { + return nil, fmt.Errorf("value %v must be string representation of CIDR for %v condition", v, n) + } + + var IPNet *net.IPNet + _, IPNet, err = net.ParseCIDR(s) + if err != nil { + return nil, fmt.Errorf("value %v must be CIDR string for %v condition", s, n) + } + + IPNets = append(IPNets, IPNet) + } + + return IPNets, nil +} + +// newIPAddressFunc - returns new IP address function. +func newIPAddressFunc(key Key, values ValueSet) (Function, error) { + IPNets, err := valuesToIPNets(ipAddress, values) + if err != nil { + return nil, err + } + + return NewIPAddressFunc(key, IPNets...) +} + +// NewIPAddressFunc - returns new IP address function. +func NewIPAddressFunc(key Key, IPNets ...*net.IPNet) (Function, error) { + if key != AWSSourceIP { + return nil, fmt.Errorf("only %v key is allowed for %v condition", AWSSourceIP, ipAddress) + } + + return &ipAddressFunc{key, IPNets}, nil +} + +// newNotIPAddressFunc - returns new Not IP address function. +func newNotIPAddressFunc(key Key, values ValueSet) (Function, error) { + IPNets, err := valuesToIPNets(notIPAddress, values) + if err != nil { + return nil, err + } + + return NewNotIPAddressFunc(key, IPNets...) +} + +// NewNotIPAddressFunc - returns new Not IP address function. 
+func NewNotIPAddressFunc(key Key, IPNets ...*net.IPNet) (Function, error) { + if key != AWSSourceIP { + return nil, fmt.Errorf("only %v key is allowed for %v condition", AWSSourceIP, notIPAddress) + } + + return &notIPAddressFunc{ipAddressFunc{key, IPNets}}, nil +} diff --git a/pkg/policy/condition/ipaddressfunc_test.go b/pkg/policy/condition/ipaddressfunc_test.go new file mode 100644 index 000000000..b2529bf9f --- /dev/null +++ b/pkg/policy/condition/ipaddressfunc_test.go @@ -0,0 +1,278 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package condition + +import ( + "reflect" + "testing" +) + +func TestIPAddressFuncEvaluate(t *testing.T) { + case1Function, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + testCases := []struct { + function Function + values map[string][]string + expectedResult bool + }{ + {case1Function, map[string][]string{"SourceIp": {"192.168.1.10"}}, true}, + {case1Function, map[string][]string{"SourceIp": {"192.168.2.10"}}, false}, + {case1Function, map[string][]string{}, false}, + {case1Function, map[string][]string{"delimiter": {"/"}}, false}, + } + + for i, testCase := range testCases { + result := testCase.function.evaluate(testCase.values) + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestIPAddressFuncKey(t *testing.T) { + case1Function, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + testCases := []struct { + function Function + expectedResult Key + }{ + {case1Function, AWSSourceIP}, + } + + for i, testCase := range testCases { + result := testCase.function.key() + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestIPAddressFuncToMap(t *testing.T) { + case1Function, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Function, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32"))) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case1Result := map[Key]ValueSet{ + AWSSourceIP: NewValueSet(NewStringValue("192.168.1.0/24")), + } + + case2Result := map[Key]ValueSet{ + AWSSourceIP: NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32")), + } + + testCases := []struct { + f Function + expectedResult map[Key]ValueSet + }{ + {case1Function, case1Result}, + {case2Function, case2Result}, + {&ipAddressFunc{}, nil}, + } + + for i, testCase := range testCases { + result := testCase.f.toMap() + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestNotIPAddressFuncEvaluate(t *testing.T) { + case1Function, err := newNotIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + testCases := []struct { + function Function + values map[string][]string + expectedResult bool + }{ + {case1Function, map[string][]string{"SourceIp": {"192.168.2.10"}}, true}, + {case1Function, map[string][]string{}, true}, + {case1Function, map[string][]string{"delimiter": {"/"}}, true}, + {case1Function, map[string][]string{"SourceIp": {"192.168.1.10"}}, false}, + } + + for i, testCase := range testCases { + result := testCase.function.evaluate(testCase.values) + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestNotIPAddressFuncKey(t *testing.T) { + case1Function, err := newNotIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) + if err != nil { + t.Fatalf("unexpected error. 
 %v\n", err) + } + + testCases := []struct { + function Function + expectedResult Key + }{ + {case1Function, AWSSourceIP}, + } + + for i, testCase := range testCases { + result := testCase.function.key() + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestNotIPAddressFuncToMap(t *testing.T) { + case1Function, err := newNotIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Function, err := newNotIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case1Result := map[Key]ValueSet{ + AWSSourceIP: NewValueSet(NewStringValue("192.168.1.0/24")), + } + + case2Result := map[Key]ValueSet{ + AWSSourceIP: NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32")), + } + + testCases := []struct { + f Function + expectedResult map[Key]ValueSet + }{ + {case1Function, case1Result}, + {case2Function, case2Result}, + {&notIPAddressFunc{}, nil}, + } + + for i, testCase := range testCases { + result := testCase.f.toMap() + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestNewIPAddressFunc(t *testing.T) { + case1Function, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Function, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32"))) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + testCases := []struct { + key Key + values ValueSet + expectedResult Function + expectErr bool + }{ + {AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24")), case1Function, false}, + {AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32")), case2Function, false}, + // Unsupported key error. + {S3Prefix, NewValueSet(NewStringValue("192.168.1.0/24")), nil, true}, + // Invalid value error. + {AWSSourceIP, NewValueSet(NewStringValue("node1.example.org")), nil, true}, + // Invalid CIDR format error. + {AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0.0/24")), nil, true}, + } + + for i, testCase := range testCases { + result, err := newIPAddressFunc(testCase.key, testCase.values) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if result.String() != testCase.expectedResult.String() { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } + } +} + +func TestNewNotIPAddressFunc(t *testing.T) { + case1Function, err := newNotIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Function, err := newNotIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + testCases := []struct { + key Key + values ValueSet + expectedResult Function + expectErr bool + }{ + {AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24")), case1Function, false}, + {AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32")), case2Function, false}, + // Unsupported key error. + {S3Prefix, NewValueSet(NewStringValue("192.168.1.0/24")), nil, true}, + // Invalid value error. 
+ {AWSSourceIP, NewValueSet(NewStringValue("node1.example.org")), nil, true}, + // Invalid CIDR format error. + {AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0.0/24")), nil, true}, + } + + for i, testCase := range testCases { + result, err := newNotIPAddressFunc(testCase.key, testCase.values) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if result.String() != testCase.expectedResult.String() { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } + } +} diff --git a/pkg/policy/condition/key.go b/pkg/policy/condition/key.go new file mode 100644 index 000000000..0762d0365 --- /dev/null +++ b/pkg/policy/condition/key.go @@ -0,0 +1,186 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package condition + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Key - conditional key which is used to fetch values for any condition. +// Refer https://docs.aws.amazon.com/IAM/latest/UserGuide/list_s3.html +// for more information about available condition keys. +type Key string + +const ( + // S3XAmzCopySource - key representing x-amz-copy-source HTTP header applicable to PutObject API only. 
+ S3XAmzCopySource Key = "s3:x-amz-copy-source" + + // S3XAmzServerSideEncryption - key representing x-amz-server-side-encryption HTTP header applicable + // to PutObject API only. + S3XAmzServerSideEncryption = "s3:x-amz-server-side-encryption" + + // S3XAmzServerSideEncryptionAwsKMSKeyID - key representing x-amz-server-side-encryption-aws-kms-key-id + // HTTP header applicable to PutObject API only. + S3XAmzServerSideEncryptionAwsKMSKeyID = "s3:x-amz-server-side-encryption-aws-kms-key-id" + + // S3XAmzServerSideEncryptionCustomerAlgorithm - key representing + // x-amz-server-side-encryption-customer-algorithm HTTP header applicable to PutObject API only. + S3XAmzServerSideEncryptionCustomerAlgorithm = "s3:x-amz-server-side-encryption-customer-algorithm" + + // S3XAmzMetadataDirective - key representing x-amz-metadata-directive HTTP header applicable to + // PutObject API only. + S3XAmzMetadataDirective = "s3:x-amz-metadata-directive" + + // S3XAmzStorageClass - key representing x-amz-storage-class HTTP header applicable to PutObject API + // only. + S3XAmzStorageClass = "s3:x-amz-storage-class" + + // S3LocationConstraint - key representing LocationConstraint XML tag of CreateBucket API only. + S3LocationConstraint = "s3:LocationConstraint" + + // S3Prefix - key representing prefix query parameter of ListBucket API only. + S3Prefix = "s3:prefix" + + // S3Delimiter - key representing delimiter query parameter of ListBucket API only. + S3Delimiter = "s3:delimiter" + + // S3MaxKeys - key representing max-keys query parameter of ListBucket API only. + S3MaxKeys = "s3:max-keys" + + // AWSReferer - key representing Referer header of any API. + AWSReferer = "aws:Referer" + + // AWSSourceIP - key representing client's IP address (not intermittent proxies) of any API. + AWSSourceIP = "aws:SourceIp" +) + +// IsValid - checks if key is valid or not. 
+func (key Key) IsValid() bool { + switch key { + case S3XAmzCopySource, S3XAmzServerSideEncryption, S3XAmzServerSideEncryptionAwsKMSKeyID: + fallthrough + case S3XAmzMetadataDirective, S3XAmzStorageClass, S3LocationConstraint, S3Prefix: + fallthrough + case S3Delimiter, S3MaxKeys, AWSReferer, AWSSourceIP: + return true + } + + return false +} + +// MarshalJSON - encodes Key to JSON data. +func (key Key) MarshalJSON() ([]byte, error) { + if !key.IsValid() { + return nil, fmt.Errorf("unknown key %v", key) + } + + return json.Marshal(string(key)) +} + +// Name - returns key name which is stripped value of prefixes "aws:" and "s3:" +func (key Key) Name() string { + keyString := string(key) + + if strings.HasPrefix(keyString, "aws:") { + return strings.TrimPrefix(keyString, "aws:") + } + + return strings.TrimPrefix(keyString, "s3:") +} + +// UnmarshalJSON - decodes JSON data to Key. +func (key *Key) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + + parsedKey, err := parseKey(s) + if err != nil { + return err + } + + *key = parsedKey + return nil +} + +func parseKey(s string) (Key, error) { + key := Key(s) + + if key.IsValid() { + return key, nil + } + + return key, fmt.Errorf("invalid condition key '%v'", s) +} + +// KeySet - set representation of slice of keys. +type KeySet map[Key]struct{} + +// Add - add a key to key set. +func (set KeySet) Add(key Key) { + set[key] = struct{}{} +} + +// Difference - returns a key set contains difference of two keys. +// Example: +// keySet1 := ["one", "two", "three"] +// keySet2 := ["two", "four", "three"] +// keySet1.Difference(keySet2) == ["one"] +func (set KeySet) Difference(sset KeySet) KeySet { + nset := make(KeySet) + + for k := range set { + if _, ok := sset[k]; !ok { + nset.Add(k) + } + } + + return nset +} + +// IsEmpty - returns whether key set is empty or not. 
+func (set KeySet) IsEmpty() bool { + return len(set) == 0 +} + +func (set KeySet) String() string { + return fmt.Sprintf("%v", set.ToSlice()) +} + +// ToSlice - returns slice of keys. +func (set KeySet) ToSlice() []Key { + keys := []Key{} + + for key := range set { + keys = append(keys, key) + } + + return keys +} + +// NewKeySet - returns new KeySet contains given keys. +func NewKeySet(keys ...Key) KeySet { + set := make(KeySet) + for _, key := range keys { + set.Add(key) + } + + return set +} diff --git a/pkg/policy/condition/key_test.go b/pkg/policy/condition/key_test.go new file mode 100644 index 000000000..b1ba03413 --- /dev/null +++ b/pkg/policy/condition/key_test.go @@ -0,0 +1,214 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package condition + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestKeyIsValid(t *testing.T) { + testCases := []struct { + key Key + expectedResult bool + }{ + {S3XAmzCopySource, true}, + {S3XAmzServerSideEncryption, true}, + {S3XAmzServerSideEncryptionAwsKMSKeyID, true}, + {S3XAmzMetadataDirective, true}, + {S3XAmzStorageClass, true}, + {S3LocationConstraint, true}, + {S3Prefix, true}, + {S3Delimiter, true}, + {S3MaxKeys, true}, + {AWSReferer, true}, + {AWSSourceIP, true}, + {Key("foo"), false}, + } + + for i, testCase := range testCases { + result := testCase.key.IsValid() + + if testCase.expectedResult != result { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestKeyMarshalJSON(t *testing.T) { + testCases := []struct { + key Key + expectedResult []byte + expectErr bool + }{ + {S3XAmzCopySource, []byte(`"s3:x-amz-copy-source"`), false}, + {Key("foo"), nil, true}, + } + + for i, testCase := range testCases { + result, err := json.Marshal(testCase.key) + expectErr := (err != nil) + + if testCase.expectErr != expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: key: expected: %v, got: %v\n", i+1, string(testCase.expectedResult), string(result)) + } + } + } +} + +func TestKeyName(t *testing.T) { + testCases := []struct { + key Key + expectedResult string + }{ + {S3XAmzCopySource, "x-amz-copy-source"}, + {AWSReferer, "Referer"}, + } + + for i, testCase := range testCases { + result := testCase.key.Name() + + if testCase.expectedResult != result { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestKeyUnmarshalJSON(t *testing.T) { + testCases := []struct { + data []byte + expectedKey Key + expectErr bool + }{ + {[]byte(`"s3:x-amz-copy-source"`), S3XAmzCopySource, false}, 
+ {[]byte(`"foo"`), Key(""), true}, + } + + for i, testCase := range testCases { + var key Key + err := json.Unmarshal(testCase.data, &key) + expectErr := (err != nil) + + if testCase.expectErr != expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if testCase.expectedKey != key { + t.Fatalf("case %v: key: expected: %v, got: %v\n", i+1, testCase.expectedKey, key) + } + } + } +} + +func TestKeySetAdd(t *testing.T) { + testCases := []struct { + set KeySet + key Key + expectedResult KeySet + }{ + {NewKeySet(), S3XAmzCopySource, NewKeySet(S3XAmzCopySource)}, + {NewKeySet(S3XAmzCopySource), S3XAmzCopySource, NewKeySet(S3XAmzCopySource)}, + } + + for i, testCase := range testCases { + testCase.set.Add(testCase.key) + + if !reflect.DeepEqual(testCase.expectedResult, testCase.set) { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, testCase.set) + } + } +} + +func TestKeySetDifference(t *testing.T) { + testCases := []struct { + set KeySet + setToDiff KeySet + expectedResult KeySet + }{ + {NewKeySet(), NewKeySet(S3XAmzCopySource), NewKeySet()}, + {NewKeySet(S3Prefix, S3Delimiter, S3MaxKeys), NewKeySet(S3Delimiter, S3MaxKeys), NewKeySet(S3Prefix)}, + } + + for i, testCase := range testCases { + result := testCase.set.Difference(testCase.setToDiff) + + if !reflect.DeepEqual(testCase.expectedResult, result) { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestKeySetIsEmpty(t *testing.T) { + testCases := []struct { + set KeySet + expectedResult bool + }{ + {NewKeySet(), true}, + {NewKeySet(S3Delimiter), false}, + } + + for i, testCase := range testCases { + result := testCase.set.IsEmpty() + + if testCase.expectedResult != result { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestKeySetString(t *testing.T) { + testCases := []struct { + set KeySet + 
expectedResult string + }{ + {NewKeySet(), `[]`}, + {NewKeySet(S3Delimiter), `[s3:delimiter]`}, + } + + for i, testCase := range testCases { + result := testCase.set.String() + + if testCase.expectedResult != result { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestKeySetToSlice(t *testing.T) { + testCases := []struct { + set KeySet + expectedResult []Key + }{ + {NewKeySet(), []Key{}}, + {NewKeySet(S3Delimiter), []Key{S3Delimiter}}, + } + + for i, testCase := range testCases { + result := testCase.set.ToSlice() + + if !reflect.DeepEqual(testCase.expectedResult, result) { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} diff --git a/pkg/policy/condition/name.go b/pkg/policy/condition/name.go new file mode 100644 index 000000000..0cecabb6c --- /dev/null +++ b/pkg/policy/condition/name.go @@ -0,0 +1,79 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package condition + +import ( + "encoding/json" + "fmt" +) + +type name string + +const ( + stringEquals name = "StringEquals" + stringNotEquals = "StringNotEquals" + stringLike = "StringLike" + stringNotLike = "StringNotLike" + ipAddress = "IpAddress" + notIPAddress = "NotIpAddress" + null = "Null" +) + +// IsValid - checks if name is valid or not. 
+func (n name) IsValid() bool { + switch n { + case stringEquals, stringNotEquals, stringLike, stringNotLike, ipAddress, notIPAddress, null: + return true + } + + return false +} + +// MarshalJSON - encodes name to JSON data. +func (n name) MarshalJSON() ([]byte, error) { + if !n.IsValid() { + return nil, fmt.Errorf("invalid name %v", n) + } + + return json.Marshal(string(n)) +} + +// UnmarshalJSON - decodes JSON data to condition name. +func (n *name) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + + parsedName, err := parseName(s) + if err != nil { + return err + } + + *n = parsedName + return nil +} + +func parseName(s string) (name, error) { + n := name(s) + + if n.IsValid() { + return n, nil + } + + return n, fmt.Errorf("invalid condition name '%v'", s) +} diff --git a/pkg/policy/condition/name_test.go b/pkg/policy/condition/name_test.go new file mode 100644 index 000000000..ec47fa338 --- /dev/null +++ b/pkg/policy/condition/name_test.go @@ -0,0 +1,106 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package condition + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestNameIsValid(t *testing.T) { + testCases := []struct { + n name + expectedResult bool + }{ + {stringEquals, true}, + {stringNotEquals, true}, + {stringLike, true}, + {stringNotLike, true}, + {ipAddress, true}, + {notIPAddress, true}, + {null, true}, + {name("foo"), false}, + } + + for i, testCase := range testCases { + result := testCase.n.IsValid() + + if testCase.expectedResult != result { + t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestNameMarshalJSON(t *testing.T) { + testCases := []struct { + n name + expectedResult []byte + expectErr bool + }{ + {stringEquals, []byte(`"StringEquals"`), false}, + {stringNotEquals, []byte(`"StringNotEquals"`), false}, + {stringLike, []byte(`"StringLike"`), false}, + {stringNotLike, []byte(`"StringNotLike"`), false}, + {ipAddress, []byte(`"IpAddress"`), false}, + {notIPAddress, []byte(`"NotIpAddress"`), false}, + {null, []byte(`"Null"`), false}, + {name("foo"), nil, true}, + } + + for i, testCase := range testCases { + result, err := json.Marshal(testCase.n) + expectErr := (err != nil) + + if testCase.expectErr != expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v", i+1, string(testCase.expectedResult), string(result)) + } + } + } +} + +func TestNameUnmarshalJSON(t *testing.T) { + testCases := []struct { + data []byte + expectedResult name + expectErr bool + }{ + {[]byte(`"StringEquals"`), stringEquals, false}, + {[]byte(`"foo"`), name(""), true}, + } + + for i, testCase := range testCases { + var result name + err := json.Unmarshal(testCase.data, &result) + expectErr := (err != nil) + + if testCase.expectErr != expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, 
testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if testCase.expectedResult != result { + t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } + } +} diff --git a/pkg/policy/condition/nullfunc.go b/pkg/policy/condition/nullfunc.go new file mode 100644 index 000000000..353d675a5 --- /dev/null +++ b/pkg/policy/condition/nullfunc.go @@ -0,0 +1,101 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package condition + +import ( + "fmt" + "reflect" + "strconv" +) + +// nullFunc - Null condition function. It checks whether Key is present in given +// values or not. +// For example, +// 1. if Key = S3XAmzCopySource and Value = true, at evaluate() it returns whether +// S3XAmzCopySource is in given value map or not. +// 2. if Key = S3XAmzCopySource and Value = false, at evaluate() it returns whether +// S3XAmzCopySource is NOT in given value map or not. +type nullFunc struct { + k Key + value bool +} + +// evaluate() - evaluates to check whether Key is present in given values or not. +// Depending on condition boolean value, this function returns true or false. +func (f nullFunc) evaluate(values map[string][]string) bool { + requestValue := values[f.k.Name()] + + if f.value { + return len(requestValue) != 0 + } + + return len(requestValue) == 0 +} + +// key() - returns condition key which is used by this condition function. 
+func (f nullFunc) key() Key { + return f.k +} + +// name() - returns "Null" condition name. +func (f nullFunc) name() name { + return null +} + +func (f nullFunc) String() string { + return fmt.Sprintf("%v:%v:%v", null, f.k, f.value) +} + +// toMap - returns map representation of this function. +func (f nullFunc) toMap() map[Key]ValueSet { + if !f.k.IsValid() { + return nil + } + + return map[Key]ValueSet{ + f.k: NewValueSet(NewBoolValue(f.value)), + } +} + +func newNullFunc(key Key, values ValueSet) (Function, error) { + if len(values) != 1 { + return nil, fmt.Errorf("only one value is allowed for Null condition") + } + + var value bool + for v := range values { + switch v.GetType() { + case reflect.Bool: + value, _ = v.GetBool() + case reflect.String: + var err error + s, _ := v.GetString() + if value, err = strconv.ParseBool(s); err != nil { + return nil, fmt.Errorf("value must be a boolean string for Null condition") + } + default: + return nil, fmt.Errorf("value must be a boolean for Null condition") + } + } + + return &nullFunc{key, value}, nil +} + +// NewNullFunc - returns new Null function. +func NewNullFunc(key Key, value bool) (Function, error) { + return &nullFunc{key, value}, nil +} diff --git a/pkg/policy/condition/nullfunc_test.go b/pkg/policy/condition/nullfunc_test.go new file mode 100644 index 000000000..77e218bf7 --- /dev/null +++ b/pkg/policy/condition/nullfunc_test.go @@ -0,0 +1,161 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package condition + +import ( + "reflect" + "testing" +) + +func TestNullFuncEvaluate(t *testing.T) { + case1Function, err := newNullFunc(S3Prefix, NewValueSet(NewBoolValue(true))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Function, err := newNullFunc(S3Prefix, NewValueSet(NewBoolValue(false))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + testCases := []struct { + function Function + values map[string][]string + expectedResult bool + }{ + {case1Function, map[string][]string{"prefix": {"true"}}, true}, + {case1Function, map[string][]string{"prefix": {"false"}}, true}, + {case1Function, map[string][]string{"prefix": {"mybucket/foo"}}, true}, + {case1Function, map[string][]string{}, false}, + {case1Function, map[string][]string{"delimiter": {"/"}}, false}, + {case2Function, map[string][]string{"prefix": {"true"}}, false}, + {case2Function, map[string][]string{"prefix": {"false"}}, false}, + {case2Function, map[string][]string{"prefix": {"mybucket/foo"}}, false}, + {case2Function, map[string][]string{}, true}, + {case2Function, map[string][]string{"delimiter": {"/"}}, true}, + } + + for i, testCase := range testCases { + result := testCase.function.evaluate(testCase.values) + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestNullFuncKey(t *testing.T) { + case1Function, err := newNullFunc(S3XAmzCopySource, NewValueSet(NewBoolValue(true))) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + testCases := []struct { + function Function + expectedResult Key + }{ + {case1Function, S3XAmzCopySource}, + } + + for i, testCase := range testCases { + result := testCase.function.key() + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestNullFuncToMap(t *testing.T) { + case1Function, err := newNullFunc(S3Prefix, NewValueSet(NewBoolValue(true))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case1Result := map[Key]ValueSet{ + S3Prefix: NewValueSet(NewBoolValue(true)), + } + + case2Function, err := newNullFunc(S3Prefix, NewValueSet(NewBoolValue(false))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Result := map[Key]ValueSet{ + S3Prefix: NewValueSet(NewBoolValue(false)), + } + + testCases := []struct { + f Function + expectedResult map[Key]ValueSet + }{ + {case1Function, case1Result}, + {case2Function, case2Result}, + {&nullFunc{}, nil}, + } + + for i, testCase := range testCases { + result := testCase.f.toMap() + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestNewNullFunc(t *testing.T) { + case1Function, err := newNullFunc(S3Prefix, NewValueSet(NewBoolValue(true))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Function, err := newNullFunc(S3Prefix, NewValueSet(NewBoolValue(false))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + testCases := []struct { + key Key + values ValueSet + expectedResult Function + expectErr bool + }{ + {S3Prefix, NewValueSet(NewBoolValue(true)), case1Function, false}, + {S3Prefix, NewValueSet(NewStringValue("false")), case2Function, false}, + // Multiple values error. + {S3Prefix, NewValueSet(NewBoolValue(true), NewBoolValue(false)), nil, true}, + // Invalid boolean string error. 
+ {S3Prefix, NewValueSet(NewStringValue("foo")), nil, true}, + // Invalid value error. + {S3Prefix, NewValueSet(NewIntValue(7)), nil, true}, + } + + for i, testCase := range testCases { + result, err := newNullFunc(testCase.key, testCase.values) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } + } +} diff --git a/pkg/policy/condition/stringequalsfunc.go b/pkg/policy/condition/stringequalsfunc.go new file mode 100644 index 000000000..03170c0c0 --- /dev/null +++ b/pkg/policy/condition/stringequalsfunc.go @@ -0,0 +1,182 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package condition + +import ( + "fmt" + "sort" + "strings" + + "github.com/minio/minio-go/pkg/set" +) + +func toStringEqualsFuncString(n name, key Key, values set.StringSet) string { + valueStrings := values.ToSlice() + sort.Strings(valueStrings) + + return fmt.Sprintf("%v:%v:%v", n, key, valueStrings) +} + +// stringEqualsFunc - String equals function. It checks whether value by Key in given +// values map is in condition values. 
+// For example, +// - if values = ["mybucket/foo"], at evaluate() it returns whether string +// in value map for Key is in values. +type stringEqualsFunc struct { + k Key + values set.StringSet +} + +// evaluate() - evaluates to check whether value by Key in given values is in +// condition values. +func (f stringEqualsFunc) evaluate(values map[string][]string) bool { + requestValue := values[f.k.Name()] + return !f.values.Intersection(set.CreateStringSet(requestValue...)).IsEmpty() +} + +// key() - returns condition key which is used by this condition function. +func (f stringEqualsFunc) key() Key { + return f.k +} + +// name() - returns "StringEquals" condition name. +func (f stringEqualsFunc) name() name { + return stringEquals +} + +func (f stringEqualsFunc) String() string { + return toStringEqualsFuncString(stringEquals, f.k, f.values) +} + +// toMap - returns map representation of this function. +func (f stringEqualsFunc) toMap() map[Key]ValueSet { + if !f.k.IsValid() { + return nil + } + + values := NewValueSet() + for _, value := range f.values.ToSlice() { + values.Add(NewStringValue(value)) + } + + return map[Key]ValueSet{ + f.k: values, + } +} + +// stringNotEqualsFunc - String not equals function. It checks whether value by Key in +// given values is NOT in condition values. +// For example, +// - if values = ["mybucket/foo"], at evaluate() it returns whether string +// in value map for Key is NOT in values. +type stringNotEqualsFunc struct { + stringEqualsFunc +} + +// evaluate() - evaluates to check whether value by Key in given values is NOT in +// condition values. +func (f stringNotEqualsFunc) evaluate(values map[string][]string) bool { + return !f.stringEqualsFunc.evaluate(values) +} + +// name() - returns "StringNotEquals" condition name. 
+func (f stringNotEqualsFunc) name() name { + return stringNotEquals +} + +func (f stringNotEqualsFunc) String() string { + return toStringEqualsFuncString(stringNotEquals, f.stringEqualsFunc.k, f.stringEqualsFunc.values) +} + +func valuesToStringSlice(n name, values ValueSet) ([]string, error) { + valueStrings := []string{} + + for value := range values { + // FIXME: if AWS supports non-string values, we would need to support it. + s, err := value.GetString() + if err != nil { + return nil, fmt.Errorf("value must be a string for %v condition", n) + } + + valueStrings = append(valueStrings, s) + } + + return valueStrings, nil +} + +func validateStringEqualsValues(n name, key Key, values set.StringSet) error { + for _, s := range values.ToSlice() { + switch key { + case S3XAmzCopySource: + tokens := strings.SplitN(s, "/", 2) + if len(tokens) < 2 { + return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzCopySource, n) + } + // FIXME: tokens[0] must be a valid bucket name. + case S3XAmzServerSideEncryption: + if s != "aws:kms" && s != "AES256" { + return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzServerSideEncryption, n) + } + case S3XAmzMetadataDirective: + if s != "COPY" && s != "REPLACE" { + return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzMetadataDirective, n) + } + } + } + + return nil +} + +// newStringEqualsFunc - returns new StringEquals function. +func newStringEqualsFunc(key Key, values ValueSet) (Function, error) { + valueStrings, err := valuesToStringSlice(stringEquals, values) + if err != nil { + return nil, err + } + + return NewStringEqualsFunc(key, valueStrings...) +} + +// NewStringEqualsFunc - returns new StringEquals function. +func NewStringEqualsFunc(key Key, values ...string) (Function, error) { + sset := set.CreateStringSet(values...) 
+ if err := validateStringEqualsValues(stringEquals, key, sset); err != nil { + return nil, err + } + + return &stringEqualsFunc{key, sset}, nil +} + +// newStringNotEqualsFunc - returns new StringNotEquals function. +func newStringNotEqualsFunc(key Key, values ValueSet) (Function, error) { + valueStrings, err := valuesToStringSlice(stringNotEquals, values) + if err != nil { + return nil, err + } + + return NewStringNotEqualsFunc(key, valueStrings...) +} + +// NewStringNotEqualsFunc - returns new StringNotEquals function. +func NewStringNotEqualsFunc(key Key, values ...string) (Function, error) { + sset := set.CreateStringSet(values...) + if err := validateStringEqualsValues(stringNotEquals, key, sset); err != nil { + return nil, err + } + + return &stringNotEqualsFunc{stringEqualsFunc{key, sset}}, nil +} diff --git a/pkg/policy/condition/stringequalsfunc_test.go b/pkg/policy/condition/stringequalsfunc_test.go new file mode 100644 index 000000000..2344bb34d --- /dev/null +++ b/pkg/policy/condition/stringequalsfunc_test.go @@ -0,0 +1,718 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package condition + +import ( + "reflect" + "testing" +) + +func TestStringEqualsFuncEvaluate(t *testing.T) { + case1Function, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case2Function, err := newStringEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case3Function, err := newStringEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case4Function, err := newStringEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + testCases := []struct { + function Function + values map[string][]string + expectedResult bool + }{ + {case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, true}, + {case1Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, false}, + {case1Function, map[string][]string{}, false}, + {case1Function, map[string][]string{"delimiter": {"/"}}, false}, + + {case2Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, true}, + {case2Function, map[string][]string{"x-amz-server-side-encryption": {"aws:kms"}}, false}, + {case2Function, map[string][]string{}, false}, + {case2Function, map[string][]string{"delimiter": {"/"}}, false}, + + {case3Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, true}, + {case3Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, false}, + {case3Function, map[string][]string{}, false}, + {case3Function, map[string][]string{"delimiter": {"/"}}, false}, + + {case4Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, true}, + {case4Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, false}, + {case4Function, map[string][]string{}, false}, + {case4Function, map[string][]string{"delimiter": {"/"}}, false}, + } + + for i, testCase := range testCases { + result := testCase.function.evaluate(testCase.values) + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, 
got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestStringEqualsFuncKey(t *testing.T) { + case1Function, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Function, err := newStringEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case3Function, err := newStringEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case4Function, err := newStringEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + testCases := []struct { + function Function + expectedResult Key + }{ + {case1Function, S3XAmzCopySource}, + {case2Function, S3XAmzServerSideEncryption}, + {case3Function, S3XAmzMetadataDirective}, + {case4Function, S3LocationConstraint}, + } + + for i, testCase := range testCases { + result := testCase.function.key() + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestStringEqualsFuncToMap(t *testing.T) { + case1Function, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case1Result := map[Key]ValueSet{ + S3XAmzCopySource: NewValueSet(NewStringValue("mybucket/myobject")), + } + + case2Function, err := newStringEqualsFunc(S3XAmzCopySource, + NewValueSet( + NewStringValue("mybucket/myobject"), + NewStringValue("yourbucket/myobject"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case2Result := map[Key]ValueSet{ + S3XAmzCopySource: NewValueSet( + NewStringValue("mybucket/myobject"), + NewStringValue("yourbucket/myobject"), + ), + } + + case3Function, err := newStringEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case3Result := map[Key]ValueSet{ + S3XAmzServerSideEncryption: NewValueSet(NewStringValue("AES256")), + } + + case4Function, err := newStringEqualsFunc(S3XAmzServerSideEncryption, + NewValueSet( + NewStringValue("AES256"), + NewStringValue("aws:kms"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case4Result := map[Key]ValueSet{ + S3XAmzServerSideEncryption: NewValueSet( + NewStringValue("AES256"), + NewStringValue("aws:kms"), + ), + } + + case5Function, err := newStringEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case5Result := map[Key]ValueSet{ + S3XAmzMetadataDirective: NewValueSet(NewStringValue("REPLACE")), + } + + case6Function, err := newStringEqualsFunc(S3XAmzMetadataDirective, + NewValueSet( + NewStringValue("REPLACE"), + NewStringValue("COPY"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case6Result := map[Key]ValueSet{ + S3XAmzMetadataDirective: NewValueSet( + NewStringValue("REPLACE"), + NewStringValue("COPY"), + ), + } + + case7Function, err := newStringEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case7Result := map[Key]ValueSet{ + S3LocationConstraint: NewValueSet(NewStringValue("eu-west-1")), + } + + case8Function, err := newStringEqualsFunc(S3LocationConstraint, + NewValueSet( + NewStringValue("eu-west-1"), + NewStringValue("us-west-1"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case8Result := map[Key]ValueSet{ + S3LocationConstraint: NewValueSet( + NewStringValue("eu-west-1"), + NewStringValue("us-west-1"), + ), + } + + testCases := []struct { + f Function + expectedResult map[Key]ValueSet + }{ + {case1Function, case1Result}, + {case2Function, case2Result}, + {case3Function, case3Result}, + {case4Function, case4Result}, + {case5Function, case5Result}, + {case6Function, case6Result}, + {case7Function, case7Result}, + {case8Function, case8Result}, + {&stringEqualsFunc{}, nil}, + } + + for i, testCase := range testCases { + result := testCase.f.toMap() + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestStringNotEqualsFuncEvaluate(t *testing.T) { + case1Function, err := newStringNotEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Function, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case3Function, err := newStringNotEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case4Function, err := newStringNotEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + testCases := []struct { + function Function + values map[string][]string + expectedResult bool + }{ + {case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, false}, + {case1Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, true}, + {case1Function, map[string][]string{}, true}, + {case1Function, map[string][]string{"delimiter": {"/"}}, true}, + + {case2Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, false}, + {case2Function, map[string][]string{"x-amz-server-side-encryption": {"aws:kms"}}, true}, + {case2Function, map[string][]string{}, true}, + {case2Function, map[string][]string{"delimiter": {"/"}}, true}, + + {case3Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, false}, + {case3Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, true}, + {case3Function, map[string][]string{}, true}, + {case3Function, map[string][]string{"delimiter": {"/"}}, true}, + + {case4Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, false}, + {case4Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, true}, + {case4Function, map[string][]string{}, true}, + {case4Function, map[string][]string{"delimiter": {"/"}}, true}, + } + + for i, testCase := range testCases { + result := testCase.function.evaluate(testCase.values) + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestStringNotEqualsFuncKey(t *testing.T) { + case1Function, err := newStringNotEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Function, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case3Function, err := newStringNotEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case4Function, err := newStringNotEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + testCases := []struct { + function Function + expectedResult Key + }{ + {case1Function, S3XAmzCopySource}, + {case2Function, S3XAmzServerSideEncryption}, + {case3Function, S3XAmzMetadataDirective}, + {case4Function, S3LocationConstraint}, + } + + for i, testCase := range testCases { + result := testCase.function.key() + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestStringNotEqualsFuncToMap(t *testing.T) { + case1Function, err := newStringNotEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case1Result := map[Key]ValueSet{ + S3XAmzCopySource: NewValueSet(NewStringValue("mybucket/myobject")), + } + + case2Function, err := newStringNotEqualsFunc(S3XAmzCopySource, + NewValueSet( + NewStringValue("mybucket/myobject"), + NewStringValue("yourbucket/myobject"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Result := map[Key]ValueSet{ + S3XAmzCopySource: NewValueSet( + NewStringValue("mybucket/myobject"), + NewStringValue("yourbucket/myobject"), + ), + } + + case3Function, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case3Result := map[Key]ValueSet{ + S3XAmzServerSideEncryption: NewValueSet(NewStringValue("AES256")), + } + + case4Function, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, + NewValueSet( + NewStringValue("AES256"), + NewStringValue("aws:kms"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case4Result := map[Key]ValueSet{ + S3XAmzServerSideEncryption: NewValueSet( + NewStringValue("AES256"), + NewStringValue("aws:kms"), + ), + } + + case5Function, err := newStringNotEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case5Result := map[Key]ValueSet{ + S3XAmzMetadataDirective: NewValueSet(NewStringValue("REPLACE")), + } + + case6Function, err := newStringNotEqualsFunc(S3XAmzMetadataDirective, + NewValueSet( + NewStringValue("REPLACE"), + NewStringValue("COPY"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case6Result := map[Key]ValueSet{ + S3XAmzMetadataDirective: NewValueSet( + NewStringValue("REPLACE"), + NewStringValue("COPY"), + ), + } + + case7Function, err := newStringNotEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case7Result := map[Key]ValueSet{ + S3LocationConstraint: NewValueSet(NewStringValue("eu-west-1")), + } + + case8Function, err := newStringNotEqualsFunc(S3LocationConstraint, + NewValueSet( + NewStringValue("eu-west-1"), + NewStringValue("us-west-1"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case8Result := map[Key]ValueSet{ + S3LocationConstraint: NewValueSet( + NewStringValue("eu-west-1"), + NewStringValue("us-west-1"), + ), + } + + testCases := []struct { + f Function + expectedResult map[Key]ValueSet + }{ + {case1Function, case1Result}, + {case2Function, case2Result}, + {case3Function, case3Result}, + {case4Function, case4Result}, + {case5Function, case5Result}, + {case6Function, case6Result}, + {case7Function, case7Result}, + {case8Function, case8Result}, + {&stringNotEqualsFunc{}, nil}, + } + + for i, testCase := range testCases { + result := testCase.f.toMap() + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestNewStringEqualsFunc(t *testing.T) { + case1Function, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Function, err := newStringEqualsFunc(S3XAmzCopySource, + NewValueSet( + NewStringValue("mybucket/myobject"), + NewStringValue("yourbucket/myobject"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case3Function, err := newStringEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case4Function, err := newStringEqualsFunc(S3XAmzServerSideEncryption, + NewValueSet( + NewStringValue("AES256"), + NewStringValue("aws:kms"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case5Function, err := newStringEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case6Function, err := newStringEqualsFunc(S3XAmzMetadataDirective, + NewValueSet( + NewStringValue("REPLACE"), + NewStringValue("COPY"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case7Function, err := newStringEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case8Function, err := newStringEqualsFunc(S3LocationConstraint, + NewValueSet( + NewStringValue("eu-west-1"), + NewStringValue("us-west-1"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + testCases := []struct { + key Key + values ValueSet + expectedResult Function + expectErr bool + }{ + {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")), case1Function, false}, + {S3XAmzCopySource, + NewValueSet( + NewStringValue("mybucket/myobject"), + NewStringValue("yourbucket/myobject"), + ), case2Function, false}, + + {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")), case3Function, false}, + {S3XAmzServerSideEncryption, + NewValueSet( + NewStringValue("AES256"), + NewStringValue("aws:kms"), + ), case4Function, false}, + + {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")), case5Function, false}, + {S3XAmzMetadataDirective, + NewValueSet( + NewStringValue("REPLACE"), + NewStringValue("COPY"), + ), case6Function, false}, + + {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")), case7Function, false}, + {S3LocationConstraint, + NewValueSet( + NewStringValue("eu-west-1"), + NewStringValue("us-west-1"), + ), case8Function, false}, + + // Unsupported value error. + {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"), NewIntValue(7)), nil, true}, + {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"), NewIntValue(7)), nil, true}, + {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"), NewIntValue(7)), nil, true}, + {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"), NewIntValue(7)), nil, true}, + + // Invalid value error. 
+ {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket")), nil, true}, + {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("SSE-C")), nil, true}, + {S3XAmzMetadataDirective, NewValueSet(NewStringValue("DUPLICATE")), nil, true}, + } + + for i, testCase := range testCases { + result, err := newStringEqualsFunc(testCase.key, testCase.values) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } + } +} + +func TestNewStringNotEqualsFunc(t *testing.T) { + case1Function, err := newStringNotEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Function, err := newStringNotEqualsFunc(S3XAmzCopySource, + NewValueSet( + NewStringValue("mybucket/myobject"), + NewStringValue("yourbucket/myobject"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case3Function, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case4Function, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, + NewValueSet( + NewStringValue("AES256"), + NewStringValue("aws:kms"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case5Function, err := newStringNotEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case6Function, err := newStringNotEqualsFunc(S3XAmzMetadataDirective, + NewValueSet( + NewStringValue("REPLACE"), + NewStringValue("COPY"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case7Function, err := newStringNotEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case8Function, err := newStringNotEqualsFunc(S3LocationConstraint, + NewValueSet( + NewStringValue("eu-west-1"), + NewStringValue("us-west-1"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + testCases := []struct { + key Key + values ValueSet + expectedResult Function + expectErr bool + }{ + {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")), case1Function, false}, + {S3XAmzCopySource, + NewValueSet( + NewStringValue("mybucket/myobject"), + NewStringValue("yourbucket/myobject"), + ), case2Function, false}, + + {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")), case3Function, false}, + {S3XAmzServerSideEncryption, + NewValueSet( + NewStringValue("AES256"), + NewStringValue("aws:kms"), + ), case4Function, false}, + + {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")), case5Function, false}, + {S3XAmzMetadataDirective, + NewValueSet( + NewStringValue("REPLACE"), + NewStringValue("COPY"), + ), case6Function, false}, + + {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")), case7Function, false}, + {S3LocationConstraint, + NewValueSet( + NewStringValue("eu-west-1"), + NewStringValue("us-west-1"), + ), case8Function, false}, + + // Unsupported value error. + {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"), NewIntValue(7)), nil, true}, + {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"), NewIntValue(7)), nil, true}, + {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"), NewIntValue(7)), nil, true}, + {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"), NewIntValue(7)), nil, true}, + + // Invalid value error. 
+ {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket")), nil, true}, + {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("SSE-C")), nil, true}, + {S3XAmzMetadataDirective, NewValueSet(NewStringValue("DUPLICATE")), nil, true}, + } + + for i, testCase := range testCases { + result, err := newStringNotEqualsFunc(testCase.key, testCase.values) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } + } +} diff --git a/pkg/policy/condition/stringlikefunc.go b/pkg/policy/condition/stringlikefunc.go new file mode 100644 index 000000000..99d20dd2b --- /dev/null +++ b/pkg/policy/condition/stringlikefunc.go @@ -0,0 +1,165 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package condition + +import ( + "fmt" + "sort" + "strings" + + "github.com/minio/minio-go/pkg/set" + "github.com/minio/minio/pkg/wildcard" +) + +func toStringLikeFuncString(n name, key Key, values set.StringSet) string { + valueStrings := values.ToSlice() + sort.Strings(valueStrings) + + return fmt.Sprintf("%v:%v:%v", n, key, valueStrings) +} + +// stringLikeFunc - String like function. 
 It checks whether value by Key in given
+// values map is wildcard matching in condition values.
+// For example,
+//   - if values = ["mybucket/foo*"], at evaluate() it returns whether string
+//     in value map for Key is wildcard matching in values.
+type stringLikeFunc struct {
+	k      Key
+	values set.StringSet
+}
+
+// evaluate() - evaluates to check whether value by Key in given values is wildcard
+// matching in condition values.
+func (f stringLikeFunc) evaluate(values map[string][]string) bool {
+	for _, v := range values[f.k.Name()] {
+		if !f.values.FuncMatch(wildcard.Match, v).IsEmpty() {
+			return true
+		}
+	}
+
+	return false
+}
+
+// key() - returns condition key which is used by this condition function.
+func (f stringLikeFunc) key() Key {
+	return f.k
+}
+
+// name() - returns "StringLike" function name.
+func (f stringLikeFunc) name() name {
+	return stringLike
+}
+
+func (f stringLikeFunc) String() string {
+	return toStringLikeFuncString(stringLike, f.k, f.values)
+}
+
+// toMap - returns map representation of this function.
+func (f stringLikeFunc) toMap() map[Key]ValueSet {
+	if !f.k.IsValid() {
+		return nil
+	}
+
+	values := NewValueSet()
+	for _, value := range f.values.ToSlice() {
+		values.Add(NewStringValue(value))
+	}
+
+	return map[Key]ValueSet{
+		f.k: values,
+	}
+}
+
+// stringNotLikeFunc - String not like function. It checks whether value by Key in given
+// values map is NOT wildcard matching in condition values.
+// For example,
+//   - if values = ["mybucket/foo*"], at evaluate() it returns whether string
+//     in value map for Key is NOT wildcard matching in values.
+type stringNotLikeFunc struct {
+	stringLikeFunc
+}
+
+// evaluate() - evaluates to check whether value by Key in given values is NOT wildcard
+// matching in condition values.
+func (f stringNotLikeFunc) evaluate(values map[string][]string) bool {
+	return !f.stringLikeFunc.evaluate(values)
+}
+
+// name() - returns "StringNotLike" function name.
+func (f stringNotLikeFunc) name() name { + return stringNotLike +} + +func (f stringNotLikeFunc) String() string { + return toStringLikeFuncString(stringNotLike, f.stringLikeFunc.k, f.stringLikeFunc.values) +} + +func validateStringLikeValues(n name, key Key, values set.StringSet) error { + for _, s := range values.ToSlice() { + switch key { + case S3XAmzCopySource: + tokens := strings.SplitN(s, "/", 2) + if len(tokens) < 2 { + return fmt.Errorf("invalid value '%v' for '%v' in %v condition", s, key, n) + } + + // FIXME: tokens[0] must be a valid bucket name. + } + } + + return nil +} + +// newStringLikeFunc - returns new StringLike function. +func newStringLikeFunc(key Key, values ValueSet) (Function, error) { + valueStrings, err := valuesToStringSlice(stringLike, values) + if err != nil { + return nil, err + } + + return NewStringLikeFunc(key, valueStrings...) +} + +// NewStringLikeFunc - returns new StringLike function. +func NewStringLikeFunc(key Key, values ...string) (Function, error) { + sset := set.CreateStringSet(values...) + if err := validateStringLikeValues(stringLike, key, sset); err != nil { + return nil, err + } + + return &stringLikeFunc{key, sset}, nil +} + +// newStringNotLikeFunc - returns new StringNotLike function. +func newStringNotLikeFunc(key Key, values ValueSet) (Function, error) { + valueStrings, err := valuesToStringSlice(stringNotLike, values) + if err != nil { + return nil, err + } + + return NewStringNotLikeFunc(key, valueStrings...) +} + +// NewStringNotLikeFunc - returns new StringNotLike function. +func NewStringNotLikeFunc(key Key, values ...string) (Function, error) { + sset := set.CreateStringSet(values...) 
+ if err := validateStringLikeValues(stringNotLike, key, sset); err != nil { + return nil, err + } + + return &stringNotLikeFunc{stringLikeFunc{key, sset}}, nil +} diff --git a/pkg/policy/condition/stringlikefunc_test.go b/pkg/policy/condition/stringlikefunc_test.go new file mode 100644 index 000000000..94275e58a --- /dev/null +++ b/pkg/policy/condition/stringlikefunc_test.go @@ -0,0 +1,810 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package condition + +import ( + "reflect" + "testing" +) + +func TestStringLikeFuncEvaluate(t *testing.T) { + case1Function, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Function, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case3Function, err := newStringLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case4Function, err := newStringLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case5Function, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*"))) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case6Function, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case7Function, err := newStringLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case8Function, err := newStringLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + testCases := []struct { + function Function + values map[string][]string + expectedResult bool + }{ + {case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, true}, + {case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject.png"}}, true}, + {case1Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, false}, + {case1Function, map[string][]string{}, false}, + {case1Function, map[string][]string{"delimiter": {"/"}}, false}, + + {case2Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, true}, + {case2Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject.png"}}, false}, + {case2Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, false}, + {case2Function, map[string][]string{}, false}, + {case2Function, map[string][]string{"delimiter": {"/"}}, false}, + + {case3Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, true}, + {case3Function, map[string][]string{"x-amz-server-side-encryption": {"AES512"}}, true}, + {case3Function, map[string][]string{"x-amz-server-side-encryption": {"aws:kms"}}, false}, + {case3Function, map[string][]string{}, false}, + {case3Function, map[string][]string{"delimiter": {"/"}}, false}, + + {case4Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, true}, + {case4Function, map[string][]string{"x-amz-server-side-encryption": {"AES512"}}, false}, 
+ {case4Function, map[string][]string{"x-amz-server-side-encryption": {"aws:kms"}}, false}, + {case4Function, map[string][]string{}, false}, + {case4Function, map[string][]string{"delimiter": {"/"}}, false}, + + {case5Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, true}, + {case5Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE/COPY"}}, true}, + {case5Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, false}, + {case5Function, map[string][]string{}, false}, + {case5Function, map[string][]string{"delimiter": {"/"}}, false}, + + {case6Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, true}, + {case6Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE/COPY"}}, false}, + {case6Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, false}, + {case6Function, map[string][]string{}, false}, + {case6Function, map[string][]string{"delimiter": {"/"}}, false}, + + {case7Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, true}, + {case7Function, map[string][]string{"LocationConstraint": {"eu-west-2"}}, true}, + {case7Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, false}, + {case7Function, map[string][]string{}, false}, + {case7Function, map[string][]string{"delimiter": {"/"}}, false}, + + {case8Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, true}, + {case8Function, map[string][]string{"LocationConstraint": {"eu-west-2"}}, false}, + {case8Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, false}, + {case8Function, map[string][]string{}, false}, + {case8Function, map[string][]string{"delimiter": {"/"}}, false}, + } + + for i, testCase := range testCases { + result := testCase.function.evaluate(testCase.values) + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestStringLikeFuncKey(t *testing.T) { + 
case1Function, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Function, err := newStringLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case3Function, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case4Function, err := newStringLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + testCases := []struct { + function Function + expectedResult Key + }{ + {case1Function, S3XAmzCopySource}, + {case2Function, S3XAmzServerSideEncryption}, + {case3Function, S3XAmzMetadataDirective}, + {case4Function, S3LocationConstraint}, + } + + for i, testCase := range testCases { + result := testCase.function.key() + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestStringLikeFuncToMap(t *testing.T) { + case1Function, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case1Result := map[Key]ValueSet{ + S3XAmzCopySource: NewValueSet(NewStringValue("mybucket/*")), + } + + case2Function, err := newStringLikeFunc(S3XAmzCopySource, + NewValueSet( + NewStringValue("mybucket/*"), + NewStringValue("yourbucket/myobject*"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case2Result := map[Key]ValueSet{ + S3XAmzCopySource: NewValueSet( + NewStringValue("mybucket/*"), + NewStringValue("yourbucket/myobject*"), + ), + } + + case3Function, err := newStringLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case3Result := map[Key]ValueSet{ + S3XAmzServerSideEncryption: NewValueSet(NewStringValue("AES*")), + } + + case4Function, err := newStringLikeFunc(S3XAmzServerSideEncryption, + NewValueSet( + NewStringValue("AES*"), + NewStringValue("aws:*"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case4Result := map[Key]ValueSet{ + S3XAmzServerSideEncryption: NewValueSet( + NewStringValue("AES*"), + NewStringValue("aws:*"), + ), + } + + case5Function, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case5Result := map[Key]ValueSet{ + S3XAmzMetadataDirective: NewValueSet(NewStringValue("REPL*")), + } + + case6Function, err := newStringLikeFunc(S3XAmzMetadataDirective, + NewValueSet( + NewStringValue("REPL*"), + NewStringValue("COPY*"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case6Result := map[Key]ValueSet{ + S3XAmzMetadataDirective: NewValueSet( + NewStringValue("REPL*"), + NewStringValue("COPY*"), + ), + } + + case7Function, err := newStringLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case7Result := map[Key]ValueSet{ + S3LocationConstraint: NewValueSet(NewStringValue("eu-west-*")), + } + + case8Function, err := newStringLikeFunc(S3LocationConstraint, + NewValueSet( + NewStringValue("eu-west-*"), + NewStringValue("us-west-*"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case8Result := map[Key]ValueSet{ + S3LocationConstraint: NewValueSet( + NewStringValue("eu-west-*"), + NewStringValue("us-west-*"), + ), + } + + testCases := []struct { + f Function + expectedResult map[Key]ValueSet + }{ + {case1Function, case1Result}, + {case2Function, case2Result}, + {case3Function, case3Result}, + {case4Function, case4Result}, + {case5Function, case5Result}, + {case6Function, case6Result}, + {case7Function, case7Result}, + {case8Function, case8Result}, + {&stringLikeFunc{}, nil}, + } + + for i, testCase := range testCases { + result := testCase.f.toMap() + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestStringNotLikeFuncEvaluate(t *testing.T) { + case1Function, err := newStringNotLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Function, err := newStringNotLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case3Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case4Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case5Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case6Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case7Function, err := newStringNotLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case8Function, err := newStringNotLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + testCases := []struct { + function Function + values map[string][]string + expectedResult bool + }{ + {case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, false}, + {case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject.png"}}, false}, + {case1Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, true}, + {case1Function, map[string][]string{}, true}, + {case1Function, map[string][]string{"delimiter": {"/"}}, true}, + + {case2Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, false}, + {case2Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject.png"}}, true}, + {case2Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, true}, + {case2Function, map[string][]string{}, true}, + {case2Function, map[string][]string{"delimiter": {"/"}}, true}, + + {case3Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, false}, + {case3Function, map[string][]string{"x-amz-server-side-encryption": {"AES512"}}, false}, + {case3Function, map[string][]string{"x-amz-server-side-encryption": {"aws:kms"}}, true}, + {case3Function, map[string][]string{}, true}, + {case3Function, map[string][]string{"delimiter": {"/"}}, true}, + + {case4Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, false}, + {case4Function, map[string][]string{"x-amz-server-side-encryption": {"AES512"}}, true}, + {case4Function, map[string][]string{"x-amz-server-side-encryption": {"aws:kms"}}, true}, + {case4Function, map[string][]string{}, true}, + {case4Function, 
map[string][]string{"delimiter": {"/"}}, true}, + + {case5Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, false}, + {case5Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE/COPY"}}, false}, + {case5Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, true}, + {case5Function, map[string][]string{}, true}, + {case5Function, map[string][]string{"delimiter": {"/"}}, true}, + + {case6Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, false}, + {case6Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE/COPY"}}, true}, + {case6Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, true}, + {case6Function, map[string][]string{}, true}, + {case6Function, map[string][]string{"delimiter": {"/"}}, true}, + + {case7Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, false}, + {case7Function, map[string][]string{"LocationConstraint": {"eu-west-2"}}, false}, + {case7Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, true}, + {case7Function, map[string][]string{}, true}, + {case7Function, map[string][]string{"delimiter": {"/"}}, true}, + + {case8Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, false}, + {case8Function, map[string][]string{"LocationConstraint": {"eu-west-2"}}, true}, + {case8Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, true}, + {case8Function, map[string][]string{}, true}, + {case8Function, map[string][]string{"delimiter": {"/"}}, true}, + } + + for i, testCase := range testCases { + result := testCase.function.evaluate(testCase.values) + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestStringNotLikeFuncKey(t *testing.T) { + case1Function, err := newStringNotLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case2Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case3Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case4Function, err := newStringNotLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + testCases := []struct { + function Function + expectedResult Key + }{ + {case1Function, S3XAmzCopySource}, + {case2Function, S3XAmzServerSideEncryption}, + {case3Function, S3XAmzMetadataDirective}, + {case4Function, S3LocationConstraint}, + } + + for i, testCase := range testCases { + result := testCase.function.key() + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestStringNotLikeFuncToMap(t *testing.T) { + case1Function, err := newStringNotLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case1Result := map[Key]ValueSet{ + S3XAmzCopySource: NewValueSet(NewStringValue("mybucket/*")), + } + + case2Function, err := newStringNotLikeFunc(S3XAmzCopySource, + NewValueSet( + NewStringValue("mybucket/*"), + NewStringValue("yourbucket/myobject*"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Result := map[Key]ValueSet{ + S3XAmzCopySource: NewValueSet( + NewStringValue("mybucket/*"), + NewStringValue("yourbucket/myobject*"), + ), + } + + case3Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*"))) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case3Result := map[Key]ValueSet{ + S3XAmzServerSideEncryption: NewValueSet(NewStringValue("AES*")), + } + + case4Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption, + NewValueSet( + NewStringValue("AES*"), + NewStringValue("aws:*"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case4Result := map[Key]ValueSet{ + S3XAmzServerSideEncryption: NewValueSet( + NewStringValue("AES*"), + NewStringValue("aws:*"), + ), + } + + case5Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case5Result := map[Key]ValueSet{ + S3XAmzMetadataDirective: NewValueSet(NewStringValue("REPL*")), + } + + case6Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective, + NewValueSet( + NewStringValue("REPL*"), + NewStringValue("COPY*"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case6Result := map[Key]ValueSet{ + S3XAmzMetadataDirective: NewValueSet( + NewStringValue("REPL*"), + NewStringValue("COPY*"), + ), + } + + case7Function, err := newStringNotLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case7Result := map[Key]ValueSet{ + S3LocationConstraint: NewValueSet(NewStringValue("eu-west-*")), + } + + case8Function, err := newStringNotLikeFunc(S3LocationConstraint, + NewValueSet( + NewStringValue("eu-west-*"), + NewStringValue("us-west-*"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case8Result := map[Key]ValueSet{ + S3LocationConstraint: NewValueSet( + NewStringValue("eu-west-*"), + NewStringValue("us-west-*"), + ), + } + + testCases := []struct { + f Function + expectedResult map[Key]ValueSet + }{ + {case1Function, case1Result}, + {case2Function, case2Result}, + {case3Function, case3Result}, + {case4Function, case4Result}, + {case5Function, case5Result}, + {case6Function, case6Result}, + {case7Function, case7Result}, + {case8Function, case8Result}, + {&stringNotLikeFunc{}, nil}, + } + + for i, testCase := range testCases { + result := testCase.f.toMap() + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestNewStringLikeFunc(t *testing.T) { + case1Function, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Function, err := newStringLikeFunc(S3XAmzCopySource, + NewValueSet( + NewStringValue("mybucket/*"), + NewStringValue("yourbucket/myobject*"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case3Function, err := newStringLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case4Function, err := newStringLikeFunc(S3XAmzServerSideEncryption, + NewValueSet( + NewStringValue("AES*"), + NewStringValue("aws:*"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case5Function, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case6Function, err := newStringLikeFunc(S3XAmzMetadataDirective, + NewValueSet( + NewStringValue("REPL*"), + NewStringValue("COPY*"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case7Function, err := newStringLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case8Function, err := newStringLikeFunc(S3LocationConstraint, + NewValueSet( + NewStringValue("eu-west-*"), + NewStringValue("us-west-*"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + testCases := []struct { + key Key + values ValueSet + expectedResult Function + expectErr bool + }{ + {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*")), case1Function, false}, + {S3XAmzCopySource, + NewValueSet( + NewStringValue("mybucket/*"), + NewStringValue("yourbucket/myobject*"), + ), case2Function, false}, + + {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*")), case3Function, false}, + {S3XAmzServerSideEncryption, + NewValueSet( + NewStringValue("AES*"), + NewStringValue("aws:*"), + ), case4Function, false}, + + {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*")), case5Function, false}, + {S3XAmzMetadataDirective, + NewValueSet( + NewStringValue("REPL*"), + NewStringValue("COPY*"), + ), case6Function, false}, + + {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*")), case7Function, false}, + {S3LocationConstraint, + NewValueSet( + NewStringValue("eu-west-*"), + NewStringValue("us-west-*"), + ), case8Function, false}, + + // Unsupported value error. + {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"), NewIntValue(7)), nil, true}, + {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"), NewIntValue(7)), nil, true}, + {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"), NewIntValue(7)), nil, true}, + {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"), NewIntValue(7)), nil, true}, + + // Invalid value error. 
+ {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket")), nil, true}, + } + + for i, testCase := range testCases { + result, err := newStringLikeFunc(testCase.key, testCase.values) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } + } +} + +func TestNewStringNotLikeFunc(t *testing.T) { + case1Function, err := newStringNotLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Function, err := newStringNotLikeFunc(S3XAmzCopySource, + NewValueSet( + NewStringValue("mybucket/*"), + NewStringValue("yourbucket/myobject*"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case3Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case4Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption, + NewValueSet( + NewStringValue("AES*"), + NewStringValue("aws:*"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case5Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*"))) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case6Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective, + NewValueSet( + NewStringValue("REPL*"), + NewStringValue("COPY*"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case7Function, err := newStringNotLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*"))) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case8Function, err := newStringNotLikeFunc(S3LocationConstraint, + NewValueSet( + NewStringValue("eu-west-*"), + NewStringValue("us-west-*"), + ), + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + testCases := []struct { + key Key + values ValueSet + expectedResult Function + expectErr bool + }{ + {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*")), case1Function, false}, + {S3XAmzCopySource, + NewValueSet( + NewStringValue("mybucket/*"), + NewStringValue("yourbucket/myobject*"), + ), case2Function, false}, + + {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*")), case3Function, false}, + {S3XAmzServerSideEncryption, + NewValueSet( + NewStringValue("AES*"), + NewStringValue("aws:*"), + ), case4Function, false}, + + {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*")), case5Function, false}, + {S3XAmzMetadataDirective, + NewValueSet( + NewStringValue("REPL*"), + NewStringValue("COPY*"), + ), case6Function, false}, + + {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*")), case7Function, false}, + {S3LocationConstraint, + NewValueSet( + NewStringValue("eu-west-*"), + NewStringValue("us-west-*"), + ), case8Function, false}, + + // Unsupported value error. + {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"), NewIntValue(7)), nil, true}, + {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"), NewIntValue(7)), nil, true}, + {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"), NewIntValue(7)), nil, true}, + {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"), NewIntValue(7)), nil, true}, + + // Invalid value error. 
+ {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket")), nil, true}, + } + + for i, testCase := range testCases { + result, err := newStringNotLikeFunc(testCase.key, testCase.values) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } + } +} diff --git a/pkg/policy/condition/value.go b/pkg/policy/condition/value.go new file mode 100644 index 000000000..2e0ad938c --- /dev/null +++ b/pkg/policy/condition/value.go @@ -0,0 +1,157 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package condition + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" +) + +// Value - is enum type of string, int or bool. +type Value struct { + t reflect.Kind + s string + i int + b bool +} + +// GetBool - gets stored bool value. +func (v Value) GetBool() (bool, error) { + var err error + + if v.t != reflect.Bool { + err = fmt.Errorf("not a bool Value") + } + + return v.b, err +} + +// GetInt - gets stored int value. +func (v Value) GetInt() (int, error) { + var err error + + if v.t != reflect.Int { + err = fmt.Errorf("not a int Value") + } + + return v.i, err +} + +// GetString - gets stored string value. 
+func (v Value) GetString() (string, error) { + var err error + + if v.t != reflect.String { + err = fmt.Errorf("not a string Value") + } + + return v.s, err +} + +// GetType - gets enum type. +func (v Value) GetType() reflect.Kind { + return v.t +} + +// MarshalJSON - encodes Value to JSON data. +func (v Value) MarshalJSON() ([]byte, error) { + switch v.t { + case reflect.String: + return json.Marshal(v.s) + case reflect.Int: + return json.Marshal(v.i) + case reflect.Bool: + return json.Marshal(v.b) + } + + return nil, fmt.Errorf("unknown value kind %v", v.t) +} + +// StoreBool - stores bool value. +func (v *Value) StoreBool(b bool) { + *v = Value{t: reflect.Bool, b: b} +} + +// StoreInt - stores int value. +func (v *Value) StoreInt(i int) { + *v = Value{t: reflect.Int, i: i} +} + +// StoreString - stores string value. +func (v *Value) StoreString(s string) { + *v = Value{t: reflect.String, s: s} +} + +// String - returns string representation of value. +func (v Value) String() string { + switch v.t { + case reflect.String: + return v.s + case reflect.Int: + return strconv.Itoa(v.i) + case reflect.Bool: + return strconv.FormatBool(v.b) + } + + return "" +} + +// UnmarshalJSON - decodes JSON data. +func (v *Value) UnmarshalJSON(data []byte) error { + var b bool + if err := json.Unmarshal(data, &b); err == nil { + v.StoreBool(b) + return nil + } + + var i int + if err := json.Unmarshal(data, &i); err == nil { + v.StoreInt(i) + return nil + } + + var s string + if err := json.Unmarshal(data, &s); err == nil { + v.StoreString(s) + return nil + } + + return fmt.Errorf("unknown json data '%v'", data) +} + +// NewBoolValue - returns new bool value. +func NewBoolValue(b bool) Value { + value := &Value{} + value.StoreBool(b) + return *value +} + +// NewIntValue - returns new int value. +func NewIntValue(i int) Value { + value := &Value{} + value.StoreInt(i) + return *value +} + +// NewStringValue - returns new string value. 
+func NewStringValue(s string) Value { + value := &Value{} + value.StoreString(s) + return *value +} diff --git a/pkg/policy/condition/value_test.go b/pkg/policy/condition/value_test.go new file mode 100644 index 000000000..2b7065e54 --- /dev/null +++ b/pkg/policy/condition/value_test.go @@ -0,0 +1,260 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package condition + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestValueGetBool(t *testing.T) { + testCases := []struct { + value Value + expectedResult bool + expectErr bool + }{ + {NewBoolValue(true), true, false}, + {NewIntValue(7), false, true}, + {Value{}, false, true}, + } + + for i, testCase := range testCases { + result, err := testCase.value.GetBool() + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if result != testCase.expectedResult { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } + } +} + +func TestValueGetInt(t *testing.T) { + testCases := []struct { + value Value + expectedResult int + expectErr bool + }{ + {NewIntValue(7), 7, false}, + {NewBoolValue(true), 0, true}, + {Value{}, 0, true}, + } + + for i, testCase := range testCases { + result, err := testCase.value.GetInt() + expectErr := (err != nil) + + if expectErr != 
testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if result != testCase.expectedResult { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } + } +} + +func TestValueGetString(t *testing.T) { + testCases := []struct { + value Value + expectedResult string + expectErr bool + }{ + {NewStringValue("foo"), "foo", false}, + {NewBoolValue(true), "", true}, + {Value{}, "", true}, + } + + for i, testCase := range testCases { + result, err := testCase.value.GetString() + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if result != testCase.expectedResult { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } + } +} + +func TestValueGetType(t *testing.T) { + testCases := []struct { + value Value + expectedResult reflect.Kind + }{ + {NewBoolValue(true), reflect.Bool}, + {NewIntValue(7), reflect.Int}, + {NewStringValue("foo"), reflect.String}, + {Value{}, reflect.Invalid}, + } + + for i, testCase := range testCases { + result := testCase.value.GetType() + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestValueMarshalJSON(t *testing.T) { + testCases := []struct { + value Value + expectedResult []byte + expectErr bool + }{ + {NewBoolValue(true), []byte("true"), false}, + {NewIntValue(7), []byte("7"), false}, + {NewStringValue("foo"), []byte(`"foo"`), false}, + {Value{}, nil, true}, + } + + for i, testCase := range testCases { + result, err := json.Marshal(testCase.value) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if 
!testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } + } +} + +func TestValueStoreBool(t *testing.T) { + testCases := []struct { + value bool + expectedResult Value + }{ + {false, NewBoolValue(false)}, + {true, NewBoolValue(true)}, + } + + for i, testCase := range testCases { + var result Value + result.StoreBool(testCase.value) + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestValueStoreInt(t *testing.T) { + testCases := []struct { + value int + expectedResult Value + }{ + {0, NewIntValue(0)}, + {7, NewIntValue(7)}, + } + + for i, testCase := range testCases { + var result Value + result.StoreInt(testCase.value) + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestValueStoreString(t *testing.T) { + testCases := []struct { + value string + expectedResult Value + }{ + {"", NewStringValue("")}, + {"foo", NewStringValue("foo")}, + } + + for i, testCase := range testCases { + var result Value + result.StoreString(testCase.value) + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestValueString(t *testing.T) { + testCases := []struct { + value Value + expectedResult string + }{ + {NewBoolValue(true), "true"}, + {NewIntValue(7), "7"}, + {NewStringValue("foo"), "foo"}, + {Value{}, ""}, + } + + for i, testCase := range testCases { + result := testCase.value.String() + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestValueUnmarshalJSON(t *testing.T) { + testCases := []struct { + data []byte + 
expectedResult Value + expectErr bool + }{ + {[]byte("true"), NewBoolValue(true), false}, + {[]byte("7"), NewIntValue(7), false}, + {[]byte(`"foo"`), NewStringValue("foo"), false}, + {[]byte("True"), Value{}, true}, + {[]byte("7.1"), Value{}, true}, + {[]byte(`["foo"]`), Value{}, true}, + } + + for i, testCase := range testCases { + var result Value + err := json.Unmarshal(testCase.data, &result) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } + } +} diff --git a/pkg/policy/condition/valueset.go b/pkg/policy/condition/valueset.go new file mode 100644 index 000000000..acd9233d2 --- /dev/null +++ b/pkg/policy/condition/valueset.go @@ -0,0 +1,85 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package condition + +import ( + "encoding/json" + "fmt" +) + +// ValueSet - unique list of values. +type ValueSet map[Value]struct{} + +// Add - adds given value to value set. +func (set ValueSet) Add(value Value) { + set[value] = struct{}{} +} + +// MarshalJSON - encodes ValueSet to JSON data. 
+func (set ValueSet) MarshalJSON() ([]byte, error) { + var values []Value + for k := range set { + values = append(values, k) + } + + if len(values) == 0 { + return nil, fmt.Errorf("invalid value set %v", set) + } + + return json.Marshal(values) +} + +// UnmarshalJSON - decodes JSON data. +func (set *ValueSet) UnmarshalJSON(data []byte) error { + var v Value + if err := json.Unmarshal(data, &v); err == nil { + *set = make(ValueSet) + set.Add(v) + return nil + } + + var values []Value + if err := json.Unmarshal(data, &values); err != nil { + return err + } + + if len(values) < 1 { + return fmt.Errorf("invalid value") + } + + *set = make(ValueSet) + for _, v = range values { + if _, found := (*set)[v]; found { + return fmt.Errorf("duplicate value found '%v'", v) + } + + set.Add(v) + } + + return nil +} + +// NewValueSet - returns new value set containing given values. +func NewValueSet(values ...Value) ValueSet { + set := make(ValueSet) + + for _, value := range values { + set.Add(value) + } + + return set +} diff --git a/pkg/policy/condition/valueset_test.go b/pkg/policy/condition/valueset_test.go new file mode 100644 index 000000000..6a6941b4b --- /dev/null +++ b/pkg/policy/condition/valueset_test.go @@ -0,0 +1,118 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package condition + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestValueSetAdd(t *testing.T) { + testCases := []struct { + value Value + expectedResult ValueSet + }{ + {NewBoolValue(true), NewValueSet(NewBoolValue(true))}, + {NewIntValue(7), NewValueSet(NewIntValue(7))}, + {NewStringValue("foo"), NewValueSet(NewStringValue("foo"))}, + } + + for i, testCase := range testCases { + result := NewValueSet() + result.Add(testCase.value) + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestValueSetMarshalJSON(t *testing.T) { + testCases := []struct { + set ValueSet + expectedResult string + expectErr bool + }{ + {NewValueSet(NewBoolValue(true)), `[true]`, false}, + {NewValueSet(NewIntValue(7)), `[7]`, false}, + {NewValueSet(NewStringValue("foo")), `["foo"]`, false}, + {NewValueSet(NewBoolValue(true)), `[true]`, false}, + {NewValueSet(NewStringValue("7")), `["7"]`, false}, + {NewValueSet(NewStringValue("foo")), `["foo"]`, false}, + {make(ValueSet), "", true}, + } + + for i, testCase := range testCases { + result, err := json.Marshal(testCase.set) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if string(result) != testCase.expectedResult { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, string(result)) + } + } + } +} + +func TestValueSetUnmarshalJSON(t *testing.T) { + set1 := NewValueSet( + NewBoolValue(true), + NewStringValue("false"), + NewIntValue(7), + NewStringValue("7"), + NewStringValue("foo"), + NewStringValue("192.168.1.100/24"), + ) + + testCases := []struct { + data []byte + expectedResult ValueSet + expectErr bool + }{ + {[]byte(`true`), NewValueSet(NewBoolValue(true)), false}, + {[]byte(`7`), NewValueSet(NewIntValue(7)), false}, + 
{[]byte(`"foo"`), NewValueSet(NewStringValue("foo")), false}, + {[]byte(`[true]`), NewValueSet(NewBoolValue(true)), false}, + {[]byte(`[7]`), NewValueSet(NewIntValue(7)), false}, + {[]byte(`["foo"]`), NewValueSet(NewStringValue("foo")), false}, + {[]byte(`[true, "false", 7, "7", "foo", "192.168.1.100/24"]`), set1, false}, + {[]byte(`{}`), nil, true}, // Unsupported data. + {[]byte(`[]`), nil, true}, // Empty array. + {[]byte(`[7, 7, true]`), nil, true}, // Duplicate value. + } + + for i, testCase := range testCases { + result := make(ValueSet) + err := json.Unmarshal(testCase.data, &result) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } + } +} diff --git a/pkg/policy/effect.go b/pkg/policy/effect.go new file mode 100644 index 000000000..913e65506 --- /dev/null +++ b/pkg/policy/effect.go @@ -0,0 +1,78 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package policy + +import ( + "encoding/json" + "fmt" +) + +// Effect - policy statement effect Allow or Deny. +type Effect string + +const ( + // Allow - allow effect. + Allow Effect = "Allow" + + // Deny - deny effect. 
+ Deny = "Deny" +) + +// IsAllowed - returns if given check is allowed or not. +func (effect Effect) IsAllowed(b bool) bool { + if effect == Allow { + return b + } + + return !b +} + +// IsValid - checks if Effect is valid or not +func (effect Effect) IsValid() bool { + switch effect { + case Allow, Deny: + return true + } + + return false +} + +// MarshalJSON - encodes Effect to JSON data. +func (effect Effect) MarshalJSON() ([]byte, error) { + if !effect.IsValid() { + return nil, fmt.Errorf("invalid effect '%v'", effect) + } + + return json.Marshal(string(effect)) +} + +// UnmarshalJSON - decodes JSON data to Effect. +func (effect *Effect) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + + e := Effect(s) + if !e.IsValid() { + return fmt.Errorf("invalid effect '%v'", s) + } + + *effect = e + + return nil +} diff --git a/pkg/policy/effect_test.go b/pkg/policy/effect_test.go new file mode 100644 index 000000000..0286a4f07 --- /dev/null +++ b/pkg/policy/effect_test.go @@ -0,0 +1,122 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package policy + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestEffectIsAllowed(t *testing.T) { + testCases := []struct { + effect Effect + check bool + expectedResult bool + }{ + {Allow, false, false}, + {Allow, true, true}, + {Deny, false, true}, + {Deny, true, false}, + } + + for i, testCase := range testCases { + result := testCase.effect.IsAllowed(testCase.check) + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } + +} + +func TestEffectIsValid(t *testing.T) { + testCases := []struct { + effect Effect + expectedResult bool + }{ + {Allow, true}, + {Deny, true}, + {Effect(""), false}, + {Effect("foo"), false}, + } + + for i, testCase := range testCases { + result := testCase.effect.IsValid() + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestEffectMarshalJSON(t *testing.T) { + testCases := []struct { + effect Effect + expectedResult []byte + expectErr bool + }{ + {Allow, []byte(`"Allow"`), false}, + {Deny, []byte(`"Deny"`), false}, + {Effect(""), nil, true}, + {Effect("foo"), nil, true}, + } + + for i, testCase := range testCases { + result, err := json.Marshal(testCase.effect) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, string(testCase.expectedResult), string(result)) + } + } + } +} + +func TestEffectUnmarshalJSON(t *testing.T) { + testCases := []struct { + data []byte + expectedResult Effect + expectErr bool + }{ + {[]byte(`"Allow"`), Allow, false}, + {[]byte(`"Deny"`), Deny, false}, + {[]byte(`""`), Effect(""), true}, + {[]byte(`"foo"`), Effect(""), true}, + } + + for i, testCase := range 
testCases { + var result Effect + err := json.Unmarshal(testCase.data, &result) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if result != testCase.expectedResult { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } + } +} diff --git a/pkg/policy/id.go b/pkg/policy/id.go new file mode 100644 index 000000000..9040ea40a --- /dev/null +++ b/pkg/policy/id.go @@ -0,0 +1,64 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package policy + +import ( + "encoding/json" + "fmt" + "regexp" +) + +var idRegexp = regexp.MustCompile("^[[:alnum:]]+$") + +// ID - policy ID. +type ID string + +// IsValid - checks if ID is valid or not. +func (id ID) IsValid() bool { + // Allow empty string as ID. + if string(id) == "" { + return true + } + + return idRegexp.MatchString(string(id)) +} + +// MarshalJSON - encodes ID to JSON data. +func (id ID) MarshalJSON() ([]byte, error) { + if !id.IsValid() { + return nil, fmt.Errorf("invalid ID %v", id) + } + + return json.Marshal(string(id)) +} + +// UnmarshalJSON - decodes JSON data to ID. 
+func (id *ID) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + + i := ID(s) + if !i.IsValid() { + return fmt.Errorf("invalid ID %v", s) + } + + *id = i + + return nil +} diff --git a/pkg/policy/id_test.go b/pkg/policy/id_test.go new file mode 100644 index 000000000..74065ff31 --- /dev/null +++ b/pkg/policy/id_test.go @@ -0,0 +1,99 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package policy + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestIDIsValid(t *testing.T) { + testCases := []struct { + id ID + expectedResult bool + }{ + {ID("DenyEncryptionSt1"), true}, + {ID(""), true}, + } + + for i, testCase := range testCases { + result := testCase.id.IsValid() + + if result != testCase.expectedResult { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestIDMarshalJSON(t *testing.T) { + testCases := []struct { + id ID + expectedResult []byte + expectErr bool + }{ + {ID("foo"), []byte(`"foo"`), false}, + {ID("1234"), []byte(`"1234"`), false}, + {ID("DenyEncryptionSt1"), []byte(`"DenyEncryptionSt1"`), false}, + {ID(""), []byte(`""`), false}, + } + + for i, testCase := range testCases { + result, err := json.Marshal(testCase.id) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, string(testCase.expectedResult), string(result)) + } + } + } +} + +func TestIDUnmarshalJSON(t *testing.T) { + testCases := []struct { + data []byte + expectedResult ID + expectErr bool + }{ + {[]byte(`"foo"`), ID("foo"), false}, + {[]byte(`"1234"`), ID("1234"), false}, + {[]byte(`"DenyEncryptionSt1"`), ID("DenyEncryptionSt1"), false}, + {[]byte(`""`), ID(""), false}, + {[]byte(`"foo bar"`), ID(""), true}, + } + + for i, testCase := range testCases { + var result ID + err := json.Unmarshal(testCase.data, &result) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if result != testCase.expectedResult { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, 
result) + } + } + } +} diff --git a/pkg/policy/policy.go b/pkg/policy/policy.go new file mode 100644 index 000000000..cac6853a1 --- /dev/null +++ b/pkg/policy/policy.go @@ -0,0 +1,176 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package policy + +import ( + "encoding/json" + "fmt" + "io" +) + +// DefaultVersion - default policy version as per AWS S3 specification. +const DefaultVersion = "2012-10-17" + +// Args - arguments to policy to check whether it is allowed +type Args struct { + AccountName string + Action Action + BucketName string + ConditionValues map[string][]string + IsOwner bool + ObjectName string +} + +// Policy - bucket policy. +type Policy struct { + ID ID `json:"ID,omitempty"` + Version string + Statements []Statement `json:"Statement"` +} + +// IsAllowed - checks given policy args is allowed to continue the Rest API. +func (policy Policy) IsAllowed(args Args) bool { + // Check all deny statements. If any one statement denies, return false. + for _, statement := range policy.Statements { + if statement.Effect == Deny { + if !statement.IsAllowed(args) { + return false + } + } + } + + // For owner, its allowed by default. + if args.IsOwner { + return true + } + + // Check all allow statements. If any one statement allows, return true. 
+	for _, statement := range policy.Statements {
+		if statement.Effect == Allow {
+			if statement.IsAllowed(args) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// IsEmpty - returns whether policy is empty or not.
+func (policy Policy) IsEmpty() bool {
+	return len(policy.Statements) == 0
+}
+
+// isValid - checks if Policy is valid or not.
+func (policy Policy) isValid() error {
+	if policy.Version != DefaultVersion {
+		return fmt.Errorf("invalid version '%v'", policy.Version)
+	}
+
+	for _, statement := range policy.Statements {
+		if err := statement.isValid(); err != nil {
+			return err
+		}
+	}
+
+	for i := range policy.Statements {
+		for _, statement := range policy.Statements[i+1:] {
+			principals := policy.Statements[i].Principal.Intersection(statement.Principal)
+			if principals.IsEmpty() {
+				continue
+			}
+
+			actions := policy.Statements[i].Actions.Intersection(statement.Actions)
+			if len(actions) == 0 {
+				continue
+			}
+
+			resources := policy.Statements[i].Resources.Intersection(statement.Resources)
+			if len(resources) == 0 {
+				continue
+			}
+
+			if policy.Statements[i].Conditions.String() != statement.Conditions.String() {
+				continue
+			}
+
+			return fmt.Errorf("duplicate principal %v, actions %v, resources %v found in statements %v, %v",
+				principals, actions, resources, policy.Statements[i], statement)
+		}
+	}
+
+	return nil
+}
+
+// MarshalJSON - encodes Policy to JSON data.
+func (policy Policy) MarshalJSON() ([]byte, error) {
+	if err := policy.isValid(); err != nil {
+		return nil, err
+	}
+
+	// subtype to avoid recursive call to MarshalJSON()
+	type subPolicy Policy
+	return json.Marshal(subPolicy(policy))
+}
+
+// UnmarshalJSON - decodes JSON data to Policy.
+func (policy *Policy) UnmarshalJSON(data []byte) error { + // subtype to avoid recursive call to UnmarshalJSON() + type subPolicy Policy + var sp subPolicy + if err := json.Unmarshal(data, &sp); err != nil { + return err + } + + p := Policy(sp) + if err := p.isValid(); err != nil { + return err + } + + *policy = p + + return nil +} + +// Validate - validates all statements are for given bucket or not. +func (policy Policy) Validate(bucketName string) error { + if err := policy.isValid(); err != nil { + return err + } + + for _, statement := range policy.Statements { + if err := statement.Validate(bucketName); err != nil { + return err + } + } + + return nil +} + +// ParseConfig - parses data in given reader to Policy. +func ParseConfig(reader io.Reader, bucketName string) (*Policy, error) { + var policy Policy + + decoder := json.NewDecoder(reader) + decoder.DisallowUnknownFields() + if err := decoder.Decode(&policy); err != nil { + return nil, err + } + + err := policy.Validate(bucketName) + return &policy, err +} diff --git a/pkg/policy/policy_test.go b/pkg/policy/policy_test.go new file mode 100644 index 000000000..8c4fb61f6 --- /dev/null +++ b/pkg/policy/policy_test.go @@ -0,0 +1,1089 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package policy + +import ( + "encoding/json" + "net" + "reflect" + "testing" + + "github.com/minio/minio/pkg/policy/condition" +) + +func TestPolicyIsAllowed(t *testing.T) { + case1Policy := Policy{ + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetBucketLocationAction, PutObjectAction), + NewResourceSet(NewResource("*", "")), + condition.NewFunctions(), + )}, + } + + case2Policy := Policy{ + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetObjectAction, PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + )}, + } + + _, IPNet, err := net.ParseCIDR("192.168.1.0/24") + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + func1, err := condition.NewIPAddressFunc( + condition.AWSSourceIP, + IPNet, + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case3Policy := Policy{ + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetObjectAction, PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(func1), + )}, + } + + case4Policy := Policy{ + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Deny, + NewPrincipal("*"), + NewActionSet(GetObjectAction, PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(func1), + )}, + } + + anonGetBucketLocationArgs := Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: GetBucketLocationAction, + BucketName: "mybucket", + ConditionValues: map[string][]string{}, + } + + anonPutObjectActionArgs := Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: PutObjectAction, + BucketName: "mybucket", + ConditionValues: map[string][]string{ + "x-amz-copy-source": {"mybucket/myobject"}, + "SourceIp": {"192.168.1.10"}, + }, + ObjectName: "myobject", + } + 
+ anonGetObjectActionArgs := Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: GetObjectAction, + BucketName: "mybucket", + ConditionValues: map[string][]string{}, + ObjectName: "myobject", + } + + getBucketLocationArgs := Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: GetBucketLocationAction, + BucketName: "mybucket", + ConditionValues: map[string][]string{}, + IsOwner: true, + } + + putObjectActionArgs := Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: PutObjectAction, + BucketName: "mybucket", + ConditionValues: map[string][]string{ + "x-amz-copy-source": {"mybucket/myobject"}, + "SourceIp": {"192.168.1.10"}, + }, + IsOwner: true, + ObjectName: "myobject", + } + + getObjectActionArgs := Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: GetObjectAction, + BucketName: "mybucket", + ConditionValues: map[string][]string{}, + IsOwner: true, + ObjectName: "myobject", + } + + testCases := []struct { + policy Policy + args Args + expectedResult bool + }{ + {case1Policy, anonGetBucketLocationArgs, true}, + {case1Policy, anonPutObjectActionArgs, true}, + {case1Policy, anonGetObjectActionArgs, false}, + {case1Policy, getBucketLocationArgs, true}, + {case1Policy, putObjectActionArgs, true}, + {case1Policy, getObjectActionArgs, true}, + + {case2Policy, anonGetBucketLocationArgs, false}, + {case2Policy, anonPutObjectActionArgs, true}, + {case2Policy, anonGetObjectActionArgs, true}, + {case2Policy, getBucketLocationArgs, true}, + {case2Policy, putObjectActionArgs, true}, + {case2Policy, getObjectActionArgs, true}, + + {case3Policy, anonGetBucketLocationArgs, false}, + {case3Policy, anonPutObjectActionArgs, true}, + {case3Policy, anonGetObjectActionArgs, false}, + {case3Policy, getBucketLocationArgs, true}, + {case3Policy, putObjectActionArgs, true}, + {case3Policy, getObjectActionArgs, true}, + + {case4Policy, anonGetBucketLocationArgs, false}, + {case4Policy, anonPutObjectActionArgs, false}, + {case4Policy, anonGetObjectActionArgs, false}, + 
{case4Policy, getBucketLocationArgs, true}, + {case4Policy, putObjectActionArgs, false}, + {case4Policy, getObjectActionArgs, true}, + } + + for i, testCase := range testCases { + result := testCase.policy.IsAllowed(testCase.args) + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestPolicyIsEmpty(t *testing.T) { + case1Policy := Policy{ + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + }, + } + + case2Policy := Policy{ + ID: "MyPolicyForMyBucket", + Version: DefaultVersion, + } + + testCases := []struct { + policy Policy + expectedResult bool + }{ + {case1Policy, false}, + {case2Policy, true}, + } + + for i, testCase := range testCases { + result := testCase.policy.IsEmpty() + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestPolicyIsValid(t *testing.T) { + case1Policy := Policy{ + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + }, + } + + case2Policy := Policy{ + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + NewStatement( + Deny, + NewPrincipal("*"), + NewActionSet(GetObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + }, + } + + case3Policy := Policy{ + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + 
NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + NewStatement( + Deny, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/yourobject*")), + condition.NewFunctions(), + ), + }, + } + + func1, err := condition.NewNullFunc( + condition.S3XAmzCopySource, + true, + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + func2, err := condition.NewNullFunc( + condition.S3XAmzServerSideEncryption, + false, + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case4Policy := Policy{ + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(func1), + ), + NewStatement( + Deny, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(func2), + ), + }, + } + + case5Policy := Policy{ + Version: "17-10-2012", + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + }, + } + + case6Policy := Policy{ + ID: "MyPolicyForMyBucket1", + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetObjectAction, PutObjectAction), + NewResourceSet(NewResource("mybucket", "myobject*")), + condition.NewFunctions(func1, func2), + ), + }, + } + + case7Policy := Policy{ + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + NewStatement( + Deny, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), 
+ }, + } + + testCases := []struct { + policy Policy + expectErr bool + }{ + {case1Policy, false}, + // allowed duplicate principal. + {case2Policy, false}, + // allowed duplicate principal and action. + {case3Policy, false}, + // allowed duplicate principal, action and resource. + {case4Policy, false}, + // Invalid version error. + {case5Policy, true}, + // Invalid statement error. + {case6Policy, true}, + // Duplicate statement error. + {case7Policy, true}, + } + + for i, testCase := range testCases { + err := testCase.policy.isValid() + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + } +} + +func TestPolicyMarshalJSON(t *testing.T) { + case1Policy := Policy{ + ID: "MyPolicyForMyBucket1", + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + }, + } + case1Policy.Statements[0].SID = "SomeId1" + case1Data := []byte(`{"ID":"MyPolicyForMyBucket1","Version":"2012-10-17","Statement":[{"Sid":"SomeId1","Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"]}]}`) + + _, IPNet1, err := net.ParseCIDR("192.168.1.0/24") + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + func1, err := condition.NewIPAddressFunc( + condition.AWSSourceIP, + IPNet1, + ) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case2Policy := Policy{ + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + NewStatement( + Deny, + NewPrincipal("*"), + NewActionSet(GetObjectAction), + NewResourceSet(NewResource("mybucket", "/yourobject*")), + condition.NewFunctions(func1), + ), + }, + } + case2Data := []byte(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"]},{"Effect":"Deny","Principal":{"AWS":["*"]},"Action":["s3:GetObject"],"Resource":["arn:aws:s3:::mybucket/yourobject*"],"Condition":{"IpAddress":{"aws:SourceIp":["192.168.1.0/24"]}}}]}`) + + case3Policy := Policy{ + ID: "MyPolicyForMyBucket1", + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("Q3AM3UQ867SPQQA43P2F"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + }, + } + case3Data := []byte(`{"ID":"MyPolicyForMyBucket1","Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["Q3AM3UQ867SPQQA43P2F"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"]},{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"]}]}`) + + case4Policy := Policy{ + ID: "MyPolicyForMyBucket1", + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetObjectAction), + 
NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + }, + } + case4Data := []byte(`{"ID":"MyPolicyForMyBucket1","Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"]},{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:GetObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"]}]}`) + + case5Policy := Policy{ + ID: "MyPolicyForMyBucket1", + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/yourobject*")), + condition.NewFunctions(), + ), + }, + } + case5Data := []byte(`{"ID":"MyPolicyForMyBucket1","Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"]},{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/yourobject*"]}]}`) + + _, IPNet2, err := net.ParseCIDR("192.168.2.0/24") + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + func2, err := condition.NewIPAddressFunc( + condition.AWSSourceIP, + IPNet2, + ) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + + case6Policy := Policy{ + ID: "MyPolicyForMyBucket1", + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(func1), + ), + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(func2), + ), + }, + } + case6Data := []byte(`{"ID":"MyPolicyForMyBucket1","Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"],"Condition":{"IpAddress":{"aws:SourceIp":["192.168.1.0/24"]}}},{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"],"Condition":{"IpAddress":{"aws:SourceIp":["192.168.2.0/24"]}}}]}`) + + case7Policy := Policy{ + ID: "MyPolicyForMyBucket1", + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetBucketLocationAction), + NewResourceSet(NewResource("mybucket", "")), + condition.NewFunctions(), + ), + }, + } + case7Data := []byte(`{"ID":"MyPolicyForMyBucket1","Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:GetBucketLocation"],"Resource":["arn:aws:s3:::mybucket"]}]}`) + + case8Policy := Policy{ + ID: "MyPolicyForMyBucket1", + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetBucketLocationAction), + NewResourceSet(NewResource("*", "")), + condition.NewFunctions(), + ), + }, + } + case8Data := []byte(`{"ID":"MyPolicyForMyBucket1","Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:GetBucketLocation"],"Resource":["arn:aws:s3:::*"]}]}`) + + func3, err := condition.NewNullFunc( + condition.S3XAmzCopySource, + true, + ) + if 
err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + case9Policy := Policy{ + ID: "MyPolicyForMyBucket1", + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetObjectAction, PutObjectAction), + NewResourceSet(NewResource("mybucket", "myobject*")), + condition.NewFunctions(func1, func2, func3), + ), + }, + } + + testCases := []struct { + policy Policy + expectedResult []byte + expectErr bool + }{ + {case1Policy, case1Data, false}, + {case2Policy, case2Data, false}, + {case3Policy, case3Data, false}, + {case4Policy, case4Data, false}, + {case5Policy, case5Data, false}, + {case6Policy, case6Data, false}, + {case7Policy, case7Data, false}, + {case8Policy, case8Data, false}, + {case9Policy, nil, true}, + } + + for i, testCase := range testCases { + result, err := json.Marshal(testCase.policy) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v", i+1, string(testCase.expectedResult), string(result)) + } + } + } +} + +func TestPolicyUnmarshalJSON(t *testing.T) { + case1Data := []byte(`{ + "ID": "MyPolicyForMyBucket1", + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "SomeId1", + "Effect": "Allow", + "Principal": "*", + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::mybucket/myobject*" + } + ] +}`) + case1Policy := Policy{ + ID: "MyPolicyForMyBucket1", + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + }, + } + case1Policy.Statements[0].SID = "SomeId1" + + case2Data := []byte(`{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": "*", + 
"Action": "s3:PutObject", + "Resource": "arn:aws:s3:::mybucket/myobject*" + }, + { + "Effect": "Deny", + "Principal": "*", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::mybucket/yourobject*", + "Condition": { + "IpAddress": { + "aws:SourceIp": "192.168.1.0/24" + } + } + } + ] +}`) + _, IPNet1, err := net.ParseCIDR("192.168.1.0/24") + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + func1, err := condition.NewIPAddressFunc( + condition.AWSSourceIP, + IPNet1, + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case2Policy := Policy{ + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + NewStatement( + Deny, + NewPrincipal("*"), + NewActionSet(GetObjectAction), + NewResourceSet(NewResource("mybucket", "/yourobject*")), + condition.NewFunctions(func1), + ), + }, + } + + case3Data := []byte(`{ + "ID": "MyPolicyForMyBucket1", + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": [ + "Q3AM3UQ867SPQQA43P2F" + ] + }, + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::mybucket/myobject*" + }, + { + "Effect": "Allow", + "Principal": "*", + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::mybucket/myobject*" + } + ] +}`) + case3Policy := Policy{ + ID: "MyPolicyForMyBucket1", + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("Q3AM3UQ867SPQQA43P2F"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + }, + } + + case4Data := []byte(`{ + "ID": "MyPolicyForMyBucket1", + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + 
"Principal": "*", + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::mybucket/myobject*" + }, + { + "Effect": "Allow", + "Principal": "*", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::mybucket/myobject*" + } + ] +}`) + case4Policy := Policy{ + ID: "MyPolicyForMyBucket1", + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + }, + } + + case5Data := []byte(`{ + "ID": "MyPolicyForMyBucket1", + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": "*", + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::mybucket/myobject*" + }, + { + "Effect": "Allow", + "Principal": "*", + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::mybucket/yourobject*" + } + ] +}`) + case5Policy := Policy{ + ID: "MyPolicyForMyBucket1", + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/yourobject*")), + condition.NewFunctions(), + ), + }, + } + + case6Data := []byte(`{ + "ID": "MyPolicyForMyBucket1", + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": "*", + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::mybucket/myobject*", + "Condition": { + "IpAddress": { + "aws:SourceIp": "192.168.1.0/24" + } + } + }, + { + "Effect": "Allow", + "Principal": "*", + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::mybucket/myobject*", + "Condition": { + "IpAddress": { + "aws:SourceIp": 
"192.168.2.0/24" + } + } + } + ] +}`) + _, IPNet2, err := net.ParseCIDR("192.168.2.0/24") + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + func2, err := condition.NewIPAddressFunc( + condition.AWSSourceIP, + IPNet2, + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case6Policy := Policy{ + ID: "MyPolicyForMyBucket1", + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(func1), + ), + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(func2), + ), + }, + } + + case7Data := []byte(`{ + "ID": "MyPolicyForMyBucket1", + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": "*", + "Action": "s3:GetBucketLocation", + "Resource": "arn:aws:s3:::mybucket" + } + ] +}`) + + case7Policy := Policy{ + ID: "MyPolicyForMyBucket1", + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetBucketLocationAction), + NewResourceSet(NewResource("mybucket", "")), + condition.NewFunctions(), + ), + }, + } + + case8Data := []byte(`{ + "ID": "MyPolicyForMyBucket1", + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": "*", + "Action": "s3:GetBucketLocation", + "Resource": "arn:aws:s3:::*" + } + ] +}`) + + case8Policy := Policy{ + ID: "MyPolicyForMyBucket1", + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetBucketLocationAction), + NewResourceSet(NewResource("*", "")), + condition.NewFunctions(), + ), + }, + } + + case9Data := []byte(`{ + "ID": "MyPolicyForMyBucket1", + "Version": "17-10-2012", + "Statement": [ + { + "Effect": "Allow", + "Principal": "*", + "Action": "s3:PutObject", + "Resource": 
"arn:aws:s3:::mybucket/myobject*" + } + ] +}`) + + case10Data := []byte(`{ + "ID": "MyPolicyForMyBucket1", + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": "*", + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::mybucket/myobject*" + }, + { + "Effect": "Allow", + "Principal": "*", + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::mybucket/myobject*" + } + ] +}`) + + case11Data := []byte(`{ + "ID": "MyPolicyForMyBucket1", + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": "*", + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::mybucket/myobject*" + }, + { + "Effect": "Deny", + "Principal": "*", + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::mybucket/myobject*" + } + ] +}`) + + testCases := []struct { + data []byte + expectedResult Policy + expectErr bool + }{ + {case1Data, case1Policy, false}, + {case2Data, case2Policy, false}, + {case3Data, case3Policy, false}, + {case4Data, case4Policy, false}, + {case5Data, case5Policy, false}, + {case6Data, case6Policy, false}, + {case7Data, case7Policy, false}, + {case8Data, case8Policy, false}, + // Invalid version error. + {case9Data, Policy{}, true}, + // Duplicate statement error. + {case10Data, Policy{}, true}, + // Duplicate statement error (Effect differs). 
+ {case11Data, Policy{}, true}, + } + + for i, testCase := range testCases { + var result Policy + err := json.Unmarshal(testCase.data, &result) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } + } +} + +func TestPolicyValidate(t *testing.T) { + case1Policy := Policy{ + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ), + }, + } + + func1, err := condition.NewNullFunc( + condition.S3XAmzCopySource, + true, + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + func2, err := condition.NewNullFunc( + condition.S3XAmzServerSideEncryption, + false, + ) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + case2Policy := Policy{ + ID: "MyPolicyForMyBucket1", + Version: DefaultVersion, + Statements: []Statement{ + NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetObjectAction, PutObjectAction), + NewResourceSet(NewResource("mybucket", "myobject*")), + condition.NewFunctions(func1, func2), + ), + }, + } + + testCases := []struct { + policy Policy + bucketName string + expectErr bool + }{ + {case1Policy, "mybucket", false}, + {case2Policy, "yourbucket", true}, + {case1Policy, "yourbucket", true}, + } + + for i, testCase := range testCases { + err := testCase.policy.Validate(testCase.bucketName) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + } +} diff --git a/pkg/policy/principal.go b/pkg/policy/principal.go new file mode 100644 index 000000000..4a8c138d7 --- /dev/null +++ b/pkg/policy/principal.go @@ -0,0 +1,92 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package policy + +import ( + "encoding/json" + "fmt" + + "github.com/minio/minio-go/pkg/set" + "github.com/minio/minio/pkg/wildcard" +) + +// Principal - policy principal. +type Principal struct { + AWS set.StringSet +} + +// IsValid - checks whether Principal is valid or not. 
+func (p Principal) IsValid() bool { + return len(p.AWS) != 0 +} + +// Intersection - returns principals available in both Principal. +func (p Principal) Intersection(principal Principal) set.StringSet { + return p.AWS.Intersection(principal.AWS) +} + +// MarshalJSON - encodes Principal to JSON data. +func (p Principal) MarshalJSON() ([]byte, error) { + if !p.IsValid() { + return nil, fmt.Errorf("invalid principal %v", p) + } + + // subtype to avoid recursive call to MarshalJSON() + type subPrincipal Principal + sp := subPrincipal(p) + return json.Marshal(sp) +} + +// Match - matches given principal is wildcard matching with Principal. +func (p Principal) Match(principal string) bool { + for _, pattern := range p.AWS.ToSlice() { + if wildcard.MatchSimple(pattern, principal) { + return true + } + } + + return false +} + +// UnmarshalJSON - decodes JSON data to Principal. +func (p *Principal) UnmarshalJSON(data []byte) error { + // subtype to avoid recursive call to UnmarshalJSON() + type subPrincipal Principal + var sp subPrincipal + + if err := json.Unmarshal(data, &sp); err != nil { + var s string + if err = json.Unmarshal(data, &s); err != nil { + return err + } + + if s != "*" { + return fmt.Errorf("invalid principal '%v'", s) + } + + sp.AWS = set.CreateStringSet("*") + } + + *p = Principal(sp) + + return nil +} + +// NewPrincipal - creates new Principal. +func NewPrincipal(principals ...string) Principal { + return Principal{AWS: set.CreateStringSet(principals...)} +} diff --git a/pkg/policy/principal_test.go b/pkg/policy/principal_test.go new file mode 100644 index 000000000..abd263224 --- /dev/null +++ b/pkg/policy/principal_test.go @@ -0,0 +1,141 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package policy + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/minio/minio-go/pkg/set" +) + +func TestPrincipalIsValid(t *testing.T) { + testCases := []struct { + principal Principal + expectedResult bool + }{ + {NewPrincipal("*"), true}, + {NewPrincipal("arn:aws:iam::AccountNumber:root"), true}, + {NewPrincipal(), false}, + } + + for i, testCase := range testCases { + result := testCase.principal.IsValid() + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestPrincipalIntersection(t *testing.T) { + testCases := []struct { + principal Principal + principalToIntersect Principal + expectedResult set.StringSet + }{ + {NewPrincipal("*"), NewPrincipal("*"), set.CreateStringSet("*")}, + {NewPrincipal("arn:aws:iam::AccountNumber:root"), NewPrincipal("arn:aws:iam::AccountNumber:myuser"), set.CreateStringSet()}, + {NewPrincipal(), NewPrincipal("*"), set.CreateStringSet()}, + } + + for i, testCase := range testCases { + result := testCase.principal.Intersection(testCase.principalToIntersect) + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestPrincipalMarshalJSON(t *testing.T) { + testCases := []struct { + principal Principal + expectedResult []byte + expectErr bool + }{ + {NewPrincipal("*"), []byte(`{"AWS":["*"]}`), false}, + {NewPrincipal("arn:aws:iam::AccountNumber:*"), 
[]byte(`{"AWS":["arn:aws:iam::AccountNumber:*"]}`), false}, + {NewPrincipal(), nil, true}, + } + + for i, testCase := range testCases { + result, err := json.Marshal(testCase.principal) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, string(testCase.expectedResult), string(result)) + } + } + } +} + +func TestPrincipalMatch(t *testing.T) { + testCases := []struct { + principals Principal + principal string + expectedResult bool + }{ + {NewPrincipal("*"), "AccountNumber", true}, + {NewPrincipal("arn:aws:iam::*"), "arn:aws:iam::AccountNumber:root", true}, + {NewPrincipal("arn:aws:iam::AccountNumber:*"), "arn:aws:iam::TestAccountNumber:root", false}, + } + + for i, testCase := range testCases { + result := testCase.principals.Match(testCase.principal) + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestPrincipalUnmarshalJSON(t *testing.T) { + testCases := []struct { + data []byte + expectedResult Principal + expectErr bool + }{ + {[]byte(`"*"`), NewPrincipal("*"), false}, + {[]byte(`{"AWS": "*"}`), NewPrincipal("*"), false}, + {[]byte(`{"AWS": "arn:aws:iam::AccountNumber:*"}`), NewPrincipal("arn:aws:iam::AccountNumber:*"), false}, + {[]byte(`"arn:aws:iam::AccountNumber:*"`), NewPrincipal(), true}, + {[]byte(`["arn:aws:iam::AccountNumber:*", "arn:aws:iam:AnotherAccount:*"]`), NewPrincipal(), true}, + } + + for i, testCase := range testCases { + var result Principal + err := json.Unmarshal(testCase.data, &result) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if 
!reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } + } +} diff --git a/pkg/policy/resource.go b/pkg/policy/resource.go new file mode 100644 index 000000000..ed17d806f --- /dev/null +++ b/pkg/policy/resource.go @@ -0,0 +1,131 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package policy + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/minio/minio/pkg/wildcard" +) + +// ResourceARNPrefix - resource ARN prefix as per AWS S3 specification. +const ResourceARNPrefix = "arn:aws:s3:::" + +// Resource - resource in policy statement. +type Resource struct { + bucketName string + pattern string +} + +func (r Resource) isBucketPattern() bool { + return !strings.Contains(r.pattern, "/") +} + +func (r Resource) isObjectPattern() bool { + return strings.Contains(r.pattern, "/") || strings.Contains(r.bucketName, "*") +} + +// IsValid - checks whether Resource is valid or not. +func (r Resource) IsValid() bool { + return r.bucketName != "" && r.pattern != "" +} + +// Match - matches object name with resource pattern. +func (r Resource) Match(resource string) bool { + return wildcard.Match(r.pattern, resource) +} + +// MarshalJSON - encodes Resource to JSON data. 
+func (r Resource) MarshalJSON() ([]byte, error) { + if !r.IsValid() { + return nil, fmt.Errorf("invalid resource %v", r) + } + + return json.Marshal(r.String()) +} + +func (r Resource) String() string { + return ResourceARNPrefix + r.pattern +} + +// UnmarshalJSON - decodes JSON data to Resource. +func (r *Resource) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + + parsedResource, err := parseResource(s) + if err != nil { + return err + } + + *r = parsedResource + + return nil +} + +// Validate - validates Resource is for given bucket or not. +func (r Resource) Validate(bucketName string) error { + if !r.IsValid() { + return fmt.Errorf("invalid resource") + } + + if !wildcard.Match(r.bucketName, bucketName) { + return fmt.Errorf("bucket name does not match") + } + + return nil +} + +// parseResource - parses string to Resource. +func parseResource(s string) (Resource, error) { + if !strings.HasPrefix(s, ResourceARNPrefix) { + return Resource{}, fmt.Errorf("invalid resource '%v'", s) + } + + pattern := strings.TrimPrefix(s, ResourceARNPrefix) + tokens := strings.SplitN(pattern, "/", 2) + bucketName := tokens[0] + if bucketName == "" { + return Resource{}, fmt.Errorf("invalid resource format '%v'", s) + } + + return Resource{ + bucketName: bucketName, + pattern: pattern, + }, nil +} + +// NewResource - creates new resource. +func NewResource(bucketName, keyName string) Resource { + pattern := bucketName + if keyName != "" { + if !strings.HasPrefix(keyName, "/") { + pattern += "/" + } + + pattern += keyName + } + + return Resource{ + bucketName: bucketName, + pattern: pattern, + } +} diff --git a/pkg/policy/resource_test.go b/pkg/policy/resource_test.go new file mode 100644 index 000000000..b9cb7df51 --- /dev/null +++ b/pkg/policy/resource_test.go @@ -0,0 +1,221 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package policy + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestResourceIsBucketPattern(t *testing.T) { + testCases := []struct { + resource Resource + expectedResult bool + }{ + {NewResource("*", ""), true}, + {NewResource("mybucket", ""), true}, + {NewResource("mybucket*", ""), true}, + {NewResource("mybucket?0", ""), true}, + {NewResource("", "*"), false}, + {NewResource("*", "*"), false}, + {NewResource("mybucket", "*"), false}, + {NewResource("mybucket*", "/myobject"), false}, + {NewResource("mybucket?0", "/2010/photos/*"), false}, + } + + for i, testCase := range testCases { + result := testCase.resource.isBucketPattern() + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestResourceIsObjectPattern(t *testing.T) { + testCases := []struct { + resource Resource + expectedResult bool + }{ + {NewResource("*", ""), true}, + {NewResource("mybucket*", ""), true}, + {NewResource("", "*"), true}, + {NewResource("*", "*"), true}, + {NewResource("mybucket", "*"), true}, + {NewResource("mybucket*", "/myobject"), true}, + {NewResource("mybucket?0", "/2010/photos/*"), true}, + {NewResource("mybucket", ""), false}, + {NewResource("mybucket?0", ""), false}, + } + + for i, testCase := range testCases { + result := testCase.resource.isObjectPattern() + + if result != testCase.expectedResult { + 
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestResourceIsValid(t *testing.T) { + testCases := []struct { + resource Resource + expectedResult bool + }{ + {NewResource("*", ""), true}, + {NewResource("mybucket*", ""), true}, + {NewResource("*", "*"), true}, + {NewResource("mybucket", "*"), true}, + {NewResource("mybucket*", "/myobject"), true}, + {NewResource("mybucket?0", "/2010/photos/*"), true}, + {NewResource("mybucket", ""), true}, + {NewResource("mybucket?0", ""), true}, + {NewResource("", ""), false}, + {NewResource("", "*"), false}, + } + + for i, testCase := range testCases { + result := testCase.resource.IsValid() + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestResourceMatch(t *testing.T) { + testCases := []struct { + resource Resource + objectName string + expectedResult bool + }{ + {NewResource("*", ""), "mybucket", true}, + {NewResource("*", ""), "mybucket/myobject", true}, + {NewResource("mybucket*", ""), "mybucket", true}, + {NewResource("mybucket*", ""), "mybucket/myobject", true}, + {NewResource("", "*"), "/myobject", true}, + {NewResource("*", "*"), "mybucket/myobject", true}, + {NewResource("mybucket", "*"), "mybucket/myobject", true}, + {NewResource("mybucket*", "/myobject"), "mybucket/myobject", true}, + {NewResource("mybucket*", "/myobject"), "mybucket100/myobject", true}, + {NewResource("mybucket?0", "/2010/photos/*"), "mybucket20/2010/photos/1.jpg", true}, + {NewResource("mybucket", ""), "mybucket", true}, + {NewResource("mybucket?0", ""), "mybucket30", true}, + {NewResource("", "*"), "mybucket/myobject", false}, + {NewResource("*", "*"), "mybucket", false}, + {NewResource("mybucket", "*"), "mybucket10/myobject", false}, + {NewResource("mybucket?0", "/2010/photos/*"), "mybucket0/2010/photos/1.jpg", false}, + {NewResource("mybucket", ""), "mybucket/myobject", false}, + } + + for i, 
testCase := range testCases { + result := testCase.resource.Match(testCase.objectName) + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestResourceMarshalJSON(t *testing.T) { + testCases := []struct { + resource Resource + expectedResult []byte + expectErr bool + }{ + {NewResource("*", ""), []byte(`"arn:aws:s3:::*"`), false}, + {NewResource("mybucket*", ""), []byte(`"arn:aws:s3:::mybucket*"`), false}, + {NewResource("mybucket", ""), []byte(`"arn:aws:s3:::mybucket"`), false}, + {NewResource("*", "*"), []byte(`"arn:aws:s3:::*/*"`), false}, + {NewResource("mybucket", "*"), []byte(`"arn:aws:s3:::mybucket/*"`), false}, + {NewResource("mybucket*", "myobject"), []byte(`"arn:aws:s3:::mybucket*/myobject"`), false}, + {NewResource("mybucket?0", "/2010/photos/*"), []byte(`"arn:aws:s3:::mybucket?0/2010/photos/*"`), false}, + {Resource{}, nil, true}, + {NewResource("", "*"), nil, true}, + } + + for i, testCase := range testCases { + result, err := json.Marshal(testCase.resource) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v", i+1, string(testCase.expectedResult), string(result)) + } + } + } +} + +func TestResourceUnmarshalJSON(t *testing.T) { + testCases := []struct { + data []byte + expectedResult Resource + expectErr bool + }{ + {[]byte(`"arn:aws:s3:::*"`), NewResource("*", ""), false}, + {[]byte(`"arn:aws:s3:::mybucket*"`), NewResource("mybucket*", ""), false}, + {[]byte(`"arn:aws:s3:::mybucket"`), NewResource("mybucket", ""), false}, + {[]byte(`"arn:aws:s3:::*/*"`), NewResource("*", "*"), false}, + {[]byte(`"arn:aws:s3:::mybucket/*"`), NewResource("mybucket", "*"), false}, + {[]byte(`"arn:aws:s3:::mybucket*/myobject"`), 
NewResource("mybucket*", "myobject"), false}, + {[]byte(`"arn:aws:s3:::mybucket?0/2010/photos/*"`), NewResource("mybucket?0", "/2010/photos/*"), false}, + {[]byte(`"mybucket/myobject*"`), Resource{}, true}, + {[]byte(`"arn:aws:s3:::/*"`), Resource{}, true}, + } + + for i, testCase := range testCases { + var result Resource + err := json.Unmarshal(testCase.data, &result) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } + } +} + +func TestResourceValidate(t *testing.T) { + testCases := []struct { + resource Resource + bucketName string + expectErr bool + }{ + {NewResource("mybucket", "/myobject*"), "mybucket", false}, + {NewResource("", "/myobject*"), "yourbucket", true}, + {NewResource("mybucket", "/myobject*"), "yourbucket", true}, + } + + for i, testCase := range testCases { + err := testCase.resource.Validate(testCase.bucketName) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + } +} diff --git a/pkg/policy/resourceset.go b/pkg/policy/resourceset.go new file mode 100644 index 000000000..4a5470819 --- /dev/null +++ b/pkg/policy/resourceset.go @@ -0,0 +1,147 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package policy + +import ( + "encoding/json" + "fmt" + "sort" + + "github.com/minio/minio-go/pkg/set" +) + +// ResourceSet - set of resources in policy statement. +type ResourceSet map[Resource]struct{} + +// bucketResourceExists - checks if at least one bucket resource exists in the set. +func (resourceSet ResourceSet) bucketResourceExists() bool { + for resource := range resourceSet { + if resource.isBucketPattern() { + return true + } + } + + return false +} + +// objectResourceExists - checks if at least one object resource exists in the set. +func (resourceSet ResourceSet) objectResourceExists() bool { + for resource := range resourceSet { + if resource.isObjectPattern() { + return true + } + } + + return false +} + +// Add - adds resource to resource set. +func (resourceSet ResourceSet) Add(resource Resource) { + resourceSet[resource] = struct{}{} +} + +// Intersection - returns resouces available in both ResourcsSet. +func (resourceSet ResourceSet) Intersection(sset ResourceSet) ResourceSet { + nset := NewResourceSet() + for k := range resourceSet { + if _, ok := sset[k]; ok { + nset.Add(k) + } + } + + return nset +} + +// MarshalJSON - encodes ResourceSet to JSON data. +func (resourceSet ResourceSet) MarshalJSON() ([]byte, error) { + if len(resourceSet) == 0 { + return nil, fmt.Errorf("empty resource set") + } + + resources := []Resource{} + for resource := range resourceSet { + resources = append(resources, resource) + } + + return json.Marshal(resources) +} + +// Match - matches object name with anyone of resource pattern in resource set. 
+func (resourceSet ResourceSet) Match(resource string) bool { + for r := range resourceSet { + if r.Match(resource) { + return true + } + } + + return false +} + +func (resourceSet ResourceSet) String() string { + resources := []string{} + for resource := range resourceSet { + resources = append(resources, resource.String()) + } + sort.Strings(resources) + + return fmt.Sprintf("%v", resources) +} + +// UnmarshalJSON - decodes JSON data to ResourceSet. +func (resourceSet *ResourceSet) UnmarshalJSON(data []byte) error { + var sset set.StringSet + if err := json.Unmarshal(data, &sset); err != nil { + return err + } + + *resourceSet = make(ResourceSet) + for _, s := range sset.ToSlice() { + resource, err := parseResource(s) + if err != nil { + return err + } + + if _, found := (*resourceSet)[resource]; found { + return fmt.Errorf("duplicate resource '%v' found", s) + } + + resourceSet.Add(resource) + } + + return nil +} + +// Validate - validates ResourceSet is for given bucket or not. +func (resourceSet ResourceSet) Validate(bucketName string) error { + for resource := range resourceSet { + if err := resource.Validate(bucketName); err != nil { + return err + } + } + + return nil +} + +// NewResourceSet - creates new resource set. +func NewResourceSet(resources ...Resource) ResourceSet { + resourceSet := make(ResourceSet) + for _, resource := range resources { + resourceSet.Add(resource) + } + + return resourceSet +} diff --git a/pkg/policy/resourceset_test.go b/pkg/policy/resourceset_test.go new file mode 100644 index 000000000..7a31e6f51 --- /dev/null +++ b/pkg/policy/resourceset_test.go @@ -0,0 +1,240 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package policy + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestResourceSetBucketResourceExists(t *testing.T) { + testCases := []struct { + resourceSet ResourceSet + expectedResult bool + }{ + {NewResourceSet(NewResource("*", "")), true}, + {NewResourceSet(NewResource("mybucket", "")), true}, + {NewResourceSet(NewResource("mybucket*", "")), true}, + {NewResourceSet(NewResource("mybucket?0", "")), true}, + {NewResourceSet(NewResource("mybucket", "/2010/photos/*"), NewResource("mybucket", "")), true}, + {NewResourceSet(NewResource("", "*")), false}, + {NewResourceSet(NewResource("*", "*")), false}, + {NewResourceSet(NewResource("mybucket", "*")), false}, + {NewResourceSet(NewResource("mybucket*", "/myobject")), false}, + {NewResourceSet(NewResource("mybucket?0", "/2010/photos/*")), false}, + } + + for i, testCase := range testCases { + result := testCase.resourceSet.bucketResourceExists() + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestResourceSetObjectResourceExists(t *testing.T) { + testCases := []struct { + resourceSet ResourceSet + expectedResult bool + }{ + {NewResourceSet(NewResource("*", "")), true}, + {NewResourceSet(NewResource("mybucket*", "")), true}, + {NewResourceSet(NewResource("", "*")), true}, + {NewResourceSet(NewResource("*", "*")), true}, + {NewResourceSet(NewResource("mybucket", "*")), true}, + {NewResourceSet(NewResource("mybucket*", "/myobject")), true}, + {NewResourceSet(NewResource("mybucket?0", 
"/2010/photos/*")), true}, + {NewResourceSet(NewResource("mybucket", ""), NewResource("mybucket", "/2910/photos/*")), true}, + {NewResourceSet(NewResource("mybucket", "")), false}, + {NewResourceSet(NewResource("mybucket?0", "")), false}, + } + + for i, testCase := range testCases { + result := testCase.resourceSet.objectResourceExists() + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestResourceSetAdd(t *testing.T) { + testCases := []struct { + resourceSet ResourceSet + resource Resource + expectedResult ResourceSet + }{ + {NewResourceSet(), NewResource("mybucket", "/myobject*"), + NewResourceSet(NewResource("mybucket", "/myobject*"))}, + {NewResourceSet(NewResource("mybucket", "/myobject*")), + NewResource("mybucket", "/yourobject*"), + NewResourceSet(NewResource("mybucket", "/myobject*"), + NewResource("mybucket", "/yourobject*"))}, + {NewResourceSet(NewResource("mybucket", "/myobject*")), + NewResource("mybucket", "/myobject*"), + NewResourceSet(NewResource("mybucket", "/myobject*"))}, + } + + for i, testCase := range testCases { + testCase.resourceSet.Add(testCase.resource) + + if !reflect.DeepEqual(testCase.resourceSet, testCase.expectedResult) { + t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, testCase.resourceSet) + } + } +} + +func TestResourceSetIntersection(t *testing.T) { + testCases := []struct { + set ResourceSet + setToIntersect ResourceSet + expectedResult ResourceSet + }{ + {NewResourceSet(), NewResourceSet(NewResource("mybucket", "/myobject*")), NewResourceSet()}, + {NewResourceSet(NewResource("mybucket", "/myobject*")), NewResourceSet(), NewResourceSet()}, + {NewResourceSet(NewResource("mybucket", "/myobject*")), + NewResourceSet(NewResource("mybucket", "/myobject*"), NewResource("mybucket", "/yourobject*")), + NewResourceSet(NewResource("mybucket", "/myobject*"))}, + } + + for i, testCase := range testCases { + result := 
testCase.set.Intersection(testCase.setToIntersect) + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, testCase.set) + } + } +} + +func TestResourceSetMarshalJSON(t *testing.T) { + testCases := []struct { + resoruceSet ResourceSet + expectedResult []byte + expectErr bool + }{ + {NewResourceSet(NewResource("mybucket", "/myobject*")), + []byte(`["arn:aws:s3:::mybucket/myobject*"]`), false}, + {NewResourceSet(NewResource("mybucket", "/photos/myobject*")), + []byte(`["arn:aws:s3:::mybucket/photos/myobject*"]`), false}, + {NewResourceSet(), nil, true}, + } + + for i, testCase := range testCases { + result, err := json.Marshal(testCase.resoruceSet) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v", i+1, string(testCase.expectedResult), string(result)) + } + } + } +} + +func TestResourceSetMatch(t *testing.T) { + testCases := []struct { + resourceSet ResourceSet + resource string + expectedResult bool + }{ + {NewResourceSet(NewResource("*", "")), "mybucket", true}, + {NewResourceSet(NewResource("*", "")), "mybucket/myobject", true}, + {NewResourceSet(NewResource("mybucket*", "")), "mybucket", true}, + {NewResourceSet(NewResource("mybucket*", "")), "mybucket/myobject", true}, + {NewResourceSet(NewResource("", "*")), "/myobject", true}, + {NewResourceSet(NewResource("*", "*")), "mybucket/myobject", true}, + {NewResourceSet(NewResource("mybucket", "*")), "mybucket/myobject", true}, + {NewResourceSet(NewResource("mybucket*", "/myobject")), "mybucket/myobject", true}, + {NewResourceSet(NewResource("mybucket*", "/myobject")), "mybucket100/myobject", true}, + {NewResourceSet(NewResource("mybucket?0", "/2010/photos/*")), 
"mybucket20/2010/photos/1.jpg", true}, + {NewResourceSet(NewResource("mybucket", "")), "mybucket", true}, + {NewResourceSet(NewResource("mybucket?0", "")), "mybucket30", true}, + {NewResourceSet(NewResource("mybucket?0", "/2010/photos/*"), + NewResource("mybucket", "/2010/photos/*")), "mybucket/2010/photos/1.jpg", true}, + {NewResourceSet(NewResource("", "*")), "mybucket/myobject", false}, + {NewResourceSet(NewResource("*", "*")), "mybucket", false}, + {NewResourceSet(NewResource("mybucket", "*")), "mybucket10/myobject", false}, + {NewResourceSet(NewResource("mybucket", "")), "mybucket/myobject", false}, + {NewResourceSet(), "mybucket/myobject", false}, + } + + for i, testCase := range testCases { + result := testCase.resourceSet.Match(testCase.resource) + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestResourceSetUnmarshalJSON(t *testing.T) { + testCases := []struct { + data []byte + expectedResult ResourceSet + expectErr bool + }{ + {[]byte(`"arn:aws:s3:::mybucket/myobject*"`), + NewResourceSet(NewResource("mybucket", "/myobject*")), false}, + {[]byte(`"arn:aws:s3:::mybucket/photos/myobject*"`), + NewResourceSet(NewResource("mybucket", "/photos/myobject*")), false}, + {[]byte(`"arn:aws:s3:::mybucket"`), NewResourceSet(NewResource("mybucket", "")), false}, + {[]byte(`"mybucket/myobject*"`), nil, true}, + } + + for i, testCase := range testCases { + var result ResourceSet + err := json.Unmarshal(testCase.data, &result) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } + } +} + +func TestResourceSetValidate(t *testing.T) { + testCases := []struct { + resourceSet ResourceSet + 
bucketName string + expectErr bool + }{ + {NewResourceSet(NewResource("mybucket", "/myobject*")), "mybucket", false}, + {NewResourceSet(NewResource("", "/myobject*")), "yourbucket", true}, + {NewResourceSet(NewResource("mybucket", "/myobject*")), "yourbucket", true}, + } + + for i, testCase := range testCases { + err := testCase.resourceSet.Validate(testCase.bucketName) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + } +} diff --git a/pkg/policy/statement.go b/pkg/policy/statement.go new file mode 100644 index 000000000..e1b12bdb9 --- /dev/null +++ b/pkg/policy/statement.go @@ -0,0 +1,156 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package policy + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/minio/minio/pkg/policy/condition" +) + +// Statement - policy statement. +type Statement struct { + SID ID `json:"Sid,omitempty"` + Effect Effect `json:"Effect"` + Principal Principal `json:"Principal"` + Actions ActionSet `json:"Action"` + Resources ResourceSet `json:"Resource"` + Conditions condition.Functions `json:"Condition,omitempty"` +} + +// IsAllowed - checks given policy args is allowed to continue the Rest API. 
+func (statement Statement) IsAllowed(args Args) bool { + check := func() bool { + if !statement.Principal.Match(args.AccountName) { + return false + } + + if !statement.Actions.Contains(args.Action) { + return false + } + + resource := args.BucketName + if args.ObjectName != "" { + if !strings.HasPrefix(args.ObjectName, "/") { + resource += "/" + } + + resource += args.ObjectName + } + + if !statement.Resources.Match(resource) { + return false + } + + return statement.Conditions.Evaluate(args.ConditionValues) + } + + return statement.Effect.IsAllowed(check()) +} + +// isValid - checks whether statement is valid or not. +func (statement Statement) isValid() error { + if !statement.Effect.IsValid() { + return fmt.Errorf("invalid Effect %v", statement.Effect) + } + + if !statement.Principal.IsValid() { + return fmt.Errorf("invalid Principal %v", statement.Principal) + } + + if len(statement.Actions) == 0 { + return fmt.Errorf("Action must not be empty") + } + + if len(statement.Resources) == 0 { + return fmt.Errorf("Resource must not be empty") + } + + for action := range statement.Actions { + if action.isObjectAction() { + if !statement.Resources.objectResourceExists() { + return fmt.Errorf("unsupported Resource found %v for action %v", statement.Resources, action) + } + } else { + if !statement.Resources.bucketResourceExists() { + return fmt.Errorf("unsupported Resource found %v for action %v", statement.Resources, action) + } + } + + keys := statement.Conditions.Keys() + keyDiff := keys.Difference(actionConditionKeyMap[action]) + if !keyDiff.IsEmpty() { + return fmt.Errorf("unsupported condition keys '%v' used for action '%v'", keyDiff, action) + } + } + + return nil +} + +// MarshalJSON - encodes JSON data to Statement. 
+func (statement Statement) MarshalJSON() ([]byte, error) { + if err := statement.isValid(); err != nil { + return nil, err + } + + // subtype to avoid recursive call to MarshalJSON() + type subStatement Statement + ss := subStatement(statement) + return json.Marshal(ss) +} + +// UnmarshalJSON - decodes JSON data to Statement. +func (statement *Statement) UnmarshalJSON(data []byte) error { + // subtype to avoid recursive call to UnmarshalJSON() + type subStatement Statement + var ss subStatement + + if err := json.Unmarshal(data, &ss); err != nil { + return err + } + + s := Statement(ss) + if err := s.isValid(); err != nil { + return err + } + + *statement = s + + return nil +} + +// Validate - validates Statement is for given bucket or not. +func (statement Statement) Validate(bucketName string) error { + if err := statement.isValid(); err != nil { + return err + } + + return statement.Resources.Validate(bucketName) +} + +// NewStatement - creates new statement. +func NewStatement(effect Effect, principal Principal, actionSet ActionSet, resourceSet ResourceSet, conditions condition.Functions) Statement { + return Statement{ + Effect: effect, + Principal: principal, + Actions: actionSet, + Resources: resourceSet, + Conditions: conditions, + } +} diff --git a/pkg/policy/statement_test.go b/pkg/policy/statement_test.go new file mode 100644 index 000000000..ac2e475ae --- /dev/null +++ b/pkg/policy/statement_test.go @@ -0,0 +1,571 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package policy + +import ( + "encoding/json" + "net" + "reflect" + "testing" + + "github.com/minio/minio/pkg/policy/condition" +) + +func TestStatementIsAllowed(t *testing.T) { + case1Statement := NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetBucketLocationAction, PutObjectAction), + NewResourceSet(NewResource("*", "")), + condition.NewFunctions(), + ) + + case2Statement := NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetObjectAction, PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ) + + _, IPNet1, err := net.ParseCIDR("192.168.1.0/24") + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + func1, err := condition.NewIPAddressFunc( + condition.AWSSourceIP, + IPNet1, + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + case3Statement := NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetObjectAction, PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(func1), + ) + + case4Statement := NewStatement( + Deny, + NewPrincipal("*"), + NewActionSet(GetObjectAction, PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(func1), + ) + + anonGetBucketLocationArgs := Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: GetBucketLocationAction, + BucketName: "mybucket", + ConditionValues: map[string][]string{}, + } + + anonPutObjectActionArgs := Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: PutObjectAction, + BucketName: "mybucket", + ConditionValues: map[string][]string{ + "x-amz-copy-source": {"mybucket/myobject"}, + "SourceIp": {"192.168.1.10"}, + }, + ObjectName: "myobject", + } + + anonGetObjectActionArgs := Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: GetObjectAction, + BucketName: "mybucket", + ConditionValues: 
map[string][]string{}, + ObjectName: "myobject", + } + + getBucketLocationArgs := Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: GetBucketLocationAction, + BucketName: "mybucket", + ConditionValues: map[string][]string{}, + IsOwner: true, + } + + putObjectActionArgs := Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: PutObjectAction, + BucketName: "mybucket", + ConditionValues: map[string][]string{ + "x-amz-copy-source": {"mybucket/myobject"}, + "SourceIp": {"192.168.1.10"}, + }, + IsOwner: true, + ObjectName: "myobject", + } + + getObjectActionArgs := Args{ + AccountName: "Q3AM3UQ867SPQQA43P2F", + Action: GetObjectAction, + BucketName: "mybucket", + ConditionValues: map[string][]string{}, + IsOwner: true, + ObjectName: "myobject", + } + + testCases := []struct { + statement Statement + args Args + expectedResult bool + }{ + {case1Statement, anonGetBucketLocationArgs, true}, + {case1Statement, anonPutObjectActionArgs, true}, + {case1Statement, anonGetObjectActionArgs, false}, + {case1Statement, getBucketLocationArgs, true}, + {case1Statement, putObjectActionArgs, true}, + {case1Statement, getObjectActionArgs, false}, + + {case2Statement, anonGetBucketLocationArgs, false}, + {case2Statement, anonPutObjectActionArgs, true}, + {case2Statement, anonGetObjectActionArgs, true}, + {case2Statement, getBucketLocationArgs, false}, + {case2Statement, putObjectActionArgs, true}, + {case2Statement, getObjectActionArgs, true}, + + {case3Statement, anonGetBucketLocationArgs, false}, + {case3Statement, anonPutObjectActionArgs, true}, + {case3Statement, anonGetObjectActionArgs, false}, + {case3Statement, getBucketLocationArgs, false}, + {case3Statement, putObjectActionArgs, true}, + {case3Statement, getObjectActionArgs, false}, + + {case4Statement, anonGetBucketLocationArgs, true}, + {case4Statement, anonPutObjectActionArgs, false}, + {case4Statement, anonGetObjectActionArgs, true}, + {case4Statement, getBucketLocationArgs, true}, + {case4Statement, 
putObjectActionArgs, false}, + {case4Statement, getObjectActionArgs, true}, + } + + for i, testCase := range testCases { + result := testCase.statement.IsAllowed(testCase.args) + + if result != testCase.expectedResult { + t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) + } + } +} + +func TestStatementIsValid(t *testing.T) { + _, IPNet1, err := net.ParseCIDR("192.168.1.0/24") + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + func1, err := condition.NewIPAddressFunc( + condition.AWSSourceIP, + IPNet1, + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + func2, err := condition.NewStringEqualsFunc( + condition.S3XAmzCopySource, + "mybucket/myobject", + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + + testCases := []struct { + statement Statement + expectErr bool + }{ + // Invalid effect error. + {NewStatement( + Effect("foo"), + NewPrincipal("*"), + NewActionSet(GetBucketLocationAction, PutObjectAction), + NewResourceSet(NewResource("*", "")), + condition.NewFunctions(), + ), true}, + // Invalid principal error. + {NewStatement( + Allow, + NewPrincipal(), + NewActionSet(GetBucketLocationAction, PutObjectAction), + NewResourceSet(NewResource("*", "")), + condition.NewFunctions(), + ), true}, + // Empty actions error. + {NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(), + NewResourceSet(NewResource("*", "")), + condition.NewFunctions(), + ), true}, + // Empty resources error. + {NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetBucketLocationAction, PutObjectAction), + NewResourceSet(), + condition.NewFunctions(), + ), true}, + // Unsupported resource found for object action. + {NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetBucketLocationAction, PutObjectAction), + NewResourceSet(NewResource("mybucket", "")), + condition.NewFunctions(), + ), true}, + // Unsupported resource found for bucket action. 
+ {NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetBucketLocationAction, PutObjectAction), + NewResourceSet(NewResource("mybucket", "myobject*")), + condition.NewFunctions(), + ), true}, + // Unsupported condition key for action. + {NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetObjectAction, PutObjectAction), + NewResourceSet(NewResource("mybucket", "myobject*")), + condition.NewFunctions(func1, func2), + ), true}, + {NewStatement( + Deny, + NewPrincipal("*"), + NewActionSet(GetObjectAction, PutObjectAction), + NewResourceSet(NewResource("mybucket", "myobject*")), + condition.NewFunctions(func1), + ), false}, + } + + for i, testCase := range testCases { + err := testCase.statement.isValid() + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + } +} + +func TestStatementMarshalJSON(t *testing.T) { + case1Statement := NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ) + case1Statement.SID = "SomeId1" + case1Data := []byte(`{"Sid":"SomeId1","Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"]}`) + + func1, err := condition.NewNullFunc( + condition.S3XAmzCopySource, + true, + ) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + case2Statement := NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(func1), + ) + case2Data := []byte(`{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"],"Condition":{"Null":{"s3:x-amz-copy-source":[true]}}}`) + + func2, err := condition.NewNullFunc( + condition.S3XAmzServerSideEncryption, + false, + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + case3Statement := NewStatement( + Deny, + NewPrincipal("*"), + NewActionSet(GetObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(func2), + ) + case3Data := []byte(`{"Effect":"Deny","Principal":{"AWS":["*"]},"Action":["s3:GetObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"],"Condition":{"Null":{"s3:x-amz-server-side-encryption":[false]}}}`) + + case4Statement := NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetObjectAction, PutObjectAction), + NewResourceSet(NewResource("mybucket", "myobject*")), + condition.NewFunctions(func1, func2), + ) + + testCases := []struct { + statement Statement + expectedResult []byte + expectErr bool + }{ + {case1Statement, case1Data, false}, + {case2Statement, case2Data, false}, + {case3Statement, case3Data, false}, + // Invalid statement error. 
+ {case4Statement, nil, true}, + } + + for i, testCase := range testCases { + result, err := json.Marshal(testCase.statement) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v", i+1, string(testCase.expectedResult), string(result)) + } + } + } +} + +func TestStatementUnmarshalJSON(t *testing.T) { + case1Data := []byte(`{ + "Sid": "SomeId1", + "Effect": "Allow", + "Principal": "*", + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::mybucket/myobject*" +}`) + case1Statement := NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ) + case1Statement.SID = "SomeId1" + + case2Data := []byte(`{ + "Effect": "Allow", + "Principal": "*", + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::mybucket/myobject*", + "Condition": { + "Null": { + "s3:x-amz-copy-source": true + } + } +}`) + func1, err := condition.NewNullFunc( + condition.S3XAmzCopySource, + true, + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + case2Statement := NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(func1), + ) + + case3Data := []byte(`{ + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Action": [ + "s3:PutObject", + "s3:GetObject" + ], + "Resource": "arn:aws:s3:::mybucket/myobject*", + "Condition": { + "Null": { + "s3:x-amz-server-side-encryption": "false" + } + } +}`) + func2, err := condition.NewNullFunc( + condition.S3XAmzServerSideEncryption, + false, + ) + if err != nil { + t.Fatalf("unexpected error. 
%v\n", err) + } + case3Statement := NewStatement( + Deny, + NewPrincipal("*"), + NewActionSet(PutObjectAction, GetObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(func2), + ) + + case4Data := []byte(`{ + "Effect": "Allow", + "Principal": "Q3AM3UQ867SPQQA43P2F", + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::mybucket/myobject*" +}`) + + case5Data := []byte(`{ + "Principal": "*", + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::mybucket/myobject*" +}`) + + case6Data := []byte(`{ + "Effect": "Allow", + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::mybucket/myobject*" +}`) + + case7Data := []byte(`{ + "Effect": "Allow", + "Principal": "*", + "Resource": "arn:aws:s3:::mybucket/myobject*" +}`) + + case8Data := []byte(`{ + "Effect": "Allow", + "Principal": "*", + "Action": "s3:PutObject" +}`) + + case9Data := []byte(`{ + "Effect": "Allow", + "Principal": "*", + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::mybucket/myobject*", + "Condition": { + } +}`) + + case10Data := []byte(`{ + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Action": [ + "s3:PutObject", + "s3:GetObject" + ], + "Resource": "arn:aws:s3:::mybucket/myobject*", + "Condition": { + "StringEquals": { + "s3:x-amz-copy-source": "yourbucket/myobject*" + } + } +}`) + + testCases := []struct { + data []byte + expectedResult Statement + expectErr bool + }{ + {case1Data, case1Statement, false}, + {case2Data, case2Statement, false}, + {case3Data, case3Statement, false}, + // JSON unmarshaling error. + {case4Data, Statement{}, true}, + // Invalid effect error. + {case5Data, Statement{}, true}, + // empty principal error. + {case6Data, Statement{}, true}, + // Empty action error. + {case7Data, Statement{}, true}, + // Empty resource error. + {case8Data, Statement{}, true}, + // Empty condition error. + {case9Data, Statement{}, true}, + // Unsupported condition key error. 
+ {case10Data, Statement{}, true}, + } + + for i, testCase := range testCases { + var result Statement + err := json.Unmarshal(testCase.data, &result) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } + } +} + +func TestStatementValidate(t *testing.T) { + case1Statement := NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(PutObjectAction), + NewResourceSet(NewResource("mybucket", "/myobject*")), + condition.NewFunctions(), + ) + + func1, err := condition.NewNullFunc( + condition.S3XAmzCopySource, + true, + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + func2, err := condition.NewNullFunc( + condition.S3XAmzServerSideEncryption, + false, + ) + if err != nil { + t.Fatalf("unexpected error. %v\n", err) + } + case2Statement := NewStatement( + Allow, + NewPrincipal("*"), + NewActionSet(GetObjectAction, PutObjectAction), + NewResourceSet(NewResource("mybucket", "myobject*")), + condition.NewFunctions(func1, func2), + ) + + testCases := []struct { + statement Statement + bucketName string + expectErr bool + }{ + {case1Statement, "mybucket", false}, + {case2Statement, "mybucket", true}, + {case1Statement, "yourbucket", true}, + } + + for i, testCase := range testCases { + err := testCase.statement.Validate(testCase.bucketName) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + } +}