support tagging based policy conditions (#15763)

This commit is contained in:
Harshavardhana 2022-09-28 11:25:46 -07:00 committed by GitHub
parent 4f1ff9c4d9
commit 41b633f5ea
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 189 additions and 117 deletions

View File

@ -27,6 +27,7 @@ import (
jsoniter "github.com/json-iterator/go" jsoniter "github.com/json-iterator/go"
miniogopolicy "github.com/minio/minio-go/v7/pkg/policy" miniogopolicy "github.com/minio/minio-go/v7/pkg/policy"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/handlers" "github.com/minio/minio/internal/handlers"
xhttp "github.com/minio/minio/internal/http" xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/logger"
@ -125,6 +126,20 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
cloneHeader := r.Header.Clone() cloneHeader := r.Header.Clone()
if userTags := cloneHeader.Get(xhttp.AmzObjectTagging); userTags != "" {
tag, _ := tags.ParseObjectTags(userTags)
if tag != nil {
tagMap := tag.ToMap()
keys := make([]string, 0, len(tagMap))
for k, v := range tagMap {
args[pathJoin("ExistingObjectTag", k)] = []string{v}
args[pathJoin("RequestObjectTag", k)] = []string{v}
keys = append(keys, k)
}
args["RequestObjectTagKeys"] = keys
}
}
for _, objLock := range []string{ for _, objLock := range []string{
xhttp.AmzObjectLockMode, xhttp.AmzObjectLockMode,
xhttp.AmzObjectLockLegalHold, xhttp.AmzObjectLockLegalHold,
@ -137,6 +152,9 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
} }
for key, values := range cloneHeader { for key, values := range cloneHeader {
if strings.EqualFold(key, xhttp.AmzObjectTagging) {
continue
}
if existingValues, found := args[key]; found { if existingValues, found := args[key]; found {
args[key] = append(existingValues, values...) args[key] = append(existingValues, values...)
} else { } else {

View File

@ -418,6 +418,7 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
return checkPreconditions(ctx, w, r, oi, opts) return checkPreconditions(ctx, w, r, oi, opts)
} }
var proxy proxyResult var proxy proxyResult
gr, err := getObjectNInfo(ctx, bucket, object, rs, r.Header, readLock, opts) gr, err := getObjectNInfo(ctx, bucket, object, rs, r.Header, readLock, opts)
if err != nil { if err != nil {
@ -463,6 +464,7 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
w.Header()[xhttp.AmzVersionID] = []string{gr.ObjInfo.VersionID} w.Header()[xhttp.AmzVersionID] = []string{gr.ObjInfo.VersionID}
w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(gr.ObjInfo.DeleteMarker)} w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(gr.ObjInfo.DeleteMarker)}
} }
QueueReplicationHeal(ctx, bucket, gr.ObjInfo)
} }
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return return
@ -472,6 +474,7 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
objInfo := gr.ObjInfo objInfo := gr.ObjInfo
if !proxy.Proxy { // apply lifecycle rules only for local requests
// Automatically remove the object/version if an expiry lifecycle rule can be applied // Automatically remove the object/version if an expiry lifecycle rule can be applied
if lc, err := globalLifecycleSys.Get(bucket); err == nil { if lc, err := globalLifecycleSys.Get(bucket); err == nil {
rcfg, _ := globalBucketObjectLockSys.Get(bucket) rcfg, _ := globalBucketObjectLockSys.Get(bucket)
@ -491,10 +494,9 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
} }
} }
// Queue failed/pending replication automatically QueueReplicationHeal(ctx, bucket, gr.ObjInfo)
if !proxy.Proxy {
QueueReplicationHeal(ctx, bucket, objInfo)
} }
// filter object lock metadata if permission does not permit // filter object lock metadata if permission does not permit
getRetPerms := checkRequestAuthType(ctx, r, policy.GetObjectRetentionAction, bucket, object) getRetPerms := checkRequestAuthType(ctx, r, policy.GetObjectRetentionAction, bucket, object)
legalHoldPerms := checkRequestAuthType(ctx, r, policy.GetObjectLegalHoldAction, bucket, object) legalHoldPerms := checkRequestAuthType(ctx, r, policy.GetObjectLegalHoldAction, bucket, object)
@ -632,6 +634,36 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
return return
} }
// Get request range.
var rs *HTTPRangeSpec
rangeHeader := r.Header.Get(xhttp.Range)
objInfo, err := getObjectInfo(ctx, bucket, object, opts)
var proxy proxyResult
if err != nil {
// proxy HEAD to replication target if active-active replication is configured on the bucket
proxytgts := getProxyTargets(ctx, bucket, object, opts)
if !proxytgts.Empty() {
if rangeHeader != "" {
rs, _ = parseRequestRangeSpec(rangeHeader)
}
var oi ObjectInfo
oi, proxy = proxyHeadToReplicationTarget(ctx, bucket, object, rs, opts, proxytgts)
if proxy.Proxy {
objInfo = oi
}
if proxy.Err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, proxy.Err), r.URL)
return
}
}
}
if err == nil && objInfo.UserTags != "" {
// Set this such that authorization policies can be applied on the object tags.
r.Header.Set(xhttp.AmzObjectTagging, objInfo.UserTags)
}
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone { if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
if getRequestAuthType(r) == authTypeAnonymous { if getRequestAuthType(r) == authTypeAnonymous {
// As per "Permission" section in // As per "Permission" section in
@ -653,7 +685,6 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
ConditionValues: getConditionValues(r, "", "", nil), ConditionValues: getConditionValues(r, "", "", nil),
IsOwner: false, IsOwner: false,
}) { }) {
_, err = getObjectInfo(ctx, bucket, object, opts)
if toAPIError(ctx, err).Code == "NoSuchKey" { if toAPIError(ctx, err).Code == "NoSuchKey" {
s3Error = ErrNoSuchKey s3Error = ErrNoSuchKey
} }
@ -662,26 +693,8 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error)) writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
return return
} }
// Get request range.
var rs *HTTPRangeSpec
rangeHeader := r.Header.Get(xhttp.Range)
objInfo, err := getObjectInfo(ctx, bucket, object, opts) if err != nil && !proxy.Proxy {
var proxy proxyResult
if err != nil {
var oi ObjectInfo
// proxy HEAD to replication target if active-active replication configured on bucket
proxytgts := getProxyTargets(ctx, bucket, object, opts)
if !proxytgts.Empty() {
if rangeHeader != "" {
rs, _ = parseRequestRangeSpec(rangeHeader)
}
oi, proxy = proxyHeadToReplicationTarget(ctx, bucket, object, rs, opts, proxytgts)
if proxy.Proxy {
objInfo = oi
}
}
if !proxy.Proxy {
if globalBucketVersioningSys.PrefixEnabled(bucket, object) { if globalBucketVersioningSys.PrefixEnabled(bucket, object) {
switch { switch {
case !objInfo.VersionPurgeStatus.Empty(): case !objInfo.VersionPurgeStatus.Empty():
@ -695,13 +708,13 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID} w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(objInfo.DeleteMarker)} w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(objInfo.DeleteMarker)}
} }
}
QueueReplicationHeal(ctx, bucket, objInfo) QueueReplicationHeal(ctx, bucket, objInfo)
}
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err)) writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return return
} }
}
if !proxy.Proxy { // apply lifecycle rules only locally not for proxied requests
// Automatically remove the object/version if an expiry lifecycle rule can be applied // Automatically remove the object/version if an expiry lifecycle rule can be applied
if lc, err := globalLifecycleSys.Get(bucket); err == nil { if lc, err := globalLifecycleSys.Get(bucket); err == nil {
rcfg, _ := globalBucketObjectLockSys.Get(bucket) rcfg, _ := globalBucketObjectLockSys.Get(bucket)
@ -720,9 +733,6 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
return return
} }
} }
// Queue failed/pending replication automatically
if !proxy.Proxy {
QueueReplicationHeal(ctx, bucket, objInfo) QueueReplicationHeal(ctx, bucket, objInfo)
} }
@ -3197,15 +3207,19 @@ func (api objectAPIHandlers) PutObjectTaggingHandler(w http.ResponseWriter, r *h
return return
} }
// Allow putObjectTagging if policy action is set // Tags XML will not be bigger than 1MiB in size, fail if its bigger.
if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectTaggingAction, bucket, object); s3Error != ErrNone { tags, err := tags.ParseObjectXML(io.LimitReader(r.Body, 1<<20))
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL) if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return return
} }
tags, err := tags.ParseObjectXML(io.LimitReader(r.Body, r.ContentLength)) // Set this such that authorization policies can be applied on the object tags.
if err != nil { r.Header.Set(xhttp.AmzObjectTagging, tags.String())
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
// Allow putObjectTagging if policy action is set
if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectTaggingAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return return
} }
@ -3283,12 +3297,6 @@ func (api objectAPIHandlers) DeleteObjectTaggingHandler(w http.ResponseWriter, r
return return
} }
// Allow deleteObjectTagging if policy action is set
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteObjectTaggingAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
opts, err := getOpts(ctx, r, bucket, object) opts, err := getOpts(ctx, r, bucket, object)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@ -3300,6 +3308,18 @@ func (api objectAPIHandlers) DeleteObjectTaggingHandler(w http.ResponseWriter, r
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return return
} }
if userTags := oi.UserTags; userTags != "" {
// Set this such that authorization policies can be applied on the object tags.
r.Header.Set(xhttp.AmzObjectTagging, oi.UserTags)
}
// Allow deleteObjectTagging if policy action is set
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteObjectTaggingAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(oi, replication.MetadataReplicationType, opts)) dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(oi, replication.MetadataReplicationType, opts))
if dsc.ReplicateAny() { if dsc.ReplicateAny() {
opts.UserDefined = make(map[string]string) opts.UserDefined = make(map[string]string)

View File

@ -30,6 +30,7 @@ import (
"time" "time"
"github.com/gorilla/mux" "github.com/gorilla/mux"
"github.com/minio/minio-go/v7/pkg/tags"
sse "github.com/minio/minio/internal/bucket/encryption" sse "github.com/minio/minio/internal/bucket/encryption"
objectlock "github.com/minio/minio/internal/bucket/object/lock" objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/replication" "github.com/minio/minio/internal/bucket/replication"
@ -129,6 +130,20 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
return return
} }
if objTags := r.Header.Get(xhttp.AmzObjectTagging); objTags != "" {
if !objectAPI.IsTaggingSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if _, err := tags.ParseObjectTags(objTags); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
metadata[xhttp.AmzObjectTagging] = objTags
}
retPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectRetentionAction) retPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectRetentionAction)
holdPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectLegalHoldAction) holdPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectLegalHoldAction)

4
go.mod
View File

@ -48,8 +48,8 @@ require (
github.com/minio/highwayhash v1.0.2 github.com/minio/highwayhash v1.0.2
github.com/minio/kes v0.21.0 github.com/minio/kes v0.21.0
github.com/minio/madmin-go v1.4.29 github.com/minio/madmin-go v1.4.29
github.com/minio/minio-go/v7 v7.0.37 github.com/minio/minio-go/v7 v7.0.40-0.20220928095841-8848d8affe8a
github.com/minio/pkg v1.4.0 github.com/minio/pkg v1.4.2
github.com/minio/selfupdate v0.5.0 github.com/minio/selfupdate v0.5.0
github.com/minio/sha256-simd v1.0.0 github.com/minio/sha256-simd v1.0.0
github.com/minio/simdjson-go v0.4.2 github.com/minio/simdjson-go v0.4.2

8
go.sum
View File

@ -657,11 +657,11 @@ github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77Z
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.23/go.mod h1:ei5JjmxwHaMrgsMrn4U/+Nmg+d8MKS1U2DAn1ou4+Do= github.com/minio/minio-go/v7 v7.0.23/go.mod h1:ei5JjmxwHaMrgsMrn4U/+Nmg+d8MKS1U2DAn1ou4+Do=
github.com/minio/minio-go/v7 v7.0.37 h1:aJvYMbtpVPSFBck6guyvOkxK03MycxDOCs49ZBuY5M8= github.com/minio/minio-go/v7 v7.0.40-0.20220928095841-8848d8affe8a h1:COFh7S3tOKmJNYtKKFAuHQFH7MAaXxg4aAluXC9KQgc=
github.com/minio/minio-go/v7 v7.0.37/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw= github.com/minio/minio-go/v7 v7.0.40-0.20220928095841-8848d8affe8a/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
github.com/minio/pkg v1.1.20/go.mod h1:Xo7LQshlxGa9shKwJ7NzQbgW4s8T/Wc1cOStR/eUiMY= github.com/minio/pkg v1.1.20/go.mod h1:Xo7LQshlxGa9shKwJ7NzQbgW4s8T/Wc1cOStR/eUiMY=
github.com/minio/pkg v1.4.0 h1:bVdmz8vgv5bvLysHY2kuviuof0kteY6YPMRXCjDqJU4= github.com/minio/pkg v1.4.2 h1:QEToJld+cy+mMLDv084kIOgzjJQMbM+ioI/WotHeYQY=
github.com/minio/pkg v1.4.0/go.mod h1:mxCLAG+fOGIQr6odQ5Ukqc6qv9Zj6v1d6TD3NP82B7Y= github.com/minio/pkg v1.4.2/go.mod h1:mxCLAG+fOGIQr6odQ5Ukqc6qv9Zj6v1d6TD3NP82B7Y=
github.com/minio/selfupdate v0.5.0 h1:0UH1HlL49+2XByhovKl5FpYTjKfvrQ2sgL1zEXK6mfI= github.com/minio/selfupdate v0.5.0 h1:0UH1HlL49+2XByhovKl5FpYTjKfvrQ2sgL1zEXK6mfI=
github.com/minio/selfupdate v0.5.0/go.mod h1:mcDkzMgq8PRcpCRJo/NlPY7U45O5dfYl2Y0Rg7IustY= github.com/minio/selfupdate v0.5.0/go.mod h1:mcDkzMgq8PRcpCRJo/NlPY7U45O5dfYl2Y0Rg7IustY=
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=

View File

@ -20,7 +20,6 @@ package lifecycle
import ( import (
"encoding/xml" "encoding/xml"
"io" "io"
"log"
"github.com/minio/minio-go/v7/pkg/tags" "github.com/minio/minio-go/v7/pkg/tags"
) )
@ -172,7 +171,6 @@ func (f Filter) TestTags(userTags string) bool {
parsedTags, err := tags.ParseObjectTags(userTags) parsedTags, err := tags.ParseObjectTags(userTags)
if err != nil { if err != nil {
log.Printf("Unexpected object tag found: `%s`\n", userTags)
return false return false
} }
tagsMap := parsedTags.ToMap() tagsMap := parsedTags.ToMap()

View File

@ -19,6 +19,8 @@ package replication
import ( import (
"encoding/xml" "encoding/xml"
"github.com/minio/minio-go/v7/pkg/tags"
) )
var errInvalidFilter = Errorf("Filter must have exactly one of Prefix, Tag, or And specified") var errInvalidFilter = Errorf("Filter must have exactly one of Prefix, Tag, or And specified")
@ -29,8 +31,9 @@ type Filter struct {
Prefix string Prefix string
And And And And
Tag Tag Tag Tag
// Caching tags, only once // Caching tags, only once
cachedTags map[string]struct{} cachedTags map[string]string
} }
// IsEmpty returns true if filter is not set // IsEmpty returns true if filter is not set
@ -93,27 +96,43 @@ func (f Filter) Validate() error {
// TestTags tests if the object tags satisfy the Filter tags requirement, // TestTags tests if the object tags satisfy the Filter tags requirement,
// it returns true if there is no tags in the underlying Filter. // it returns true if there is no tags in the underlying Filter.
func (f *Filter) TestTags(ttags []string) bool { func (f *Filter) TestTags(userTags string) bool {
if f.cachedTags == nil { if f.cachedTags == nil {
tags := make(map[string]struct{}) cached := make(map[string]string)
for _, t := range append(f.And.Tags, f.Tag) { for _, t := range append(f.And.Tags, f.Tag) {
if !t.IsEmpty() { if !t.IsEmpty() {
tags[t.String()] = struct{}{} cached[t.Key] = t.Value
} }
} }
f.cachedTags = tags f.cachedTags = cached
} }
for ct := range f.cachedTags {
foundTag := false // This filter does not have any tags, always return true
for _, t := range ttags { if len(f.cachedTags) == 0 {
if ct == t { return true
foundTag = true
break
} }
}
if !foundTag { parsedTags, err := tags.ParseObjectTags(userTags)
if err != nil {
return false return false
} }
tagsMap := parsedTags.ToMap()
// This filter has tags configured but this object
// does not have any tag, skip this object
if len(tagsMap) == 0 {
return false
} }
// Both filter and object have tags, find a match,
// skip this object otherwise
for k, cv := range f.cachedTags {
v, ok := tagsMap[k]
if ok && v == cv {
return true return true
}
}
return false
} }

View File

@ -199,7 +199,7 @@ func (c Config) FilterActionableRules(obj ObjectOpts) []Rule {
if !strings.HasPrefix(obj.Name, rule.Prefix()) { if !strings.HasPrefix(obj.Name, rule.Prefix()) {
continue continue
} }
if rule.Filter.TestTags(strings.Split(obj.UserTags, "&")) { if rule.Filter.TestTags(obj.UserTags) {
rules = append(rules, rule) rules = append(rules, rule)
} }
} }

View File

@ -295,12 +295,14 @@ func TestReplicate(t *testing.T) {
{ObjectOpts{Name: "xa/c5test", UserTags: "k1=v1", Replica: false}, cfgs[4], true}, // 40. replica syncing disabled, this object is NOT a replica {ObjectOpts{Name: "xa/c5test", UserTags: "k1=v1", Replica: false}, cfgs[4], true}, // 40. replica syncing disabled, this object is NOT a replica
} }
for i, testCase := range testCases { for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.opts.Name, func(t *testing.T) {
result := testCase.c.Replicate(testCase.opts) result := testCase.c.Replicate(testCase.opts)
if result != testCase.expectedResult { if result != testCase.expectedResult {
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) t.Errorf("expected: %v, got: %v", testCase.expectedResult, result)
} }
})
} }
} }