mirror of https://github.com/minio/minio.git
migrate all bucket metadata into a single file (#9586)
This is a major overhaul that migrates all bucket-metadata-related configs into a single object, '.metadata.bin'. This allows for faster bootup across thousands of buckets and keeps the code simple enough for future work and additions. Additionally, this fixes #9396 and #9394.
This commit is contained in:
parent 730775bb4e
commit bd032d13ff
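For orientation, here is a minimal sketch (not part of the commit; helper names are illustrative) of how the new single-object payload is framed, mirroring the Save/Load logic further down in this diff: a 4-byte little-endian header carrying bucketMetadataFormat and bucketMetadataVersion, followed by the msgp-encoded BucketMetadata body.

package main

import (
    "encoding/binary"
    "fmt"
)

const (
    // Values taken from the diff below; format describes the on-disk layout,
    // version can track rolling upgrades of individual fields.
    bucketMetadataFormat  = 1
    bucketMetadataVersion = 1
)

// frameBucketMetadata prepends the 4-byte header that '.metadata.bin' uses
// before the msgp-encoded BucketMetadata body. (Illustrative helper.)
func frameBucketMetadata(msgpBody []byte) []byte {
    data := make([]byte, 4, 4+len(msgpBody))
    binary.LittleEndian.PutUint16(data[0:2], bucketMetadataFormat)
    binary.LittleEndian.PutUint16(data[2:4], bucketMetadataVersion)
    return append(data, msgpBody...)
}

// splitBucketMetadata validates the header and returns the msgp body,
// the same checks BucketMetadata.Load performs in this commit.
func splitBucketMetadata(data []byte) ([]byte, error) {
    if len(data) <= 4 {
        return nil, fmt.Errorf("bucket metadata: no data")
    }
    if f := binary.LittleEndian.Uint16(data[0:2]); f != bucketMetadataFormat {
        return nil, fmt.Errorf("bucket metadata: unknown format: %d", f)
    }
    if v := binary.LittleEndian.Uint16(data[2:4]); v != bucketMetadataVersion {
        return nil, fmt.Errorf("bucket metadata: unknown version: %d", v)
    }
    return data[4:], nil
}

func main() {
    framed := frameBucketMetadata([]byte{0x83}) // placeholder msgp map header
    body, err := splitBucketMetadata(framed)
    fmt.Println(len(body), err)
}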
@@ -100,18 +100,18 @@ func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.
}
if len(acl.AccessControlList.Grants) == 0 {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
return
}
if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
return
}
}
if aclHeader != "" && aclHeader != "private" {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
return
}

@@ -159,6 +159,7 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
},
Permission: "FULL_CONTROL",
})
if err := xml.NewEncoder(w).Encode(acl); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return

@@ -214,18 +215,18 @@ func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.
}
if len(acl.AccessControlList.Grants) == 0 {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
return
}
if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
return
}
}
if aclHeader != "" && aclHeader != "private" {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL, guessIsBrowserReq(r))
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
return
}
@@ -21,6 +21,7 @@ import (
"crypto/subtle"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"

@@ -897,7 +898,7 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
HTTPStatusCode: e.StatusCode,
}
default:
if err == errConfigNotFound {
if errors.Is(err, errConfigNotFound) {
apiErr = APIError{
Code: "XMinioConfigError",
Description: err.Error(),
@@ -17,9 +17,9 @@
package cmd
import (
"errors"
"io/ioutil"
"net/http"
"path"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config"

@@ -61,31 +61,22 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
defer r.Body.Close()
data, err := ioutil.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
quotaCfg, err := parseBucketQuota(data)
if err != nil {
if _, err = parseBucketQuota(bucket, data); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
configFile := path.Join(bucketConfigPrefix, bucket, bucketQuotaConfigFile)
if err = saveConfig(ctx, objectAPI, configFile, data); err != nil {
if err = globalBucketMetadataSys.Update(bucket, bucketQuotaConfigFile, data); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if quotaCfg.Quota > 0 {
globalBucketQuotaSys.Set(bucket, quotaCfg)
globalNotificationSys.PutBucketQuotaConfig(ctx, bucket, quotaCfg)
} else {
globalBucketQuotaSys.Remove(bucket)
globalNotificationSys.RemoveBucketQuotaConfig(ctx, bucket)
}
// Write success response.
writeSuccessResponseHeadersOnly(w)

@@ -109,16 +100,17 @@ func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
configFile := path.Join(bucketConfigPrefix, bucket, bucketQuotaConfigFile)
configData, err := readConfig(ctx, objectAPI, configFile)
configData, err := globalBucketMetadataSys.GetConfig(bucket, bucketQuotaConfigFile)
if err != nil {
if err != errConfigNotFound {
if errors.Is(err, errConfigNotFound) {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, BucketQuotaConfigNotFound{Bucket: bucket}), r.URL)
} else {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, BucketQuotaConfigNotFound{Bucket: bucket}), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, configData)
}

@@ -143,17 +135,12 @@ func (a adminAPIHandlers) RemoveBucketQuotaConfigHandler(w http.ResponseWriter,
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
configFile := path.Join(bucketConfigPrefix, bucket, bucketQuotaConfigFile)
if err := deleteConfig(ctx, objectAPI, configFile); err != nil {
if err != errConfigNotFound {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, BucketQuotaConfigNotFound{Bucket: bucket}), r.URL)
if err := globalBucketMetadataSys.Update(bucket, bucketQuotaConfigFile, nil); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
globalBucketQuotaSys.Remove(bucket)
globalNotificationSys.RemoveBucketQuotaConfig(ctx, bucket)
// Write success response.
writeSuccessNoContent(w)
}
@@ -100,6 +100,9 @@ const (
ErrNoSuchBucketLifecycle
ErrNoSuchLifecycleConfiguration
ErrNoSuchBucketSSEConfig
ErrNoSuchCORSConfiguration
ErrNoSuchWebsiteConfiguration
ErrReplicationConfigurationNotFoundError
ErrNoSuchKey
ErrNoSuchUpload
ErrNoSuchVersion

@@ -793,6 +796,21 @@ var errorCodes = errorCodeMap{
Description: "Object Lock configuration cannot be enabled on existing buckets",
HTTPStatusCode: http.StatusConflict,
},
ErrNoSuchCORSConfiguration: {
Code: "NoSuchCORSConfiguration",
Description: "The CORS configuration does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrNoSuchWebsiteConfiguration: {
Code: "NoSuchWebsiteConfiguration",
Description: "The specified bucket does not have a website configuration",
HTTPStatusCode: http.StatusNotFound,
},
ErrReplicationConfigurationNotFoundError: {
Code: "ReplicationConfigurationNotFoundError",
Description: "The replication configuration was not found",
HTTPStatusCode: http.StatusNotFound,
},
ErrNoSuchObjectLockConfiguration: {
Code: "NoSuchObjectLockConfiguration",
Description: "The specified object does not have a ObjectLock configuration",
@@ -18,6 +18,7 @@ package cmd
import (
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"

@@ -25,7 +26,6 @@ import (
"github.com/gorilla/mux"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
"github.com/minio/minio/pkg/bucket/policy"
)

@@ -85,17 +85,17 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
return
}
// Store the bucket encryption configuration in the object layer
if err = objAPI.SetBucketSSEConfig(ctx, bucket, encConfig); err != nil {
configData, err := xml.Marshal(encConfig)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Update the in-memory bucket encryption cache
globalBucketSSEConfigSys.Set(bucket, encConfig)
// Update peer MinIO servers of the updated bucket encryption config
globalNotificationSys.SetBucketSSEConfig(ctx, bucket, encConfig)
// Store the bucket encryption configuration in the object layer
if err = globalBucketMetadataSys.Update(bucket, bucketSSEConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
writeSuccessResponseHeadersOnly(w)
}

@@ -128,21 +128,17 @@ func (api objectAPIHandlers) GetBucketEncryptionHandler(w http.ResponseWriter, r
return
}
// Fetch bucket encryption configuration from object layer
var encConfig *bucketsse.BucketSSEConfig
if encConfig, err = objAPI.GetBucketSSEConfig(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
var encConfigData []byte
if encConfigData, err = xml.Marshal(encConfig); err != nil {
configData, err := globalBucketMetadataSys.GetConfig(bucket, bucketSSEConfig)
if err != nil {
if errors.Is(err, errConfigNotFound) {
err = BucketSSEConfigNotFound{Bucket: bucket}
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write bucket encryption configuration to client
writeSuccessResponseXML(w, encConfigData)
writeSuccessResponseXML(w, configData)
}
// DeleteBucketEncryptionHandler - Removes bucket encryption configuration

@@ -173,16 +169,10 @@ func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter
}
// Delete bucket encryption config from object layer
if err = objAPI.DeleteBucketSSEConfig(ctx, bucket); err != nil {
if err = globalBucketMetadataSys.Update(bucket, bucketSSEConfig, nil); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Remove entry from the in-memory bucket encryption cache
globalBucketSSEConfigSys.Remove(bucket)
// Update peer MinIO servers of the updated bucket encryption config
globalNotificationSys.RemoveBucketSSEConfig(ctx, bucket)
writeSuccessNoContent(w)
}
@ -18,19 +18,14 @@ package cmd
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"io"
|
||||
"path"
|
||||
"sync"
|
||||
|
||||
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
|
||||
)
|
||||
|
||||
// BucketSSEConfigSys - in-memory cache of bucket encryption config
|
||||
type BucketSSEConfigSys struct {
|
||||
sync.RWMutex
|
||||
bucketSSEConfigMap map[string]*bucketsse.BucketSSEConfig
|
||||
}
|
||||
|
||||
|
@ -44,15 +39,20 @@ func NewBucketSSEConfigSys() *BucketSSEConfigSys {
|
|||
// load - Loads the bucket encryption configuration for the given list of buckets
|
||||
func (sys *BucketSSEConfigSys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
|
||||
for _, bucket := range buckets {
|
||||
config, err := objAPI.GetBucketSSEConfig(GlobalContext, bucket.Name)
|
||||
configData, err := globalBucketMetadataSys.GetConfig(bucket.Name, bucketSSEConfig)
|
||||
if err != nil {
|
||||
if _, ok := err.(BucketSSEConfigNotFound); ok {
|
||||
if errors.Is(err, errConfigNotFound) {
|
||||
continue
|
||||
}
|
||||
// Quorum errors should be returned.
|
||||
return err
|
||||
}
|
||||
sys.Set(bucket.Name, config)
|
||||
|
||||
config, err := bucketsse.ParseBucketSSEConfig(bytes.NewReader(configData))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sys.bucketSSEConfigMap[bucket.Name] = config
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -74,86 +74,28 @@ func (sys *BucketSSEConfigSys) Init(buckets []BucketInfo, objAPI ObjectLayer) er
|
|||
}
|
||||
|
||||
// Get - gets bucket encryption config for the given bucket.
|
||||
func (sys *BucketSSEConfigSys) Get(bucket string) (config *bucketsse.BucketSSEConfig, ok bool) {
|
||||
// We don't cache bucket encryption config in gateway mode.
|
||||
func (sys *BucketSSEConfigSys) Get(bucket string) (config *bucketsse.BucketSSEConfig, err error) {
|
||||
if globalIsGateway {
|
||||
objAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objAPI == nil {
|
||||
return
|
||||
return nil, errServerNotInitialized
|
||||
}
|
||||
|
||||
cfg, err := objAPI.GetBucketSSEConfig(GlobalContext, bucket)
|
||||
return nil, BucketSSEConfigNotFound{Bucket: bucket}
|
||||
}
|
||||
|
||||
config, ok := sys.bucketSSEConfigMap[bucket]
|
||||
if !ok {
|
||||
configData, err := globalBucketMetadataSys.GetConfig(bucket, bucketSSEConfig)
|
||||
if err != nil {
|
||||
return
|
||||
if errors.Is(err, errConfigNotFound) {
|
||||
return nil, BucketSSEConfigNotFound{Bucket: bucket}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return cfg, true
|
||||
return bucketsse.ParseBucketSSEConfig(bytes.NewReader(configData))
|
||||
}
|
||||
|
||||
sys.Lock()
|
||||
defer sys.Unlock()
|
||||
config, ok = sys.bucketSSEConfigMap[bucket]
|
||||
return
|
||||
}
|
||||
|
||||
// Set - sets bucket encryption config to given bucket name.
|
||||
func (sys *BucketSSEConfigSys) Set(bucket string, config *bucketsse.BucketSSEConfig) {
|
||||
// We don't cache bucket encryption config in gateway mode.
|
||||
if globalIsGateway {
|
||||
return
|
||||
}
|
||||
|
||||
sys.Lock()
|
||||
defer sys.Unlock()
|
||||
sys.bucketSSEConfigMap[bucket] = config
|
||||
}
|
||||
|
||||
// Remove - removes bucket encryption config for given bucket.
|
||||
func (sys *BucketSSEConfigSys) Remove(bucket string) {
|
||||
sys.Lock()
|
||||
defer sys.Unlock()
|
||||
|
||||
delete(sys.bucketSSEConfigMap, bucket)
|
||||
}
|
||||
|
||||
// saveBucketSSEConfig - save bucket encryption config for given bucket.
|
||||
func saveBucketSSEConfig(ctx context.Context, objAPI ObjectLayer, bucket string, config *bucketsse.BucketSSEConfig) error {
|
||||
data, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Path to store bucket encryption config for the given bucket.
|
||||
configFile := path.Join(bucketConfigPrefix, bucket, bucketSSEConfig)
|
||||
return saveConfig(ctx, objAPI, configFile, data)
|
||||
}
|
||||
|
||||
// getBucketSSEConfig - get bucket encryption config for given bucket.
|
||||
func getBucketSSEConfig(objAPI ObjectLayer, bucket string) (*bucketsse.BucketSSEConfig, error) {
|
||||
// Path to bucket-encryption.xml for the given bucket.
|
||||
configFile := path.Join(bucketConfigPrefix, bucket, bucketSSEConfig)
|
||||
configData, err := readConfig(GlobalContext, objAPI, configFile)
|
||||
if err != nil {
|
||||
if err == errConfigNotFound {
|
||||
err = BucketSSEConfigNotFound{Bucket: bucket}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return bucketsse.ParseBucketSSEConfig(bytes.NewReader(configData))
|
||||
}
|
||||
|
||||
// removeBucketSSEConfig - removes bucket encryption config for given bucket.
|
||||
func removeBucketSSEConfig(ctx context.Context, objAPI ObjectLayer, bucket string) error {
|
||||
// Path to bucket-encryption.xml for the given bucket.
|
||||
configFile := path.Join(bucketConfigPrefix, bucket, bucketSSEConfig)
|
||||
|
||||
if err := deleteConfig(ctx, objAPI, configFile); err != nil {
|
||||
if err == errConfigNotFound {
|
||||
return BucketSSEConfigNotFound{Bucket: bucket}
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return config, nil
|
||||
}
|
||||
|
||||
// validateBucketSSEConfig parses bucket encryption configuration and validates if it is supported by MinIO.
|
||||
|
|
|
@ -19,6 +19,7 @@ package cmd
|
|||
import (
|
||||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
|
@ -399,9 +400,9 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
|||
continue
|
||||
}
|
||||
|
||||
if _, ok := globalBucketObjectLockSys.Get(bucket); ok {
|
||||
if err := enforceRetentionBypassForDelete(ctx, r, bucket, object.ObjectName, getObjectInfoFn); err != ErrNone {
|
||||
dErrs[index] = err
|
||||
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
|
||||
if apiErr := enforceRetentionBypassForDelete(ctx, r, bucket, object.ObjectName, getObjectInfoFn); apiErr != ErrNone {
|
||||
dErrs[index] = apiErr
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
@ -538,6 +539,10 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
|
|||
return
|
||||
}
|
||||
|
||||
if objectLockEnabled {
|
||||
globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)
|
||||
}
|
||||
|
||||
// Make sure to add Location information here only for bucket
|
||||
w.Header().Set(xhttp.Location,
|
||||
getObjectLocation(r, globalDomainNames, bucket, ""))
|
||||
|
@ -568,10 +573,8 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
|
|||
return
|
||||
}
|
||||
|
||||
if objectLockEnabled && !globalIsGateway {
|
||||
ret := &objectlock.Retention{}
|
||||
globalBucketObjectLockSys.Set(bucket, ret)
|
||||
globalNotificationSys.PutBucketObjectLockConfig(ctx, bucket, ret)
|
||||
if objectLockEnabled {
|
||||
globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)
|
||||
}
|
||||
|
||||
// Make sure to add Location information here only for bucket
|
||||
|
@ -746,11 +749,13 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
|||
var objectEncryptionKey crypto.ObjectKey
|
||||
|
||||
// Check if bucket encryption is enabled
|
||||
_, encEnabled := globalBucketSSEConfigSys.Get(bucket)
|
||||
// This request header needs to be set prior to setting ObjectOptions
|
||||
if (globalAutoEncryption || encEnabled) && !crypto.SSEC.IsRequested(r.Header) {
|
||||
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
|
||||
if _, err = globalBucketSSEConfigSys.Get(bucket); err == nil || globalAutoEncryption {
|
||||
// This request header needs to be set prior to setting ObjectOptions
|
||||
if !crypto.SSEC.IsRequested(r.Header) {
|
||||
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
|
||||
}
|
||||
}
|
||||
|
||||
// get gateway encryption options
|
||||
var opts ObjectOptions
|
||||
opts, err = putOpts(ctx, r, bucket, object, metadata)
|
||||
|
@ -908,9 +913,11 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
|
|||
}
|
||||
}
|
||||
|
||||
if _, ok := globalBucketObjectLockSys.Get(bucket); ok && forceDelete {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
if forceDelete {
|
||||
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
deleteBucket := objectAPI.DeleteBucket
|
||||
|
@ -930,7 +937,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
|
|||
}
|
||||
}
|
||||
|
||||
globalNotificationSys.DeleteBucket(ctx, bucket)
|
||||
globalNotificationSys.DeleteBucketMetadata(ctx, bucket)
|
||||
|
||||
// Write success response.
|
||||
writeSuccessNoContent(w)
|
||||
|
@ -1022,18 +1029,15 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
|
|||
return
|
||||
}
|
||||
|
||||
if err = objectAPI.SetBucketObjectLockConfig(ctx, bucket, config); err != nil {
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if config.Rule != nil {
|
||||
retention := config.ToRetention()
|
||||
globalBucketObjectLockSys.Set(bucket, retention)
|
||||
globalNotificationSys.PutBucketObjectLockConfig(ctx, bucket, retention)
|
||||
} else {
|
||||
globalBucketObjectLockSys.Set(bucket, &objectlock.Retention{})
|
||||
globalNotificationSys.PutBucketObjectLockConfig(ctx, bucket, &objectlock.Retention{})
|
||||
if err = globalBucketMetadataSys.Update(bucket, objectLockConfig, configData); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Write success response.
|
||||
|
@ -1058,20 +1062,18 @@ func (api objectAPIHandlers) GetBucketObjectLockConfigHandler(w http.ResponseWri
|
|||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// check if user has permissions to perform this operation
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketObjectLockConfigurationAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
lkCfg, err := objectAPI.GetBucketObjectLockConfig(ctx, bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
configData, err := xml.Marshal(lkCfg)
|
||||
configData, err := globalBucketMetadataSys.GetConfig(bucket, objectLockConfig)
|
||||
if err != nil {
|
||||
if errors.Is(err, errConfigNotFound) {
|
||||
err = BucketObjectLockConfigNotFound{Bucket: bucket}
|
||||
}
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
@ -1109,7 +1111,13 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h
|
|||
return
|
||||
}
|
||||
|
||||
if err = objectAPI.SetBucketTagging(ctx, bucket, tags); err != nil {
|
||||
configData, err := xml.Marshal(tags)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if err = globalBucketMetadataSys.Update(bucket, bucketTaggingConfigFile, configData); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
@ -1140,14 +1148,11 @@ func (api objectAPIHandlers) GetBucketTaggingHandler(w http.ResponseWriter, r *h
|
|||
return
|
||||
}
|
||||
|
||||
t, err := objectAPI.GetBucketTagging(ctx, bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
configData, err := xml.Marshal(t)
|
||||
configData, err := globalBucketMetadataSys.GetConfig(bucket, bucketTaggingConfigFile)
|
||||
if err != nil {
|
||||
if errors.Is(err, errConfigNotFound) {
|
||||
err = BucketTaggingNotFound{Bucket: bucket}
|
||||
}
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
@ -1177,7 +1182,7 @@ func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r
|
|||
return
|
||||
}
|
||||
|
||||
if err := objectAPI.DeleteBucketTagging(ctx, bucket); err != nil {
|
||||
if err := globalBucketMetadataSys.Update(bucket, bucketTaggingConfigFile, nil); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
|
|
@ -18,6 +18,7 @@ package cmd
|
|||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
|
@ -78,13 +79,16 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
|
|||
return
|
||||
}
|
||||
|
||||
if err = objAPI.SetBucketLifecycle(ctx, bucket, bucketLifecycle); err != nil {
|
||||
configData, err := xml.Marshal(bucketLifecycle)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
globalLifecycleSys.Set(bucket, bucketLifecycle)
|
||||
globalNotificationSys.SetBucketLifecycle(ctx, bucket, bucketLifecycle)
|
||||
if err = globalBucketMetadataSys.Update(bucket, bucketLifecycleConfig, configData); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Success.
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
|
@ -116,21 +120,17 @@ func (api objectAPIHandlers) GetBucketLifecycleHandler(w http.ResponseWriter, r
|
|||
return
|
||||
}
|
||||
|
||||
// Read bucket access lifecycle.
|
||||
bucketLifecycle, err := objAPI.GetBucketLifecycle(ctx, bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
lifecycleData, err := xml.Marshal(bucketLifecycle)
|
||||
configData, err := globalBucketMetadataSys.GetConfig(bucket, bucketLifecycleConfig)
|
||||
if err != nil {
|
||||
if errors.Is(err, errConfigNotFound) {
|
||||
err = BucketLifecycleNotFound{Bucket: bucket}
|
||||
}
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Write lifecycle configuration to client.
|
||||
writeSuccessResponseXML(w, lifecycleData)
|
||||
writeSuccessResponseXML(w, configData)
|
||||
}
|
||||
|
||||
// DeleteBucketLifecycleHandler - This HTTP handler removes bucket lifecycle configuration.
|
||||
|
@ -159,14 +159,11 @@ func (api objectAPIHandlers) DeleteBucketLifecycleHandler(w http.ResponseWriter,
|
|||
return
|
||||
}
|
||||
|
||||
if err := objAPI.DeleteBucketLifecycle(ctx, bucket); err != nil {
|
||||
if err := globalBucketMetadataSys.Update(bucket, bucketLifecycleConfig, nil); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
globalLifecycleSys.Remove(bucket)
|
||||
globalNotificationSys.RemoveBucketLifecycle(ctx, bucket)
|
||||
|
||||
// Success.
|
||||
writeSuccessNoContent(w)
|
||||
}
|
|
@ -18,10 +18,7 @@ package cmd
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"path"
|
||||
"sync"
|
||||
"errors"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
|
@ -35,83 +32,32 @@ const (
|
|||
|
||||
// LifecycleSys - Bucket lifecycle subsystem.
|
||||
type LifecycleSys struct {
|
||||
sync.RWMutex
|
||||
bucketLifecycleMap map[string]*lifecycle.Lifecycle
|
||||
}
|
||||
|
||||
// Set - sets lifecycle config to given bucket name.
|
||||
func (sys *LifecycleSys) Set(bucketName string, lifecycle *lifecycle.Lifecycle) {
|
||||
if globalIsGateway {
|
||||
// no-op
|
||||
return
|
||||
}
|
||||
|
||||
sys.Lock()
|
||||
defer sys.Unlock()
|
||||
|
||||
sys.bucketLifecycleMap[bucketName] = lifecycle
|
||||
}
|
||||
|
||||
// Get - gets lifecycle config associated to a given bucket name.
|
||||
func (sys *LifecycleSys) Get(bucketName string) (lc *lifecycle.Lifecycle, ok bool) {
|
||||
func (sys *LifecycleSys) Get(bucketName string) (lc *lifecycle.Lifecycle, err error) {
|
||||
if globalIsGateway {
|
||||
// When gateway is enabled, no cached value
|
||||
// is used to validate life cycle policies.
|
||||
objAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objAPI == nil {
|
||||
return
|
||||
return nil, errServerNotInitialized
|
||||
}
|
||||
|
||||
l, err := objAPI.GetBucketLifecycle(GlobalContext, bucketName)
|
||||
return nil, BucketLifecycleNotFound{Bucket: bucketName}
|
||||
}
|
||||
|
||||
lc, ok := sys.bucketLifecycleMap[bucketName]
|
||||
if !ok {
|
||||
configData, err := globalBucketMetadataSys.GetConfig(bucketName, bucketLifecycleConfig)
|
||||
if err != nil {
|
||||
return
|
||||
if errors.Is(err, errConfigNotFound) {
|
||||
return nil, BucketLifecycleNotFound{Bucket: bucketName}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return l, true
|
||||
return lifecycle.ParseLifecycleConfig(bytes.NewReader(configData))
|
||||
}
|
||||
|
||||
sys.Lock()
|
||||
defer sys.Unlock()
|
||||
lc, ok = sys.bucketLifecycleMap[bucketName]
|
||||
return
|
||||
}
|
||||
|
||||
func saveLifecycleConfig(ctx context.Context, objAPI ObjectLayer, bucketName string, bucketLifecycle *lifecycle.Lifecycle) error {
|
||||
data, err := xml.Marshal(bucketLifecycle)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Construct path to lifecycle.xml for the given bucket.
|
||||
configFile := path.Join(bucketConfigPrefix, bucketName, bucketLifecycleConfig)
|
||||
return saveConfig(ctx, objAPI, configFile, data)
|
||||
}
|
||||
|
||||
// getLifecycleConfig - get lifecycle config for given bucket name.
|
||||
func getLifecycleConfig(objAPI ObjectLayer, bucketName string) (*lifecycle.Lifecycle, error) {
|
||||
// Construct path to lifecycle.xml for the given bucket.
|
||||
configFile := path.Join(bucketConfigPrefix, bucketName, bucketLifecycleConfig)
|
||||
configData, err := readConfig(GlobalContext, objAPI, configFile)
|
||||
if err != nil {
|
||||
if err == errConfigNotFound {
|
||||
err = BucketLifecycleNotFound{Bucket: bucketName}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return lifecycle.ParseLifecycleConfig(bytes.NewReader(configData))
|
||||
}
|
||||
|
||||
func removeLifecycleConfig(ctx context.Context, objAPI ObjectLayer, bucketName string) error {
|
||||
// Construct path to lifecycle.xml for the given bucket.
|
||||
configFile := path.Join(bucketConfigPrefix, bucketName, bucketLifecycleConfig)
|
||||
|
||||
if err := deleteConfig(ctx, objAPI, configFile); err != nil {
|
||||
if err == errConfigNotFound {
|
||||
return BucketLifecycleNotFound{Bucket: bucketName}
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return lc, nil
|
||||
}
|
||||
|
||||
// NewLifecycleSys - creates new lifecycle system.
|
||||
|
@ -140,31 +86,21 @@ func (sys *LifecycleSys) Init(buckets []BucketInfo, objAPI ObjectLayer) error {
|
|||
// Loads lifecycle policies for all buckets into LifecycleSys.
|
||||
func (sys *LifecycleSys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
|
||||
for _, bucket := range buckets {
|
||||
config, err := getLifecycleConfig(objAPI, bucket.Name)
|
||||
configData, err := globalBucketMetadataSys.GetConfig(bucket.Name, bucketLifecycleConfig)
|
||||
if err != nil {
|
||||
if _, ok := err.(BucketLifecycleNotFound); ok {
|
||||
if errors.Is(err, errConfigNotFound) {
|
||||
continue
|
||||
}
|
||||
// Quorum errors should be returned.
|
||||
return err
|
||||
}
|
||||
|
||||
// Do not load the lifecycle configuration if it is not valid
|
||||
err = config.Validate()
|
||||
config, err := lifecycle.ParseLifecycleConfig(bytes.NewReader(configData))
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
logger.LogIf(GlobalContext, err)
|
||||
continue
|
||||
}
|
||||
|
||||
sys.Set(bucket.Name, config)
|
||||
sys.bucketLifecycleMap[bucket.Name] = config
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove - removes lifecycle config for given bucket name.
|
||||
func (sys *LifecycleSys) Remove(bucketName string) {
|
||||
sys.Lock()
|
||||
defer sys.Unlock()
|
||||
|
||||
delete(sys.bucketLifecycleMap, bucketName)
|
||||
}
|
||||
|
|
|
@ -1,165 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
)
|
||||
|
||||
const (
|
||||
legacyBucketObjectLockEnabledConfigFile = "object-lock-enabled.json"
|
||||
legacyBucketObjectLockEnabledConfig = `{"x-amz-bucket-object-lock-enabled":true}`
|
||||
|
||||
bucketMetadataFile = ".metadata.bin"
|
||||
bucketMetadataFormat = 1
|
||||
bucketMetadataVersion = 1
|
||||
)
|
||||
|
||||
//go:generate msgp -file $GOFILE -unexported
|
||||
|
||||
// bucketMetadata contains bucket metadata.
|
||||
// When adding/removing fields, regenerate the marshal code using the go generate above.
|
||||
// Only changing meaning of fields requires a version bump.
|
||||
// bucketMetadataFormat refers to the format.
|
||||
// bucketMetadataVersion can be used to track a rolling upgrade of a field.
|
||||
type bucketMetadata struct {
|
||||
Name string
|
||||
Created time.Time
|
||||
LockEnabled bool
|
||||
}
|
||||
|
||||
// newBucketMetadata creates bucketMetadata with the supplied name and Created to Now.
|
||||
func newBucketMetadata(name string) bucketMetadata {
|
||||
return bucketMetadata{
|
||||
Name: name,
|
||||
Created: UTCNow(),
|
||||
}
|
||||
}
|
||||
|
||||
// loadBucketMeta loads the metadata of bucket by name from ObjectLayer o.
|
||||
// If an error is returned the returned metadata will be default initialized.
|
||||
func loadBucketMeta(ctx context.Context, o ObjectLayer, name string) (bucketMetadata, error) {
|
||||
b := newBucketMetadata(name)
|
||||
configFile := path.Join(bucketConfigPrefix, name, bucketMetadataFile)
|
||||
data, err := readConfig(ctx, o, configFile)
|
||||
if err != nil {
|
||||
return b, err
|
||||
}
|
||||
if len(data) <= 4 {
|
||||
return b, fmt.Errorf("loadBucketMetadata: no data")
|
||||
}
|
||||
// Read header
|
||||
switch binary.LittleEndian.Uint16(data[0:2]) {
|
||||
case bucketMetadataFormat:
|
||||
default:
|
||||
return b, fmt.Errorf("loadBucketMetadata: unknown format: %d", binary.LittleEndian.Uint16(data[0:2]))
|
||||
}
|
||||
switch binary.LittleEndian.Uint16(data[2:4]) {
|
||||
case bucketMetadataVersion:
|
||||
default:
|
||||
return b, fmt.Errorf("loadBucketMetadata: unknown version: %d", binary.LittleEndian.Uint16(data[2:4]))
|
||||
}
|
||||
|
||||
// OK, parse data.
|
||||
_, err = b.UnmarshalMsg(data[4:])
|
||||
return b, err
|
||||
}
|
||||
|
||||
var errMetaDataConverted = errors.New("metadata converted")
|
||||
|
||||
// loadBucketMetadata loads and migrates to bucket metadata.
|
||||
func loadBucketMetadata(ctx context.Context, objectAPI ObjectLayer, bucket string) (bucketMetadata, error) {
|
||||
meta, err := loadBucketMeta(ctx, objectAPI, bucket)
|
||||
if err == nil {
|
||||
return meta, nil
|
||||
}
|
||||
|
||||
if err != errConfigNotFound {
|
||||
return meta, err
|
||||
}
|
||||
|
||||
// Control here means old bucket without bucket metadata. Hence we migrate existing settings.
|
||||
if err = meta.convertLegacyLockconfig(ctx, objectAPI); err != nil {
|
||||
return meta, err
|
||||
}
|
||||
|
||||
return meta, errMetaDataConverted
|
||||
}
|
||||
|
||||
func (b *bucketMetadata) convertLegacyLockconfig(ctx context.Context, objectAPI ObjectLayer) error {
|
||||
configFile := path.Join(bucketConfigPrefix, b.Name, legacyBucketObjectLockEnabledConfigFile)
|
||||
|
||||
save := func() error {
|
||||
if err := b.save(ctx, objectAPI); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := deleteConfig(ctx, objectAPI, configFile); err != nil && !errors.Is(err, errConfigNotFound) {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
configData, err := readConfig(ctx, objectAPI, configFile)
|
||||
if err != nil {
|
||||
if !errors.Is(err, errConfigNotFound) {
|
||||
return err
|
||||
}
|
||||
|
||||
return save()
|
||||
}
|
||||
|
||||
if string(configData) != legacyBucketObjectLockEnabledConfig {
|
||||
return fmt.Errorf("content mismatch in config file %v", configFile)
|
||||
}
|
||||
|
||||
b.LockEnabled = true
|
||||
return save()
|
||||
}
|
||||
|
||||
// save config to supplied ObjectLayer o.
|
||||
func (b *bucketMetadata) save(ctx context.Context, o ObjectLayer) error {
|
||||
data := make([]byte, 4, b.Msgsize()+4)
|
||||
// Put header
|
||||
binary.LittleEndian.PutUint16(data[0:2], bucketMetadataFormat)
|
||||
binary.LittleEndian.PutUint16(data[2:4], bucketMetadataVersion)
|
||||
|
||||
// Add data
|
||||
data, err := b.MarshalMsg(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
configFile := path.Join(bucketConfigPrefix, b.Name, bucketMetadataFile)
|
||||
return saveConfig(ctx, o, configFile, data)
|
||||
}
|
||||
|
||||
// removeBucketMeta bucket metadata.
|
||||
// If config does not exist no error is returned.
|
||||
func removeBucketMeta(ctx context.Context, obj ObjectLayer, bucket string) error {
|
||||
configFile := path.Join(bucketConfigPrefix, bucket, bucketMetadataFile)
|
||||
if err := deleteConfig(ctx, obj, configFile); err != nil && err != errConfigNotFound {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,160 +0,0 @@
|
|||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
import (
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
)
|
||||
|
||||
// DecodeMsg implements msgp.Decodable
|
||||
func (z *bucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, err = dc.ReadMapHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, err = dc.ReadMapKeyPtr()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "Name":
|
||||
z.Name, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Name")
|
||||
return
|
||||
}
|
||||
case "Created":
|
||||
z.Created, err = dc.ReadTime()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Created")
|
||||
return
|
||||
}
|
||||
case "LockEnabled":
|
||||
z.LockEnabled, err = dc.ReadBool()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "LockEnabled")
|
||||
return
|
||||
}
|
||||
default:
|
||||
err = dc.Skip()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z bucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// map header, size 3
|
||||
// write "Name"
|
||||
err = en.Append(0x83, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.Name)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Name")
|
||||
return
|
||||
}
|
||||
// write "Created"
|
||||
err = en.Append(0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteTime(z.Created)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Created")
|
||||
return
|
||||
}
|
||||
// write "LockEnabled"
|
||||
err = en.Append(0xab, 0x4c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteBool(z.LockEnabled)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "LockEnabled")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z bucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// map header, size 3
|
||||
// string "Name"
|
||||
o = append(o, 0x83, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
|
||||
o = msgp.AppendString(o, z.Name)
|
||||
// string "Created"
|
||||
o = append(o, 0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)
|
||||
o = msgp.AppendTime(o, z.Created)
|
||||
// string "LockEnabled"
|
||||
o = append(o, 0xab, 0x4c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64)
|
||||
o = msgp.AppendBool(o, z.LockEnabled)
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalMsg implements msgp.Unmarshaler
|
||||
func (z *bucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, bts, err = msgp.ReadMapKeyZC(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "Name":
|
||||
z.Name, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Name")
|
||||
return
|
||||
}
|
||||
case "Created":
|
||||
z.Created, bts, err = msgp.ReadTimeBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Created")
|
||||
return
|
||||
}
|
||||
case "LockEnabled":
|
||||
z.LockEnabled, bts, err = msgp.ReadBoolBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "LockEnabled")
|
||||
return
|
||||
}
|
||||
default:
|
||||
bts, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
o = bts
|
||||
return
|
||||
}
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z bucketMetadata) Msgsize() (s int) {
|
||||
s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize
|
||||
return
|
||||
}
|
|
@ -0,0 +1,245 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
)
|
||||
|
||||
// BucketMetadataSys captures all bucket metadata for a given cluster.
|
||||
type BucketMetadataSys struct {
|
||||
sync.RWMutex
|
||||
metadataMap map[string]BucketMetadata
|
||||
|
||||
// Do fallback to old config when unable to find a bucket config
|
||||
fallback bool
|
||||
}
|
||||
|
||||
// Remove bucket metadata from memory.
|
||||
func (sys *BucketMetadataSys) Remove(bucket string) {
|
||||
if globalIsGateway {
|
||||
return
|
||||
}
|
||||
sys.Lock()
|
||||
delete(sys.metadataMap, bucket)
|
||||
sys.Unlock()
|
||||
}
|
||||
|
||||
// Set - sets a new metadata in-memory.
|
||||
// Only a shallow copy is saved and fields with references
|
||||
// cannot be modified without causing a race condition,
|
||||
// so they should be replaced atomically and not appended to, etc.
|
||||
// Data is not persisted to disk.
|
||||
func (sys *BucketMetadataSys) Set(bucket string, meta BucketMetadata) {
|
||||
if globalIsGateway {
|
||||
return
|
||||
}
|
||||
|
||||
if bucket != minioMetaBucket {
|
||||
sys.Lock()
|
||||
sys.metadataMap[bucket] = meta
|
||||
sys.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// Update update bucket metadata for the specified config file.
|
||||
// The configData data should not be modified after being sent here.
|
||||
func (sys *BucketMetadataSys) Update(bucket string, configFile string, configData []byte) error {
|
||||
objAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
if globalIsGateway {
|
||||
if configFile == bucketPolicyConfig {
|
||||
config, err := policy.ParseConfig(bytes.NewReader(configData), bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return objAPI.SetBucketPolicy(GlobalContext, bucket, config)
|
||||
}
|
||||
return NotImplemented{}
|
||||
}
|
||||
|
||||
if bucket == minioMetaBucket {
|
||||
return errInvalidArgument
|
||||
}
|
||||
|
||||
defer globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)
|
||||
|
||||
sys.Lock()
|
||||
meta, ok := sys.metadataMap[bucket]
|
||||
sys.Unlock()
|
||||
if !ok {
|
||||
return BucketNotFound{Bucket: bucket}
|
||||
}
|
||||
|
||||
switch configFile {
|
||||
case bucketPolicyConfig:
|
||||
meta.PolicyConfigJSON = configData
|
||||
case bucketNotificationConfig:
|
||||
meta.NotificationXML = configData
|
||||
case bucketLifecycleConfig:
|
||||
meta.LifecycleConfigXML = configData
|
||||
case bucketSSEConfig:
|
||||
meta.EncryptionConfigXML = configData
|
||||
case bucketTaggingConfigFile:
|
||||
meta.TaggingConfigXML = configData
|
||||
case objectLockConfig:
|
||||
meta.ObjectLockConfigurationXML = configData
|
||||
case bucketQuotaConfigFile:
|
||||
meta.QuotaConfigJSON = configData
|
||||
default:
|
||||
return fmt.Errorf("Unknown bucket %s metadata update requested %s", bucket, configFile)
|
||||
}
|
||||
|
||||
if err := meta.Save(GlobalContext, objAPI); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sys.Lock()
|
||||
sys.metadataMap[bucket] = meta
|
||||
sys.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get metadata for a bucket.
|
||||
// If no metadata exists errConfigNotFound is returned and a new metadata is returned.
|
||||
// Only a shallow copy is returned, so referenced data should not be modified,
|
||||
// but can be replaced atomically.
|
||||
func (sys *BucketMetadataSys) Get(bucket string) (BucketMetadata, error) {
|
||||
if globalIsGateway || bucket == minioMetaBucket {
|
||||
return newBucketMetadata(bucket), errConfigNotFound
|
||||
}
|
||||
|
||||
sys.RLock()
|
||||
defer sys.RUnlock()
|
||||
|
||||
meta, ok := sys.metadataMap[bucket]
|
||||
if !ok {
|
||||
return newBucketMetadata(bucket), errConfigNotFound
|
||||
}
|
||||
|
||||
return meta, nil
|
||||
}
|
||||
|
||||
// GetConfig returns a specific configuration from the bucket metadata.
|
||||
// The returned data should be copied before being modified.
|
||||
func (sys *BucketMetadataSys) GetConfig(bucket string, configFile string) ([]byte, error) {
|
||||
if globalIsGateway {
|
||||
return nil, NotImplemented{}
|
||||
}
|
||||
|
||||
if bucket == minioMetaBucket {
|
||||
return nil, errInvalidArgument
|
||||
}
|
||||
|
||||
sys.RLock()
|
||||
defer sys.RUnlock()
|
||||
|
||||
meta, ok := sys.metadataMap[bucket]
|
||||
if !ok {
|
||||
if !sys.fallback {
|
||||
return nil, errConfigNotFound
|
||||
}
|
||||
objAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objAPI == nil {
|
||||
return nil, errServerNotInitialized
|
||||
}
|
||||
var err error
|
||||
meta, err = loadBucketMetadata(GlobalContext, objAPI, bucket)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sys.metadataMap[bucket] = meta
|
||||
}
|
||||
|
||||
var configData []byte
|
||||
switch configFile {
|
||||
case bucketPolicyConfig:
|
||||
configData = meta.PolicyConfigJSON
|
||||
case bucketNotificationConfig:
|
||||
configData = meta.NotificationXML
|
||||
case bucketLifecycleConfig:
|
||||
configData = meta.LifecycleConfigXML
|
||||
case bucketSSEConfig:
|
||||
configData = meta.EncryptionConfigXML
|
||||
case bucketTaggingConfigFile:
|
||||
configData = meta.TaggingConfigXML
|
||||
case objectLockConfig:
|
||||
configData = meta.ObjectLockConfigurationXML
|
||||
case bucketQuotaConfigFile:
|
||||
configData = meta.QuotaConfigJSON
|
||||
default:
|
||||
return nil, fmt.Errorf("Unknown bucket %s metadata update requested %s", bucket, configFile)
|
||||
}
|
||||
|
||||
if len(configData) == 0 {
|
||||
return nil, errConfigNotFound
|
||||
}
|
||||
|
||||
return configData, nil
|
||||
}
|
||||
|
||||
// Init - initializes bucket metadata system for all buckets.
|
||||
func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
|
||||
if objAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
// In gateway mode, we don't need to load the policies
|
||||
// from the backend.
|
||||
if globalIsGateway {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Load PolicySys once during boot.
|
||||
return sys.load(ctx, buckets, objAPI)
|
||||
}
|
||||
|
||||
// Loads bucket metadata for all buckets into BucketMetadataSys.
|
||||
// When if done successfully fallback will be disabled.
|
||||
func (sys *BucketMetadataSys) load(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
|
||||
sys.Lock()
|
||||
defer sys.Unlock()
|
||||
|
||||
for _, bucket := range buckets {
|
||||
meta, err := loadBucketMetadata(ctx, objAPI, bucket.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sys.metadataMap[bucket.Name] = meta
|
||||
}
|
||||
sys.fallback = false
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewBucketMetadataSys - creates new policy system.
|
||||
func NewBucketMetadataSys() *BucketMetadataSys {
|
||||
return &BucketMetadataSys{
|
||||
metadataMap: make(map[string]BucketMetadata),
|
||||
|
||||
// Do fallback until all buckets have been loaded.
|
||||
fallback: true,
|
||||
}
|
||||
}
|
|
@ -0,0 +1,221 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
)
|
||||
|
||||
const (
|
||||
legacyBucketObjectLockEnabledConfigFile = "object-lock-enabled.json"
|
||||
legacyBucketObjectLockEnabledConfig = `{"x-amz-bucket-object-lock-enabled":true}`
|
||||
|
||||
bucketMetadataFile = ".metadata.bin"
|
||||
bucketMetadataFormat = 1
|
||||
bucketMetadataVersion = 1
|
||||
)
|
||||
|
||||
var (
|
||||
defaultBucketObjectLockConfig = []byte(`<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><ObjectLockEnabled>Enabled</ObjectLockEnabled></ObjectLockConfiguration>`)
|
||||
)
|
||||
|
||||
//go:generate msgp -file $GOFILE
|
||||
|
||||
// BucketMetadata contains bucket metadata.
|
||||
// When adding/removing fields, regenerate the marshal code using the go generate above.
|
||||
// Only changing meaning of fields requires a version bump.
|
||||
// bucketMetadataFormat refers to the format.
|
||||
// bucketMetadataVersion can be used to track a rolling upgrade of a field.
|
||||
type BucketMetadata struct {
|
||||
Name string
|
||||
Created time.Time
|
||||
LockEnabled bool // legacy not used anymore.
|
||||
PolicyConfigJSON []byte
|
||||
NotificationXML []byte
|
||||
LifecycleConfigXML []byte
|
||||
ObjectLockConfigurationXML []byte
|
||||
EncryptionConfigXML []byte
|
||||
TaggingConfigXML []byte
|
||||
QuotaConfigJSON []byte
|
||||
}
|
||||
|
||||
// newBucketMetadata creates BucketMetadata with the supplied name and Created to Now.
|
||||
func newBucketMetadata(name string) BucketMetadata {
|
||||
return BucketMetadata{
|
||||
Name: name,
|
||||
Created: UTCNow(),
|
||||
}
|
||||
}
|
||||
|
||||
// Load - loads the metadata of bucket by name from ObjectLayer api.
|
||||
// If an error is returned the returned metadata will be default initialized.
|
||||
func (b *BucketMetadata) Load(ctx context.Context, api ObjectLayer, name string) error {
|
||||
configFile := path.Join(bucketConfigPrefix, name, bucketMetadataFile)
|
||||
data, err := readConfig(ctx, api, configFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(data) <= 4 {
|
||||
return fmt.Errorf("loadBucketMetadata: no data")
|
||||
}
|
||||
// Read header
|
||||
switch binary.LittleEndian.Uint16(data[0:2]) {
|
||||
case bucketMetadataFormat:
|
||||
default:
|
||||
return fmt.Errorf("loadBucketMetadata: unknown format: %d", binary.LittleEndian.Uint16(data[0:2]))
|
||||
}
|
||||
switch binary.LittleEndian.Uint16(data[2:4]) {
|
||||
case bucketMetadataVersion:
|
||||
default:
|
||||
return fmt.Errorf("loadBucketMetadata: unknown version: %d", binary.LittleEndian.Uint16(data[2:4]))
|
||||
}
|
||||
|
||||
// OK, parse data.
|
||||
_, err = b.UnmarshalMsg(data[4:])
|
||||
return err
|
||||
}
|
||||
|
||||
// loadBucketMetadata loads and migrates to bucket metadata.
|
||||
func loadBucketMetadata(ctx context.Context, objectAPI ObjectLayer, bucket string) (BucketMetadata, error) {
|
||||
b := newBucketMetadata(bucket)
|
||||
err := b.Load(ctx, objectAPI, bucket)
|
||||
if err == nil {
|
||||
return b, b.convertLegacyConfigs(ctx, objectAPI)
|
||||
}
|
||||
|
||||
if err != errConfigNotFound {
|
||||
return b, err
|
||||
}
|
||||
|
||||
// Old bucket without bucket metadata. Hence we migrate existing settings.
|
||||
return b, b.convertLegacyConfigs(ctx, objectAPI)
|
||||
}
|
||||
|
||||
func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI ObjectLayer) error {
|
||||
legacyConfigs := []string{
|
||||
legacyBucketObjectLockEnabledConfigFile,
|
||||
bucketPolicyConfig,
|
||||
bucketNotificationConfig,
|
||||
bucketLifecycleConfig,
|
||||
bucketQuotaConfigFile,
|
||||
bucketSSEConfig,
|
||||
bucketTaggingConfigFile,
|
||||
objectLockConfig,
|
||||
}
|
||||
|
||||
configs := make(map[string][]byte)
|
||||
|
||||
// Handle migration from lockEnabled to newer format.
|
||||
if b.LockEnabled {
|
||||
configs[objectLockConfig] = defaultBucketObjectLockConfig
|
||||
b.LockEnabled = false // legacy value unset it
|
||||
// we are only interested in b.ObjectLockConfigurationXML or objectLockConfig value
|
||||
}
|
||||
|
||||
for _, legacyFile := range legacyConfigs {
|
||||
configFile := path.Join(bucketConfigPrefix, b.Name, legacyFile)
|
||||
|
||||
configData, err := readConfig(ctx, objectAPI, configFile)
|
||||
if err != nil {
|
||||
if errors.Is(err, errConfigNotFound) {
|
||||
// legacy file config not found, proceed to look for new metadata.
|
||||
continue
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
configs[legacyFile] = configData
|
||||
}
|
||||
|
||||
if len(configs) == 0 {
|
||||
// nothing to update, return right away.
|
||||
return nil
|
||||
}
|
||||
|
||||
for legacyFile, configData := range configs {
|
||||
switch legacyFile {
|
||||
case legacyBucketObjectLockEnabledConfigFile:
|
||||
if string(configData) == legacyBucketObjectLockEnabledConfig {
|
||||
b.ObjectLockConfigurationXML = defaultBucketObjectLockConfig
|
||||
b.LockEnabled = false // legacy value unset it
|
||||
// we are only interested in b.ObjectLockConfigurationXML
|
||||
}
|
||||
case bucketPolicyConfig:
|
||||
b.PolicyConfigJSON = configData
|
||||
case bucketNotificationConfig:
|
||||
b.NotificationXML = configData
|
||||
case bucketLifecycleConfig:
|
||||
b.LifecycleConfigXML = configData
|
||||
case bucketSSEConfig:
|
||||
b.EncryptionConfigXML = configData
|
||||
case bucketTaggingConfigFile:
|
||||
b.TaggingConfigXML = configData
|
||||
case objectLockConfig:
|
||||
b.ObjectLockConfigurationXML = configData
|
||||
case bucketQuotaConfigFile:
|
||||
b.QuotaConfigJSON = configData
|
||||
}
|
||||
}
|
||||
|
||||
if err := b.Save(ctx, objectAPI); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for legacyFile := range configs {
|
||||
configFile := path.Join(bucketConfigPrefix, b.Name, legacyFile)
|
||||
if err := deleteConfig(ctx, objectAPI, configFile); err != nil && !errors.Is(err, errConfigNotFound) {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
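To make the migration concrete: each legacy config lived as its own object under a per-bucket prefix, and convertLegacyConfigs reads each one once, folds the bytes into the matching BucketMetadata field, saves the single metadata object, and then deletes the legacy object. A rough sketch of the path shape involved; the prefix value and bucket name are hypothetical stand-ins (the real prefix comes from bucketConfigPrefix), and only file names that appear elsewhere in this change are used:

package main

import (
	"fmt"
	"path"
)

func main() {
	// Hypothetical stand-in for bucketConfigPrefix.
	const prefix = "buckets"
	// Two of the legacy per-bucket config objects handled above.
	legacy := []string{"policy.json", "notification.xml"}

	for _, f := range legacy {
		// Each of these objects is read once and then removed after the
		// consolidated bucket metadata has been saved.
		fmt.Println(path.Join(prefix, "mybucket", f))
	}
}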
|
||||
|
||||
// Save config to supplied ObjectLayer api.
|
||||
func (b *BucketMetadata) Save(ctx context.Context, api ObjectLayer) error {
|
||||
data := make([]byte, 4, b.Msgsize()+4)
|
||||
|
||||
// Initialize the header.
|
||||
binary.LittleEndian.PutUint16(data[0:2], bucketMetadataFormat)
|
||||
binary.LittleEndian.PutUint16(data[2:4], bucketMetadataVersion)
|
||||
|
||||
// Marshal the bucket metadata
|
||||
data, err := b.MarshalMsg(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
configFile := path.Join(bucketConfigPrefix, b.Name, bucketMetadataFile)
|
||||
return saveConfig(ctx, api, configFile, data)
|
||||
}
|
||||
|
||||
// deleteBucketMetadata deletes bucket metadata
|
||||
// If config does not exist no error is returned.
|
||||
func deleteBucketMetadata(ctx context.Context, obj ObjectLayer, bucket string) error {
|
||||
configFile := path.Join(bucketConfigPrefix, bucket, bucketMetadataFile)
|
||||
if err := deleteConfig(ctx, obj, configFile); err != nil && err != errConfigNotFound {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,335 @@
|
|||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
import (
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
)
|
||||
|
||||
// DecodeMsg implements msgp.Decodable
|
||||
func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, err = dc.ReadMapHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, err = dc.ReadMapKeyPtr()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "Name":
|
||||
z.Name, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Name")
|
||||
return
|
||||
}
|
||||
case "Created":
|
||||
z.Created, err = dc.ReadTime()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Created")
|
||||
return
|
||||
}
|
||||
case "LockEnabled":
|
||||
z.LockEnabled, err = dc.ReadBool()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "LockEnabled")
|
||||
return
|
||||
}
|
||||
case "PolicyConfigJSON":
|
||||
z.PolicyConfigJSON, err = dc.ReadBytes(z.PolicyConfigJSON)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "PolicyConfigJSON")
|
||||
return
|
||||
}
|
||||
case "NotificationXML":
|
||||
z.NotificationXML, err = dc.ReadBytes(z.NotificationXML)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "NotificationXML")
|
||||
return
|
||||
}
|
||||
case "LifecycleConfigXML":
|
||||
z.LifecycleConfigXML, err = dc.ReadBytes(z.LifecycleConfigXML)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "LifecycleConfigXML")
|
||||
return
|
||||
}
|
||||
case "ObjectLockConfigurationXML":
|
||||
z.ObjectLockConfigurationXML, err = dc.ReadBytes(z.ObjectLockConfigurationXML)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjectLockConfigurationXML")
|
||||
return
|
||||
}
|
||||
case "EncryptionConfigXML":
|
||||
z.EncryptionConfigXML, err = dc.ReadBytes(z.EncryptionConfigXML)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "EncryptionConfigXML")
|
||||
return
|
||||
}
|
||||
case "TaggingConfigXML":
|
||||
z.TaggingConfigXML, err = dc.ReadBytes(z.TaggingConfigXML)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "TaggingConfigXML")
|
||||
return
|
||||
}
|
||||
case "QuotaConfigJSON":
|
||||
z.QuotaConfigJSON, err = dc.ReadBytes(z.QuotaConfigJSON)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "QuotaConfigJSON")
|
||||
return
|
||||
}
|
||||
default:
|
||||
err = dc.Skip()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// map header, size 10
|
||||
// write "Name"
|
||||
err = en.Append(0x8a, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.Name)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Name")
|
||||
return
|
||||
}
|
||||
// write "Created"
|
||||
err = en.Append(0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteTime(z.Created)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Created")
|
||||
return
|
||||
}
|
||||
// write "LockEnabled"
|
||||
err = en.Append(0xab, 0x4c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteBool(z.LockEnabled)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "LockEnabled")
|
||||
return
|
||||
}
|
||||
// write "PolicyConfigJSON"
|
||||
err = en.Append(0xb0, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x53, 0x4f, 0x4e)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteBytes(z.PolicyConfigJSON)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "PolicyConfigJSON")
|
||||
return
|
||||
}
|
||||
// write "NotificationXML"
|
||||
err = en.Append(0xaf, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x58, 0x4d, 0x4c)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteBytes(z.NotificationXML)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "NotificationXML")
|
||||
return
|
||||
}
|
||||
// write "LifecycleConfigXML"
|
||||
err = en.Append(0xb2, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteBytes(z.LifecycleConfigXML)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "LifecycleConfigXML")
|
||||
return
|
||||
}
|
||||
// write "ObjectLockConfigurationXML"
|
||||
err = en.Append(0xba, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x58, 0x4d, 0x4c)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteBytes(z.ObjectLockConfigurationXML)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjectLockConfigurationXML")
|
||||
return
|
||||
}
|
||||
// write "EncryptionConfigXML"
|
||||
err = en.Append(0xb3, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteBytes(z.EncryptionConfigXML)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "EncryptionConfigXML")
|
||||
return
|
||||
}
|
||||
// write "TaggingConfigXML"
|
||||
err = en.Append(0xb0, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteBytes(z.TaggingConfigXML)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "TaggingConfigXML")
|
||||
return
|
||||
}
|
||||
// write "QuotaConfigJSON"
|
||||
err = en.Append(0xaf, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x53, 0x4f, 0x4e)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteBytes(z.QuotaConfigJSON)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "QuotaConfigJSON")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// map header, size 10
|
||||
// string "Name"
|
||||
o = append(o, 0x8a, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
|
||||
o = msgp.AppendString(o, z.Name)
|
||||
// string "Created"
|
||||
o = append(o, 0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)
|
||||
o = msgp.AppendTime(o, z.Created)
|
||||
// string "LockEnabled"
|
||||
o = append(o, 0xab, 0x4c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64)
|
||||
o = msgp.AppendBool(o, z.LockEnabled)
|
||||
// string "PolicyConfigJSON"
|
||||
o = append(o, 0xb0, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x53, 0x4f, 0x4e)
|
||||
o = msgp.AppendBytes(o, z.PolicyConfigJSON)
|
||||
// string "NotificationXML"
|
||||
o = append(o, 0xaf, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x58, 0x4d, 0x4c)
|
||||
o = msgp.AppendBytes(o, z.NotificationXML)
|
||||
// string "LifecycleConfigXML"
|
||||
o = append(o, 0xb2, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
|
||||
o = msgp.AppendBytes(o, z.LifecycleConfigXML)
|
||||
// string "ObjectLockConfigurationXML"
|
||||
o = append(o, 0xba, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x58, 0x4d, 0x4c)
|
||||
o = msgp.AppendBytes(o, z.ObjectLockConfigurationXML)
|
||||
// string "EncryptionConfigXML"
|
||||
o = append(o, 0xb3, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
|
||||
o = msgp.AppendBytes(o, z.EncryptionConfigXML)
|
||||
// string "TaggingConfigXML"
|
||||
o = append(o, 0xb0, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
|
||||
o = msgp.AppendBytes(o, z.TaggingConfigXML)
|
||||
// string "QuotaConfigJSON"
|
||||
o = append(o, 0xaf, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x53, 0x4f, 0x4e)
|
||||
o = msgp.AppendBytes(o, z.QuotaConfigJSON)
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalMsg implements msgp.Unmarshaler
|
||||
func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, bts, err = msgp.ReadMapKeyZC(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "Name":
|
||||
z.Name, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Name")
|
||||
return
|
||||
}
|
||||
case "Created":
|
||||
z.Created, bts, err = msgp.ReadTimeBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Created")
|
||||
return
|
||||
}
|
||||
case "LockEnabled":
|
||||
z.LockEnabled, bts, err = msgp.ReadBoolBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "LockEnabled")
|
||||
return
|
||||
}
|
||||
case "PolicyConfigJSON":
|
||||
z.PolicyConfigJSON, bts, err = msgp.ReadBytesBytes(bts, z.PolicyConfigJSON)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "PolicyConfigJSON")
|
||||
return
|
||||
}
|
||||
case "NotificationXML":
|
||||
z.NotificationXML, bts, err = msgp.ReadBytesBytes(bts, z.NotificationXML)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "NotificationXML")
|
||||
return
|
||||
}
|
||||
case "LifecycleConfigXML":
|
||||
z.LifecycleConfigXML, bts, err = msgp.ReadBytesBytes(bts, z.LifecycleConfigXML)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "LifecycleConfigXML")
|
||||
return
|
||||
}
|
||||
case "ObjectLockConfigurationXML":
|
||||
z.ObjectLockConfigurationXML, bts, err = msgp.ReadBytesBytes(bts, z.ObjectLockConfigurationXML)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjectLockConfigurationXML")
|
||||
return
|
||||
}
|
||||
case "EncryptionConfigXML":
|
||||
z.EncryptionConfigXML, bts, err = msgp.ReadBytesBytes(bts, z.EncryptionConfigXML)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "EncryptionConfigXML")
|
||||
return
|
||||
}
|
||||
case "TaggingConfigXML":
|
||||
z.TaggingConfigXML, bts, err = msgp.ReadBytesBytes(bts, z.TaggingConfigXML)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "TaggingConfigXML")
|
||||
return
|
||||
}
|
||||
case "QuotaConfigJSON":
|
||||
z.QuotaConfigJSON, bts, err = msgp.ReadBytesBytes(bts, z.QuotaConfigJSON)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "QuotaConfigJSON")
|
||||
return
|
||||
}
|
||||
default:
|
||||
bts, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
o = bts
|
||||
return
|
||||
}
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z *BucketMetadata) Msgsize() (s int) {
|
||||
s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 16 + msgp.BytesPrefixSize + len(z.NotificationXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 27 + msgp.BytesPrefixSize + len(z.ObjectLockConfigurationXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON)
|
||||
return
|
||||
}
|
|
@ -9,8 +9,8 @@ import (
|
|||
"github.com/tinylib/msgp/msgp"
|
||||
)
|
||||
|
||||
func TestMarshalUnmarshalbucketMetadata(t *testing.T) {
|
||||
v := bucketMetadata{}
|
||||
func TestMarshalUnmarshalBucketMetadata(t *testing.T) {
|
||||
v := BucketMetadata{}
|
||||
bts, err := v.MarshalMsg(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -32,8 +32,8 @@ func TestMarshalUnmarshalbucketMetadata(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func BenchmarkMarshalMsgbucketMetadata(b *testing.B) {
|
||||
v := bucketMetadata{}
|
||||
func BenchmarkMarshalMsgBucketMetadata(b *testing.B) {
|
||||
v := BucketMetadata{}
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
@ -41,8 +41,8 @@ func BenchmarkMarshalMsgbucketMetadata(b *testing.B) {
|
|||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendMsgbucketMetadata(b *testing.B) {
|
||||
v := bucketMetadata{}
|
||||
func BenchmarkAppendMsgBucketMetadata(b *testing.B) {
|
||||
v := BucketMetadata{}
|
||||
bts := make([]byte, 0, v.Msgsize())
|
||||
bts, _ = v.MarshalMsg(bts[0:0])
|
||||
b.SetBytes(int64(len(bts)))
|
||||
|
@ -53,8 +53,8 @@ func BenchmarkAppendMsgbucketMetadata(b *testing.B) {
|
|||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalbucketMetadata(b *testing.B) {
|
||||
v := bucketMetadata{}
|
||||
func BenchmarkUnmarshalBucketMetadata(b *testing.B) {
|
||||
v := BucketMetadata{}
|
||||
bts, _ := v.MarshalMsg(nil)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(bts)))
|
||||
|
@ -67,17 +67,17 @@ func BenchmarkUnmarshalbucketMetadata(b *testing.B) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestEncodeDecodebucketMetadata(t *testing.T) {
|
||||
v := bucketMetadata{}
|
||||
func TestEncodeDecodeBucketMetadata(t *testing.T) {
|
||||
v := BucketMetadata{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
|
||||
m := v.Msgsize()
|
||||
if buf.Len() > m {
|
||||
t.Log("WARNING: TestEncodeDecodebucketMetadata Msgsize() is inaccurate")
|
||||
t.Log("WARNING: TestEncodeDecodeBucketMetadata Msgsize() is inaccurate")
|
||||
}
|
||||
|
||||
vn := bucketMetadata{}
|
||||
vn := BucketMetadata{}
|
||||
err := msgp.Decode(&buf, &vn)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
|
@ -91,8 +91,8 @@ func TestEncodeDecodebucketMetadata(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func BenchmarkEncodebucketMetadata(b *testing.B) {
|
||||
v := bucketMetadata{}
|
||||
func BenchmarkEncodeBucketMetadata(b *testing.B) {
|
||||
v := BucketMetadata{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
|
@ -105,8 +105,8 @@ func BenchmarkEncodebucketMetadata(b *testing.B) {
|
|||
en.Flush()
|
||||
}
|
||||
|
||||
func BenchmarkDecodebucketMetadata(b *testing.B) {
|
||||
v := bucketMetadata{}
|
||||
func BenchmarkDecodeBucketMetadata(b *testing.B) {
|
||||
v := BucketMetadata{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
|
@ -19,10 +19,8 @@ package cmd
|
|||
import (
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
|
@ -38,8 +36,6 @@ const (
|
|||
bucketNotificationConfig = "notification.xml"
|
||||
)
|
||||
|
||||
var errNoSuchNotifications = errors.New("The specified bucket does not have bucket notifications")
|
||||
|
||||
// GetBucketNotificationHandler - This HTTP handler returns event notification configuration
|
||||
// as per http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html.
|
||||
// It returns empty configuration if its not set.
|
||||
|
@ -73,64 +69,49 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
|
|||
return
|
||||
}
|
||||
|
||||
// Construct path to notification.xml for the given bucket.
|
||||
configFile := path.Join(bucketConfigPrefix, bucketName, bucketNotificationConfig)
|
||||
var config = event.Config{
|
||||
XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/",
|
||||
}
|
||||
config.SetRegion(globalServerRegion)
|
||||
|
||||
var config = event.Config{}
|
||||
configData, err := readConfig(ctx, objAPI, configFile)
|
||||
configData, err := globalBucketMetadataSys.GetConfig(bucketName, bucketNotificationConfig)
|
||||
if err != nil {
|
||||
if err != errConfigNotFound {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
config.SetRegion(globalServerRegion)
|
||||
config.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
|
||||
notificationBytes, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
} else {
|
||||
if err = xml.Unmarshal(configData, &config); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseXML(w, notificationBytes)
|
||||
return
|
||||
}
|
||||
|
||||
config.SetRegion(globalServerRegion)
|
||||
|
||||
if err = xml.Unmarshal(configData, &config); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if err = config.Validate(globalServerRegion, globalNotificationSys.targetList); err != nil {
|
||||
arnErr, ok := err.(*event.ErrARNNotFound)
|
||||
if !ok {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
for i, queue := range config.QueueList {
|
||||
// Remove ARN not found queues, because we previously allowed
|
||||
// adding unexpected entries into the config.
|
||||
//
|
||||
// With newer config disallowing changing / turning off
|
||||
// notification targets without removing ARN in notification
|
||||
// configuration we won't see this problem anymore.
|
||||
if reflect.DeepEqual(queue.ARN, arnErr.ARN) && i < len(config.QueueList) {
|
||||
config.QueueList = append(config.QueueList[:i],
|
||||
config.QueueList[i+1:]...)
|
||||
if err = config.Validate(globalServerRegion, globalNotificationSys.targetList); err != nil {
|
||||
arnErr, ok := err.(*event.ErrARNNotFound)
|
||||
if ok {
|
||||
for i, queue := range config.QueueList {
|
||||
// Remove ARN not found queues, because we previously allowed
|
||||
// adding unexpected entries into the config.
|
||||
//
|
||||
// With newer config disallowing changing / turning off
|
||||
// notification targets without removing ARN in notification
|
||||
// configuration we won't see this problem anymore.
|
||||
if reflect.DeepEqual(queue.ARN, arnErr.ARN) && i < len(config.QueueList) {
|
||||
config.QueueList = append(config.QueueList[:i],
|
||||
config.QueueList[i+1:]...)
|
||||
}
|
||||
// This is a one-time activity: we do it here so stale ARNs
// are removed, and we never reach a stage where stale
// notification configs linger.
|
||||
}
|
||||
} else {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
// This is a one time activity we shall do this
|
||||
// here and allow stale ARN to be removed. We shall
|
||||
// never reach a stage where we will have stale
|
||||
// notification configs.
|
||||
}
|
||||
}
|
||||
|
||||
// If xml namespace is empty, set a default value before returning.
|
||||
if config.XMLNS == "" {
|
||||
config.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
|
||||
}
|
||||
|
||||
notificationBytes, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
|
@ -178,8 +159,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
|
|||
return
|
||||
}
|
||||
|
||||
lreader := io.LimitReader(r.Body, r.ContentLength)
|
||||
config, err := event.ParseConfig(lreader, globalServerRegion, globalNotificationSys.targetList)
|
||||
config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalServerRegion, globalNotificationSys.targetList)
|
||||
if err != nil {
|
||||
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
|
||||
if event.IsEventError(err) {
|
||||
|
@ -189,14 +169,19 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
|
|||
return
|
||||
}
|
||||
|
||||
if err = saveNotificationConfig(ctx, objectAPI, bucketName, config); err != nil {
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
if err = globalBucketMetadataSys.Update(bucketName, bucketNotificationConfig, configData); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
rulesMap := config.ToRulesMap()
|
||||
globalNotificationSys.AddRulesMap(bucketName, rulesMap)
|
||||
globalNotificationSys.PutBucketNotification(ctx, bucketName, rulesMap)
|
||||
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
}
|
||||
|
|
|
@ -19,12 +19,9 @@ package cmd
|
|||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"math"
|
||||
"net/http"
|
||||
"path"
|
||||
"sync"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
|
@ -34,50 +31,36 @@ import (
|
|||
|
||||
// BucketObjectLockSys - map of bucket and retention configuration.
|
||||
type BucketObjectLockSys struct {
|
||||
sync.RWMutex
|
||||
retentionMap map[string]*objectlock.Retention
|
||||
}
|
||||
|
||||
// Set - set retention configuration.
|
||||
func (sys *BucketObjectLockSys) Set(bucketName string, retention *objectlock.Retention) {
|
||||
if globalIsGateway {
|
||||
// no-op
|
||||
return
|
||||
}
|
||||
|
||||
sys.Lock()
|
||||
sys.retentionMap[bucketName] = retention
|
||||
sys.Unlock()
|
||||
retentionMap map[string]objectlock.Retention
|
||||
}
|
||||
|
||||
// Get - Get retention configuration.
|
||||
func (sys *BucketObjectLockSys) Get(bucketName string) (r *objectlock.Retention, ok bool) {
|
||||
func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention, err error) {
|
||||
if globalIsGateway {
|
||||
// When gateway is enabled, no cached value
|
||||
// is used to validate bucket object lock configuration
|
||||
objAPI := newObjectLayerWithoutSafeModeFn()
|
||||
if objAPI == nil {
|
||||
return
|
||||
return r, errServerNotInitialized
|
||||
}
|
||||
|
||||
lc, err := objAPI.GetBucketObjectLockConfig(GlobalContext, bucketName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return lc.ToRetention(), true
|
||||
return r, nil
|
||||
}
|
||||
|
||||
sys.RLock()
|
||||
defer sys.RUnlock()
|
||||
r, ok = sys.retentionMap[bucketName]
|
||||
return r, ok
|
||||
}
|
||||
|
||||
// Remove - removes retention configuration.
|
||||
func (sys *BucketObjectLockSys) Remove(bucketName string) {
|
||||
sys.Lock()
|
||||
delete(sys.retentionMap, bucketName)
|
||||
sys.Unlock()
|
||||
r, ok := sys.retentionMap[bucketName]
|
||||
if ok {
|
||||
return r, nil
|
||||
}
|
||||
configData, err := globalBucketMetadataSys.GetConfig(bucketName, objectLockConfig)
|
||||
if err != nil {
|
||||
if errors.Is(err, errConfigNotFound) {
|
||||
return r, nil
|
||||
}
|
||||
return r, err
|
||||
}
|
||||
config, err := objectlock.ParseObjectLockConfig(bytes.NewReader(configData))
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
return config.ToRetention(), nil
|
||||
}
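The reworked Get returns a zero-value Retention (plus an error) instead of a (value, ok) pair, so callers such as checkPutObjectLockAllowed branch on LockEnabled and Validity. A self-contained illustration of that inheritance decision, using a stand-in struct with the same field names (the real type is objectlock.Retention):

package main

import (
	"fmt"
	"time"
)

// retention is a stand-in for objectlock.Retention, carrying only the
// fields exercised by the callers shown in this change.
type retention struct {
	Mode        string
	Validity    time.Duration
	LockEnabled bool
}

// defaultRetainUntil mirrors the bucket-default inheritance branch: when the
// bucket has no lock config (or no validity), there is nothing to inherit.
func defaultRetainUntil(r retention, now time.Time) (time.Time, bool) {
	if !r.LockEnabled || r.Validity <= 0 {
		return time.Time{}, false
	}
	return now.Add(r.Validity), true
}

func main() {
	cfg := retention{Mode: "COMPLIANCE", Validity: 30 * 24 * time.Hour, LockEnabled: true}
	if until, ok := defaultRetainUntil(cfg, time.Now().UTC()); ok {
		fmt.Println("objects inherit bucket retention until", until.Format(time.RFC3339))
	}
}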
|
||||
|
||||
// Similar to enforceRetentionBypassForDelete but for WebUI
|
||||
|
@ -345,11 +328,16 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj
|
|||
retentionRequested := objectlock.IsObjectLockRetentionRequested(r.Header)
|
||||
legalHoldRequested := objectlock.IsObjectLockLegalHoldRequested(r.Header)
|
||||
|
||||
retentionCfg, isWORMBucket := globalBucketObjectLockSys.Get(bucket)
|
||||
if !isWORMBucket {
|
||||
retentionCfg, err := globalBucketObjectLockSys.Get(bucket)
|
||||
if err != nil {
|
||||
return mode, retainDate, legalHold, ErrInvalidBucketObjectLockConfiguration
|
||||
}
|
||||
|
||||
if !retentionCfg.LockEnabled {
|
||||
if legalHoldRequested || retentionRequested {
|
||||
return mode, retainDate, legalHold, ErrInvalidBucketObjectLockConfiguration
|
||||
}
|
||||
|
||||
// If this is not a WORM enabled bucket, we should return right here.
|
||||
return mode, retainDate, legalHold, ErrNone
|
||||
}
|
||||
|
@ -406,7 +394,7 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj
|
|||
return rMode, rDate, legalHold, ErrNone
|
||||
}
|
||||
|
||||
if !retentionRequested && isWORMBucket {
|
||||
if !retentionRequested && retentionCfg.Validity > 0 {
|
||||
if retentionPermErr != ErrNone {
|
||||
return mode, retainDate, legalHold, retentionPermErr
|
||||
}
|
||||
|
@ -419,7 +407,7 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj
|
|||
if objExists && retainDate.After(t) {
|
||||
return mode, retainDate, legalHold, ErrObjectLocked
|
||||
}
|
||||
if !legalHoldRequested && !retentionCfg.IsEmpty() {
|
||||
if !legalHoldRequested {
|
||||
// inherit retention from bucket configuration
|
||||
return retentionCfg.Mode, objectlock.RetentionDate{Time: t.Add(retentionCfg.Validity)}, legalHold, ErrNone
|
||||
}
|
||||
|
@ -428,54 +416,10 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj
|
|||
return mode, retainDate, legalHold, ErrNone
|
||||
}
|
||||
|
||||
func readBucketObjectLockConfig(ctx context.Context, objAPI ObjectLayer, bucket string) (*objectlock.Config, error) {
|
||||
meta, err := loadBucketMetadata(ctx, objAPI, bucket)
|
||||
if err != nil && err != errMetaDataConverted {
|
||||
return nil, toObjectErr(err, bucket)
|
||||
}
|
||||
if !meta.LockEnabled {
|
||||
return nil, BucketObjectLockConfigNotFound{Bucket: bucket}
|
||||
}
|
||||
configFile := path.Join(bucketConfigPrefix, bucket, objectLockConfig)
|
||||
configData, err := readConfig(ctx, objAPI, configFile)
|
||||
if err != nil {
|
||||
if err != errConfigNotFound {
|
||||
return nil, toObjectErr(err, bucket)
|
||||
}
|
||||
return objectlock.NewObjectLockConfig(), nil
|
||||
}
|
||||
cfg, err := objectlock.ParseObjectLockConfig(bytes.NewReader(configData))
|
||||
if err != nil {
|
||||
return nil, toObjectErr(err, bucket)
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func saveBucketObjectLockConfig(ctx context.Context, objAPI ObjectLayer, bucket string, config *objectlock.Config) error {
|
||||
meta, err := loadBucketMetadata(ctx, objAPI, bucket)
|
||||
if err != nil && err != errMetaDataConverted {
|
||||
return toObjectErr(err, bucket)
|
||||
}
|
||||
if !meta.LockEnabled {
|
||||
return BucketObjectLockConfigNotFound{Bucket: bucket}
|
||||
}
|
||||
|
||||
data, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
return toObjectErr(err, bucket)
|
||||
}
|
||||
|
||||
configFile := path.Join(bucketConfigPrefix, bucket, objectLockConfig)
|
||||
if err = saveConfig(ctx, objAPI, configFile, data); err != nil {
|
||||
return toObjectErr(err, bucket)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewBucketObjectLockSys returns initialized BucketObjectLockSys
|
||||
func NewBucketObjectLockSys() *BucketObjectLockSys {
|
||||
return &BucketObjectLockSys{
|
||||
retentionMap: make(map[string]*objectlock.Retention),
|
||||
retentionMap: make(map[string]objectlock.Retention),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -498,38 +442,22 @@ func (sys *BucketObjectLockSys) Init(buckets []BucketInfo, objAPI ObjectLayer) e
|
|||
func (sys *BucketObjectLockSys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
|
||||
for _, bucket := range buckets {
|
||||
ctx := logger.SetReqInfo(GlobalContext, &logger.ReqInfo{BucketName: bucket.Name})
|
||||
meta, err := loadBucketMetadata(ctx, objAPI, bucket.Name)
|
||||
if err != nil {
|
||||
if err != errMetaDataConverted {
|
||||
return err
|
||||
}
|
||||
|
||||
meta.Created = bucket.Created
|
||||
logger.LogIf(ctx, meta.save(ctx, objAPI))
|
||||
}
|
||||
if !meta.LockEnabled {
|
||||
continue
|
||||
}
|
||||
|
||||
configFile := path.Join(bucketConfigPrefix, bucket.Name, objectLockConfig)
|
||||
configData, err := readConfig(ctx, objAPI, configFile)
|
||||
configData, err := globalBucketMetadataSys.GetConfig(bucket.Name, objectLockConfig)
|
||||
if err != nil {
|
||||
if errors.Is(err, errConfigNotFound) {
|
||||
globalBucketObjectLockSys.Set(bucket.Name, &objectlock.Retention{})
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
retention := objectlock.Retention{}
|
||||
config, err := objectlock.ParseObjectLockConfig(bytes.NewReader(configData))
|
||||
if err != nil {
|
||||
return err
|
||||
logger.LogIf(ctx, err)
|
||||
sys.retentionMap[bucket.Name] = retention
|
||||
continue
|
||||
}
|
||||
retention := &objectlock.Retention{}
|
||||
if config.Rule != nil {
|
||||
retention = config.ToRetention()
|
||||
}
|
||||
globalBucketObjectLockSys.Set(bucket.Name, retention)
|
||||
sys.retentionMap[bucket.Name] = config.ToRetention()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -18,6 +18,7 @@ package cmd
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
|
@ -87,13 +88,16 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
|
|||
return
|
||||
}
|
||||
|
||||
if err = objAPI.SetBucketPolicy(ctx, bucket, bucketPolicy); err != nil {
|
||||
configData, err := json.Marshal(bucketPolicy)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
globalPolicySys.Set(bucket, bucketPolicy)
|
||||
globalNotificationSys.SetBucketPolicy(ctx, bucket, bucketPolicy)
|
||||
if err = globalBucketMetadataSys.Update(bucket, bucketPolicyConfig, configData); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Success.
|
||||
writeSuccessNoContent(w)
|
||||
|
@ -125,14 +129,11 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
|
|||
return
|
||||
}
|
||||
|
||||
if err := objAPI.DeleteBucketPolicy(ctx, bucket); err != nil {
|
||||
if err := globalBucketMetadataSys.Update(bucket, bucketPolicyConfig, nil); err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
globalPolicySys.Remove(bucket)
|
||||
globalNotificationSys.RemoveBucketPolicy(ctx, bucket)
|
||||
|
||||
// Success.
|
||||
writeSuccessNoContent(w)
|
||||
}
|
||||
|
@ -164,18 +165,15 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
|
|||
}
|
||||
|
||||
// Read bucket access policy.
|
||||
bucketPolicy, err := objAPI.GetBucketPolicy(ctx, bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
policyData, err := json.Marshal(bucketPolicy)
|
||||
configData, err := globalBucketMetadataSys.GetConfig(bucket, bucketPolicyConfig)
|
||||
if err != nil {
|
||||
if errors.Is(err, errConfigNotFound) {
|
||||
err = BucketPolicyNotFound{Bucket: bucket}
|
||||
}
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Write to client.
|
||||
w.Write(policyData)
|
||||
writeSuccessResponseJSON(w, configData)
|
||||
}
|
||||
|
|
|
@ -18,14 +18,12 @@ package cmd
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
|
@ -38,53 +36,58 @@ import (
|
|||
|
||||
// PolicySys - policy subsystem.
|
||||
type PolicySys struct {
|
||||
sync.RWMutex
|
||||
bucketPolicyMap map[string]*policy.Policy
|
||||
}
|
||||
|
||||
// Set - sets policy to given bucket name. If policy is empty, existing policy is removed.
|
||||
func (sys *PolicySys) Set(bucketName string, policy *policy.Policy) {
|
||||
// Get returns stored bucket policy
|
||||
func (sys *PolicySys) Get(bucket string) (*policy.Policy, error) {
|
||||
if globalIsGateway {
|
||||
// Set policy is a no-op under gateway mode.
|
||||
return
|
||||
objAPI := newObjectLayerFn()
|
||||
if objAPI == nil {
|
||||
return nil, errServerNotInitialized
|
||||
}
|
||||
return objAPI.GetBucketPolicy(GlobalContext, bucket)
|
||||
}
|
||||
|
||||
sys.Lock()
|
||||
defer sys.Unlock()
|
||||
|
||||
if policy.IsEmpty() {
|
||||
delete(sys.bucketPolicyMap, bucketName)
|
||||
} else {
|
||||
sys.bucketPolicyMap[bucketName] = policy
|
||||
configData, err := globalBucketMetadataSys.GetConfig(bucket, bucketPolicyConfig)
|
||||
if err != nil {
|
||||
if !errors.Is(err, errConfigNotFound) {
|
||||
return nil, err
|
||||
}
|
||||
return nil, BucketPolicyNotFound{Bucket: bucket}
|
||||
}
|
||||
}
|
||||
|
||||
// Remove - removes policy for given bucket name.
|
||||
func (sys *PolicySys) Remove(bucketName string) {
|
||||
sys.Lock()
|
||||
defer sys.Unlock()
|
||||
|
||||
delete(sys.bucketPolicyMap, bucketName)
|
||||
return policy.ParseConfig(bytes.NewReader(configData), bucket)
|
||||
}
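This Get, like several other accessors in this change, translates the internal errConfigNotFound sentinel into a typed, bucket-scoped error, which is also why wrapped-error checks switch to errors.Is. A distilled, self-contained sketch of that idiom, using stand-in types and a hypothetical getConfig in place of globalBucketMetadataSys.GetConfig:

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the sentinel and typed errors used in this change.
var errConfigNotFound = errors.New("config file not found")

type BucketPolicyNotFound struct{ Bucket string }

func (e BucketPolicyNotFound) Error() string {
	return "no bucket policy configuration found for bucket: " + e.Bucket
}

// getConfig is a hypothetical config source standing in for
// globalBucketMetadataSys.GetConfig; here it always wraps the sentinel.
func getConfig(bucket, file string) ([]byte, error) {
	return nil, fmt.Errorf("reading %s for %s: %w", file, bucket, errConfigNotFound)
}

func policyFor(bucket string) ([]byte, error) {
	data, err := getConfig(bucket, "policy.json")
	if err != nil {
		if errors.Is(err, errConfigNotFound) {
			// Surface a caller-friendly, typed error instead of the sentinel.
			return nil, BucketPolicyNotFound{Bucket: bucket}
		}
		return nil, err
	}
	return data, nil
}

func main() {
	if _, err := policyFor("mybucket"); err != nil {
		fmt.Println(err)
	}
}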
|
||||
|
||||
// IsAllowed - checks given policy args is allowed to continue the Rest API.
|
||||
func (sys *PolicySys) IsAllowed(args policy.Args) bool {
|
||||
if globalIsGateway {
|
||||
// When gateway is enabled, no cached value
|
||||
// is used to validate bucket policies.
|
||||
objAPI := newObjectLayerFn()
|
||||
if objAPI != nil {
|
||||
config, err := objAPI.GetBucketPolicy(GlobalContext, args.BucketName)
|
||||
if err == nil {
|
||||
return config.IsAllowed(args)
|
||||
}
|
||||
if objAPI == nil {
|
||||
return false
|
||||
}
|
||||
p, err := objAPI.GetBucketPolicy(GlobalContext, args.BucketName)
|
||||
if err == nil {
|
||||
return p.IsAllowed(args)
|
||||
}
|
||||
return args.IsOwner
|
||||
}
|
||||
|
||||
// If policy is available for given bucket, check the policy.
|
||||
p, found := sys.bucketPolicyMap[args.BucketName]
|
||||
if found {
|
||||
return p.IsAllowed(args)
|
||||
}
|
||||
|
||||
configData, err := globalBucketMetadataSys.GetConfig(args.BucketName, bucketPolicyConfig)
|
||||
if err != nil {
|
||||
if !errors.Is(err, errConfigNotFound) {
|
||||
logger.LogIf(GlobalContext, err)
|
||||
}
|
||||
} else {
|
||||
sys.RLock()
|
||||
defer sys.RUnlock()
|
||||
|
||||
// If policy is available for given bucket, check the policy.
|
||||
if p, found := sys.bucketPolicyMap[args.BucketName]; found {
|
||||
p, err = policy.ParseConfig(bytes.NewReader(configData), args.BucketName)
|
||||
if err != nil {
|
||||
logger.LogIf(GlobalContext, err)
|
||||
} else {
|
||||
return p.IsAllowed(args)
|
||||
}
|
||||
}
|
||||
|
@ -97,28 +100,19 @@ func (sys *PolicySys) IsAllowed(args policy.Args) bool {
|
|||
// Loads policies for all buckets into PolicySys.
|
||||
func (sys *PolicySys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
|
||||
for _, bucket := range buckets {
|
||||
config, err := getPolicyConfig(objAPI, bucket.Name)
|
||||
configData, err := globalBucketMetadataSys.GetConfig(bucket.Name, bucketPolicyConfig)
|
||||
if err != nil {
|
||||
if _, ok := err.(BucketPolicyNotFound); ok {
|
||||
if errors.Is(err, errConfigNotFound) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
// This part is specifically written to handle migration
|
||||
// when the Version string is empty, this was allowed
|
||||
// in all previous minio releases but we need to migrate
|
||||
// those policies by properly setting the Version string
|
||||
// from now on.
|
||||
if config.Version == "" {
|
||||
logger.Info("Found in-consistent bucket policies, Migrating them for Bucket: (%s)", bucket.Name)
|
||||
config.Version = policy.DefaultVersion
|
||||
|
||||
if err = savePolicyConfig(GlobalContext, objAPI, bucket.Name, config); err != nil {
|
||||
logger.LogIf(GlobalContext, err)
|
||||
return err
|
||||
}
|
||||
config, err := policy.ParseConfig(bytes.NewReader(configData), bucket.Name)
|
||||
if err != nil {
|
||||
logger.LogIf(GlobalContext, err)
|
||||
continue
|
||||
}
|
||||
sys.Set(bucket.Name, config)
|
||||
sys.bucketPolicyMap[bucket.Name] = config
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -226,50 +220,6 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
|
|||
return args
|
||||
}
|
||||
|
||||
// getPolicyConfig - get policy config for given bucket name.
|
||||
func getPolicyConfig(objAPI ObjectLayer, bucketName string) (*policy.Policy, error) {
|
||||
// Construct path to policy.json for the given bucket.
|
||||
configFile := path.Join(bucketConfigPrefix, bucketName, bucketPolicyConfig)
|
||||
|
||||
configData, err := readConfig(GlobalContext, objAPI, configFile)
|
||||
if err != nil {
|
||||
if err == errConfigNotFound {
|
||||
err = BucketPolicyNotFound{Bucket: bucketName}
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return policy.ParseConfig(bytes.NewReader(configData), bucketName)
|
||||
}
|
||||
|
||||
func savePolicyConfig(ctx context.Context, objAPI ObjectLayer, bucketName string, bucketPolicy *policy.Policy) error {
|
||||
data, err := json.Marshal(bucketPolicy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Construct path to policy.json for the given bucket.
|
||||
configFile := path.Join(bucketConfigPrefix, bucketName, bucketPolicyConfig)
|
||||
|
||||
return saveConfig(ctx, objAPI, configFile, data)
|
||||
}
|
||||
|
||||
func removePolicyConfig(ctx context.Context, objAPI ObjectLayer, bucketName string) error {
|
||||
// Construct path to policy.json for the given bucket.
|
||||
configFile := path.Join(bucketConfigPrefix, bucketName, bucketPolicyConfig)
|
||||
|
||||
if err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile); err != nil {
|
||||
if _, ok := err.(ObjectNotFound); ok {
|
||||
return BucketPolicyNotFound{Bucket: bucketName}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PolicyToBucketAccessPolicy - converts policy.Policy to minio-go/policy.BucketAccessPolicy.
|
||||
func PolicyToBucketAccessPolicy(bucketPolicy *policy.Policy) (*miniogopolicy.BucketAccessPolicy, error) {
|
||||
// Return empty BucketAccessPolicy for empty bucket policy.
|
||||
|
|
|
@ -21,7 +21,6 @@ import (
|
|||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
|
@ -34,36 +33,35 @@ import (
|
|||
|
||||
// BucketQuotaSys - map of bucket and quota configuration.
|
||||
type BucketQuotaSys struct {
|
||||
sync.RWMutex
|
||||
quotaMap map[string]madmin.BucketQuota
|
||||
}
|
||||
|
||||
// Set - set quota configuration.
|
||||
func (sys *BucketQuotaSys) Set(bucketName string, q madmin.BucketQuota) {
|
||||
sys.Lock()
|
||||
sys.quotaMap[bucketName] = q
|
||||
sys.Unlock()
|
||||
}
|
||||
|
||||
// Get - Get quota configuration.
|
||||
func (sys *BucketQuotaSys) Get(bucketName string) (q madmin.BucketQuota, ok bool) {
|
||||
sys.RLock()
|
||||
defer sys.RUnlock()
|
||||
q, ok = sys.quotaMap[bucketName]
|
||||
return
|
||||
}
|
||||
func (sys *BucketQuotaSys) Get(bucketName string) (q madmin.BucketQuota, err error) {
|
||||
if globalIsGateway {
|
||||
objAPI := newObjectLayerFn()
|
||||
if objAPI == nil {
|
||||
return q, errServerNotInitialized
|
||||
}
|
||||
return q, BucketQuotaConfigNotFound{Bucket: bucketName}
|
||||
}
|
||||
|
||||
// Remove - removes quota configuration.
|
||||
func (sys *BucketQuotaSys) Remove(bucketName string) {
|
||||
sys.Lock()
|
||||
delete(sys.quotaMap, bucketName)
|
||||
sys.Unlock()
|
||||
q, ok := sys.quotaMap[bucketName]
|
||||
if !ok {
|
||||
configData, err := globalBucketMetadataSys.GetConfig(bucketName, bucketQuotaConfigFile)
|
||||
if err != nil {
|
||||
if errors.Is(err, errConfigNotFound) {
|
||||
return q, BucketQuotaConfigNotFound{Bucket: bucketName}
|
||||
}
|
||||
return q, err
|
||||
}
|
||||
return parseBucketQuota(bucketName, configData)
|
||||
}
|
||||
return q, nil
|
||||
}
|
||||
|
||||
// Buckets - list of buckets with quota configuration
|
||||
func (sys *BucketQuotaSys) Buckets() []string {
|
||||
sys.RLock()
|
||||
defer sys.RUnlock()
|
||||
var buckets []string
|
||||
for k := range sys.quotaMap {
|
||||
buckets = append(buckets, k)
|
||||
|
@ -89,19 +87,21 @@ func (sys *BucketQuotaSys) Init(buckets []BucketInfo, objAPI ObjectLayer) error
|
|||
func (sys *BucketQuotaSys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
|
||||
for _, bucket := range buckets {
|
||||
ctx := logger.SetReqInfo(GlobalContext, &logger.ReqInfo{BucketName: bucket.Name})
|
||||
configFile := path.Join(bucketConfigPrefix, bucket.Name, bucketQuotaConfigFile)
|
||||
configData, err := readConfig(ctx, objAPI, configFile)
|
||||
configData, err := globalBucketMetadataSys.GetConfig(bucket.Name, bucketQuotaConfigFile)
|
||||
if err != nil {
|
||||
if errors.Is(err, errConfigNotFound) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
quotaCfg, err := parseBucketQuota(configData)
|
||||
quotaCfg, err := parseBucketQuota(bucket.Name, configData)
|
||||
if err != nil {
|
||||
return err
|
||||
if _, ok := err.(BucketQuotaConfigNotFound); !ok {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
globalBucketQuotaSys.Set(bucket.Name, quotaCfg)
|
||||
sys.quotaMap[bucket.Name] = quotaCfg
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -112,10 +112,12 @@ func NewBucketQuotaSys() *BucketQuotaSys {
|
|||
}
|
||||
|
||||
// parseBucketQuota parses BucketQuota from json
|
||||
func parseBucketQuota(data []byte) (quotaCfg madmin.BucketQuota, err error) {
|
||||
err = json.Unmarshal(data, &quotaCfg)
|
||||
if err != nil {
|
||||
return
|
||||
func parseBucketQuota(bucket string, data []byte) (quotaCfg madmin.BucketQuota, err error) {
|
||||
if len(data) == 0 {
|
||||
return quotaCfg, BucketQuotaConfigNotFound{Bucket: bucket}
|
||||
}
|
||||
if err = json.Unmarshal(data, &quotaCfg); err != nil {
|
||||
return quotaCfg, err
|
||||
}
|
||||
if !quotaCfg.Type.IsValid() {
|
||||
return quotaCfg, fmt.Errorf("Invalid quota type %s", quotaCfg.Type)
|
||||
|
@ -141,7 +143,7 @@ func (b *bucketStorageCache) check(ctx context.Context, q madmin.BucketQuota, bu
|
|||
b.bucketsSizes = dui.BucketsSizes
|
||||
}
|
||||
currUsage := b.bucketsSizes[bucket]
|
||||
if (currUsage + uint64(size)) > q.Quota {
|
||||
if (currUsage+uint64(size)) > q.Quota && q.Quota > 0 {
|
||||
return BucketQuotaExceeded{Bucket: bucket}
|
||||
}
|
||||
return nil
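The added q.Quota > 0 guard (here and in enforceFIFOQuota below) treats an unset quota as "no limit", whereas the previous comparison would have flagged any write against a zero quota as exceeding it. A tiny self-contained check mirroring that comparison:

package main

import "fmt"

// wouldExceed mirrors the guarded comparison above: a zero quota is treated
// as "no limit" rather than "always exceeded".
func wouldExceed(currUsage uint64, size uint64, quota uint64) bool {
	return quota > 0 && currUsage+size > quota
}

func main() {
	fmt.Println(wouldExceed(900, 200, 1000)) // true: 1100 > 1000
	fmt.Println(wouldExceed(900, 50, 1000))  // false: 950 <= 1000
	fmt.Println(wouldExceed(900, 200, 0))    // false: quota unset
}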
|
||||
|
@ -150,8 +152,12 @@ func enforceBucketQuota(ctx context.Context, bucket string, size int64) error {
|
|||
if size < 0 {
|
||||
return nil
|
||||
}
|
||||
q, ok := globalBucketQuotaSys.Get(bucket)
|
||||
if !ok {
|
||||
|
||||
q, err := globalBucketQuotaSys.Get(bucket)
|
||||
if err != nil {
|
||||
if _, ok := err.(BucketQuotaConfigNotFound); !ok {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -194,26 +200,28 @@ func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) error {
|
|||
}
|
||||
for _, bucket := range globalBucketQuotaSys.Buckets() {
|
||||
// Check if the current bucket has quota restrictions, if not skip it
|
||||
cfg, ok := globalBucketQuotaSys.Get(bucket)
|
||||
if !ok {
|
||||
cfg, err := globalBucketQuotaSys.Get(bucket)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if cfg.Type != madmin.FIFOQuota {
|
||||
continue
|
||||
}
|
||||
_, bucketHasLockConfig := globalBucketObjectLockSys.Get(bucket)
|
||||
|
||||
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var toFree uint64
|
||||
if dataUsageInfo.BucketsSizes[bucket] > cfg.Quota {
|
||||
if dataUsageInfo.BucketsSizes[bucket] > cfg.Quota && cfg.Quota > 0 {
|
||||
toFree = dataUsageInfo.BucketsSizes[bucket] - cfg.Quota
|
||||
}
|
||||
|
||||
if toFree == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Allocate new results channel to receive ObjectInfo.
|
||||
objInfoCh := make(chan ObjectInfo)
|
||||
|
||||
|
@ -230,9 +238,11 @@ func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) error {
|
|||
return err
|
||||
}
|
||||
|
||||
rcfg, _ := globalBucketObjectLockSys.Get(bucket)
|
||||
|
||||
for obj := range objInfoCh {
|
||||
// skip objects currently under retention
|
||||
if bucketHasLockConfig && enforceRetentionForDeletion(ctx, obj) {
|
||||
if rcfg.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
|
||||
continue
|
||||
}
|
||||
scorer.addFile(obj.Name, obj.ModTime, obj.Size, 1)
|
||||
|
|
|
@ -1,61 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"path"
|
||||
|
||||
"github.com/minio/minio-go/v6/pkg/tags"
|
||||
)
|
||||
|
||||
func saveBucketTagging(ctx context.Context, objAPI ObjectLayer, bucket string, t *tags.Tags) error {
|
||||
data, err := xml.Marshal(t)
|
||||
if err != nil {
|
||||
return toObjectErr(err, bucket)
|
||||
}
|
||||
configFile := path.Join(bucketConfigPrefix, bucket, bucketTaggingConfigFile)
|
||||
if err := saveConfig(ctx, objAPI, configFile, data); err != nil {
|
||||
return toObjectErr(err, bucket)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func deleteBucketTagging(ctx context.Context, objAPI ObjectLayer, bucket string) error {
|
||||
configFile := path.Join(bucketConfigPrefix, bucket, bucketTaggingConfigFile)
|
||||
if err := deleteConfig(ctx, objAPI, configFile); err != nil && err != errConfigNotFound {
|
||||
return toObjectErr(err, bucket)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func readBucketTagging(ctx context.Context, objAPI ObjectLayer, bucket string) (*tags.Tags, error) {
|
||||
configFile := path.Join(bucketConfigPrefix, bucket, bucketTaggingConfigFile)
|
||||
configData, err := readConfig(ctx, objAPI, configFile)
|
||||
if err != nil {
|
||||
if err == errConfigNotFound {
|
||||
return nil, BucketTaggingNotFound{Bucket: bucket}
|
||||
}
|
||||
return nil, toObjectErr(err, bucket)
|
||||
}
|
||||
t := &tags.Tags{}
|
||||
if err = xml.Unmarshal(configData, t); err != nil {
|
||||
return nil, toObjectErr(err, bucket)
|
||||
}
|
||||
return t, nil
|
||||
}
|
|
@ -57,12 +57,15 @@ func lifecycleRound(ctx context.Context, objAPI ObjectLayer) error {
|
|||
|
||||
for _, bucket := range buckets {
|
||||
// Check if the current bucket has a configured lifecycle policy, skip otherwise
|
||||
l, ok := globalLifecycleSys.Get(bucket.Name)
|
||||
if !ok {
|
||||
l, err := globalLifecycleSys.Get(bucket.Name)
|
||||
if err != nil {
|
||||
if _, ok := err.(BucketLifecycleNotFound); !ok {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
_, bucketHasLockConfig := globalBucketObjectLockSys.Get(bucket.Name)
|
||||
rcfg, _ := globalBucketObjectLockSys.Get(bucket.Name)
|
||||
|
||||
// Calculate the common prefix of all lifecycle rules
|
||||
var prefixes []string
|
||||
|
@ -88,7 +91,7 @@ func lifecycleRound(ctx context.Context, objAPI ObjectLayer) error {
|
|||
}
|
||||
// Find the action that needs to be executed
|
||||
if l.ComputeAction(obj.Name, obj.UserTags, obj.ModTime) == lifecycle.DeleteAction {
|
||||
if bucketHasLockConfig && enforceRetentionForDeletion(ctx, obj) {
|
||||
if rcfg.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
|
||||
continue
|
||||
}
|
||||
objects = append(objects, obj.Name)
|
||||
|
|
|
@ -17,10 +17,10 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
)
|
||||
|
||||
|
@ -30,67 +30,9 @@ import (
|
|||
|
||||
// GetBucketWebsite - GET bucket website, a dummy api
|
||||
func (api objectAPIHandlers) GetBucketWebsiteHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
ctx := newContext(r, w, "GetBucketWebsite")
|
||||
|
||||
// GetBucketAccelerate - GET bucket accelerate, a dummy api
|
||||
func (api objectAPIHandlers) GetBucketAccelerateHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// GetBucketRequestPaymentHandler - GET bucket requestPayment, a dummy api
|
||||
func (api objectAPIHandlers) GetBucketRequestPaymentHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// GetBucketLoggingHandler - GET bucket logging, a dummy api
|
||||
func (api objectAPIHandlers) GetBucketLoggingHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// GetBucketReplicationHandler - GET bucket replication, a dummy api
|
||||
func (api objectAPIHandlers) GetBucketReplicationHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// DeleteBucketWebsiteHandler - DELETE bucket website, a dummy api
|
||||
func (api objectAPIHandlers) DeleteBucketWebsiteHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
type allowedMethod string
|
||||
|
||||
// Define strings
|
||||
const (
|
||||
GET allowedMethod = http.MethodGet
|
||||
PUT allowedMethod = http.MethodPut
|
||||
HEAD allowedMethod = http.MethodHead
|
||||
POST allowedMethod = http.MethodPost
|
||||
DELETE allowedMethod = http.MethodDelete
|
||||
)
|
||||
|
||||
// GetBucketCorsHandler - GET bucket cors, a dummy api
|
||||
func (api objectAPIHandlers) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetBucketCorsHandler")
|
||||
|
||||
type corsRule struct {
|
||||
AllowedHeaders []string `xml:"AllowedHeaders"`
|
||||
AllowedMethods []allowedMethod `xml:"AllowedMethod"`
|
||||
AllowedOrigins []string `xml:"AllowedOrigin"`
|
||||
ExposeHeaders []string `xml:"ExposeHeader"`
|
||||
MaxAgeSeconds int64 `xml:"MaxAgeSeconds"`
|
||||
}
|
||||
|
||||
type corsConfiguration struct {
|
||||
XMLName xml.Name `xml:"CORSConfiguration"`
|
||||
CorsRule []corsRule `xml:"CORSRule"`
|
||||
}
|
||||
defer logger.AuditLog(w, r, "GetBucketWebsite", mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
@ -115,11 +57,175 @@ func (api objectAPIHandlers) GetBucketCorsHandler(w http.ResponseWriter, r *http
|
|||
return
|
||||
}
|
||||
|
||||
cors := &corsConfiguration{}
|
||||
if err := xml.NewEncoder(w).Encode(cors); err != nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchWebsiteConfiguration), r.URL, guessIsBrowserReq(r))
|
||||
}
|
||||
|
||||
// GetBucketAccelerate - GET bucket accelerate, a dummy api
|
||||
func (api objectAPIHandlers) GetBucketAccelerateHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetBucketAccelerate")
|
||||
|
||||
defer logger.AuditLog(w, r, "GetBucketAccelerate", mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Allow getBucketAccelerate if policy action is set, since this is a dummy call
|
||||
// we are simply re-purposing the bucketPolicyAction.
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Validate if bucket exists, before proceeding further...
|
||||
_, err := objAPI.GetBucketInfo(ctx, bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
const accelerateDefaultConfig = `<?xml version="1.0" encoding="UTF-8"?>\n<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>`
|
||||
writeSuccessResponseXML(w, []byte(accelerateDefaultConfig))
|
||||
}
|
||||
|
||||
// GetBucketRequestPaymentHandler - GET bucket requestPayment, a dummy api
|
||||
func (api objectAPIHandlers) GetBucketRequestPaymentHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetBucketRequestPayment")
|
||||
|
||||
defer logger.AuditLog(w, r, "GetBucketRequestPayment", mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Allow getBucketRequestPayment if policy action is set, since this is a dummy call
|
||||
// we are simply re-purposing the bucketPolicyAction.
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Validate if bucket exists, before proceeding further...
|
||||
_, err := objAPI.GetBucketInfo(ctx, bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
const requestPaymentDefaultConfig = `<?xml version="1.0" encoding="UTF-8"?>\n<RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Payer>BucketOwner</Payer></RequestPaymentConfiguration>`
|
||||
|
||||
writeSuccessResponseXML(w, []byte(requestPaymentDefaultConfig))
|
||||
}
|
||||
|
||||
// GetBucketLoggingHandler - GET bucket logging, a dummy api
|
||||
func (api objectAPIHandlers) GetBucketLoggingHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "GetBucketLogging")
|
||||
|
||||
defer logger.AuditLog(w, r, "GetBucketLogging", mustGetClaimsFromToken(r))
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
|
||||
objAPI := api.ObjectAPI()
|
||||
if objAPI == nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Allow getBucketLogging if policy action is set, since this is a dummy call
|
||||
// we are simply re-purposing the bucketPolicyAction.
|
||||
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Validate if bucket exists, before proceeding further...
|
||||
_, err := objAPI.GetBucketInfo(ctx, bucket)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
const loggingDefaultConfig = `<?xml version="1.0" encoding="UTF-8"?>\n\n<BucketLoggingStatus xmlns="http://s3.amazonaws.com/doc/2006-03-01/">\n <!--<LoggingEnabled><TargetBucket>myLogsBucket</TargetBucket><TargetPrefix>add/this/prefix/to/my/log/files/access_log-</TargetPrefix></LoggingEnabled>-->\n</BucketLoggingStatus>\n`
|
||||
writeSuccessResponseXML(w, []byte(loggingDefaultConfig))
|
||||
}
|
||||
|
||||
// GetBucketReplicationHandler - GET bucket replication, a dummy api
func (api objectAPIHandlers) GetBucketReplicationHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetBucketReplication")

	defer logger.AuditLog(w, r, "GetBucketReplication", mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objAPI := api.ObjectAPI()
	if objAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	// Allow getBucketCors if policy action is set, since this is a dummy call
	// we are simply re-purposing the bucketPolicyAction.
	if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Validate if bucket exists, before proceeding further...
	_, err := objAPI.GetBucketInfo(ctx, bucket)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrReplicationConfigurationNotFoundError), r.URL, guessIsBrowserReq(r))
}
// DeleteBucketWebsiteHandler - DELETE bucket website, a dummy api
func (api objectAPIHandlers) DeleteBucketWebsiteHandler(w http.ResponseWriter, r *http.Request) {
	writeSuccessResponseHeadersOnly(w)
	w.(http.Flusher).Flush()
}

// GetBucketCorsHandler - GET bucket cors, a dummy api
func (api objectAPIHandlers) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetBucketCors")

	defer logger.AuditLog(w, r, "GetBucketCors", mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objAPI := api.ObjectAPI()
	if objAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	// Allow getBucketCors if policy action is set, since this is a dummy call
	// we are simply re-purposing the bucketPolicyAction.
	if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	// Validate if bucket exists, before proceeding further...
	_, err := objAPI.GetBucketInfo(ctx, bucket)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchCORSConfiguration), r.URL, guessIsBrowserReq(r))
}
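The six dummy handlers above share the same preamble: object-layer check, the re-purposed bucketPolicyAction auth check, and a bucket-existence probe. Below is a minimal sketch of how that preamble could be factored out; checkDummyBucketRequest is a hypothetical helper, not part of this change, and it only reuses identifiers already present in these handlers.

// Hypothetical helper (illustration only): the shared preamble of the dummy
// bucket sub-resource handlers above. Returns false after having written an
// error response, so callers can simply return.
func (api objectAPIHandlers) checkDummyBucketRequest(ctx context.Context, w http.ResponseWriter, r *http.Request, bucket string) bool {
	objAPI := api.ObjectAPI()
	if objAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return false
	}
	// Re-purpose the bucketPolicyAction, exactly as the handlers above do.
	if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return false
	}
	// Validate that the bucket exists before answering with a default config.
	if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return false
	}
	return true
}

With such a helper each handler would shrink to its final write, for example writeSuccessResponseXML(w, []byte(accelerateDefaultConfig)).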
cmd/fs-v1.go
@@ -39,10 +39,6 @@ import (
	"github.com/minio/minio/cmd/config"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	bucketsse "github.com/minio/minio/pkg/bucket/encryption"
	"github.com/minio/minio/pkg/bucket/lifecycle"
	objectlock "github.com/minio/minio/pkg/bucket/object/lock"
	"github.com/minio/minio/pkg/bucket/policy"
	"github.com/minio/minio/pkg/color"
	"github.com/minio/minio/pkg/lock"
	"github.com/minio/minio/pkg/madmin"

@@ -55,6 +51,8 @@ var defaultEtag = "00000000000000000000000000000000-1"

// FSObjects - Implements fs object layer.
type FSObjects struct {
	GatewayUnsupported

	// Disk usage metrics
	totalUsed uint64 // ref: https://golang.org/pkg/sync/atomic/#pkg-note-BUG
@@ -340,11 +338,12 @@ func (fs *FSObjects) MakeBucketWithLocation(ctx context.Context, bucket, locatio
	}

	meta := newBucketMetadata(bucket)
	meta.LockEnabled = false
	if err := meta.save(ctx, fs); err != nil {
	if err := meta.Save(ctx, fs); err != nil {
		return toObjectErr(err, bucket)
	}

	globalBucketMetadataSys.Set(bucket, meta)

	return nil
}
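For orientation, the call sites in this hunk (newBucketMetadata, meta.LockEnabled, meta.Save, globalBucketMetadataSys.Set) imply a per-bucket metadata value roughly shaped as sketched below. The field set, the encoding, and the bucketMetadataFile name are assumptions for illustration, not taken from this diff; the sketch assumes the surrounding cmd package (saveConfig, bucketConfigPrefix, ObjectLayer) plus standard-library imports.

// Illustrative shape only; the real type and persistence format live
// elsewhere in this change set.
const bucketMetadataFile = "metadata.json" // hypothetical object name

type bucketMetadataSketch struct {
	Name        string
	Created     time.Time
	LockEnabled bool
	// Other per-bucket configs (policy, lifecycle, SSE, tagging, quota, ...)
	// are assumed to be carried on the same struct as raw payloads.
}

// Save persists the whole struct as a single object through the object layer.
func (b *bucketMetadataSketch) Save(ctx context.Context, objAPI ObjectLayer) error {
	data, err := json.Marshal(b) // encoding chosen here is an assumption
	if err != nil {
		return err
	}
	return saveConfig(ctx, objAPI, path.Join(bucketConfigPrefix, b.Name, bucketMetadataFile), data)
}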
@@ -360,8 +359,12 @@ func (fs *FSObjects) GetBucketInfo(ctx context.Context, bucket string) (bi Bucke
		return bi, toObjectErr(err, bucket)
	}

	// As os.Stat() doesn't carry other than ModTime(), use ModTime() as CreatedTime.
	createdTime := st.ModTime()
	meta, err := globalBucketMetadataSys.Get(bucket)
	if err == nil {
		createdTime = meta.Created
	}

	return BucketInfo{
		Name:    bucket,
		Created: createdTime,
@@ -403,13 +406,12 @@ func (fs *FSObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
			continue
		}
		var created = fi.ModTime()
		meta, err := loadBucketMetadata(ctx, fs, fi.Name())
		meta, err := globalBucketMetadataSys.Get(fi.Name())
		if err == nil {
			created = meta.Created
		}
		if err != errMetaDataConverted {
			logger.LogIf(ctx, err)
		}
		globalBucketMetadataSys.Set(fi.Name(), meta)

		bucketInfos = append(bucketInfos, BucketInfo{
			Name:    fi.Name(),
			Created: created,
@@ -459,7 +461,7 @@ func (fs *FSObjects) DeleteBucket(ctx context.Context, bucket string, forceDelet
	}

	// Delete all bucket metadata.
	deleteBucketMetadata(ctx, bucket, fs)
	deleteBucketMetadata(ctx, fs, bucket)

	return nil
}
@ -1317,76 +1319,6 @@ func (fs *FSObjects) GetMetrics(ctx context.Context) (*Metrics, error) {
|
|||
return &Metrics{}, NotImplemented{}
|
||||
}
|
||||
|
||||
// SetBucketPolicy sets policy on bucket
|
||||
func (fs *FSObjects) SetBucketPolicy(ctx context.Context, bucket string, policy *policy.Policy) error {
|
||||
return savePolicyConfig(ctx, fs, bucket, policy)
|
||||
}
|
||||
|
||||
// GetBucketPolicy will get policy on bucket
|
||||
func (fs *FSObjects) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) {
|
||||
return getPolicyConfig(fs, bucket)
|
||||
}
|
||||
|
||||
// DeleteBucketPolicy deletes all policies on bucket
|
||||
func (fs *FSObjects) DeleteBucketPolicy(ctx context.Context, bucket string) error {
|
||||
return removePolicyConfig(ctx, fs, bucket)
|
||||
}
|
||||
|
||||
// SetBucketLifecycle sets lifecycle on bucket
|
||||
func (fs *FSObjects) SetBucketLifecycle(ctx context.Context, bucket string, lifecycle *lifecycle.Lifecycle) error {
|
||||
return saveLifecycleConfig(ctx, fs, bucket, lifecycle)
|
||||
}
|
||||
|
||||
// GetBucketLifecycle will get lifecycle on bucket
|
||||
func (fs *FSObjects) GetBucketLifecycle(ctx context.Context, bucket string) (*lifecycle.Lifecycle, error) {
|
||||
return getLifecycleConfig(fs, bucket)
|
||||
}
|
||||
|
||||
// DeleteBucketLifecycle deletes all lifecycle on bucket
|
||||
func (fs *FSObjects) DeleteBucketLifecycle(ctx context.Context, bucket string) error {
|
||||
return removeLifecycleConfig(ctx, fs, bucket)
|
||||
}
|
||||
|
||||
// GetBucketSSEConfig returns bucket encryption config on given bucket
|
||||
func (fs *FSObjects) GetBucketSSEConfig(ctx context.Context, bucket string) (*bucketsse.BucketSSEConfig, error) {
|
||||
return getBucketSSEConfig(fs, bucket)
|
||||
}
|
||||
|
||||
// SetBucketSSEConfig sets bucket encryption config on given bucket
|
||||
func (fs *FSObjects) SetBucketSSEConfig(ctx context.Context, bucket string, config *bucketsse.BucketSSEConfig) error {
|
||||
return saveBucketSSEConfig(ctx, fs, bucket, config)
|
||||
}
|
||||
|
||||
// DeleteBucketSSEConfig deletes bucket encryption config on given bucket
|
||||
func (fs *FSObjects) DeleteBucketSSEConfig(ctx context.Context, bucket string) error {
|
||||
return removeBucketSSEConfig(ctx, fs, bucket)
|
||||
}
|
||||
|
||||
// SetBucketObjectLockConfig enables/clears default object lock configuration
|
||||
func (fs *FSObjects) SetBucketObjectLockConfig(ctx context.Context, bucket string, config *objectlock.Config) error {
|
||||
return saveBucketObjectLockConfig(ctx, fs, bucket, config)
|
||||
}
|
||||
|
||||
// GetBucketObjectLockConfig - returns current defaults for object lock configuration
|
||||
func (fs *FSObjects) GetBucketObjectLockConfig(ctx context.Context, bucket string) (*objectlock.Config, error) {
|
||||
return readBucketObjectLockConfig(ctx, fs, bucket)
|
||||
}
|
||||
|
||||
// SetBucketTagging sets bucket tags on given bucket
|
||||
func (fs *FSObjects) SetBucketTagging(ctx context.Context, bucket string, t *tags.Tags) error {
|
||||
return saveBucketTagging(ctx, fs, bucket, t)
|
||||
}
|
||||
|
||||
// GetBucketTagging get bucket tags set on given bucket
|
||||
func (fs *FSObjects) GetBucketTagging(ctx context.Context, bucket string) (*tags.Tags, error) {
|
||||
return readBucketTagging(ctx, fs, bucket)
|
||||
}
|
||||
|
||||
// DeleteBucketTagging delete bucket tags set if any.
|
||||
func (fs *FSObjects) DeleteBucketTagging(ctx context.Context, bucket string) error {
|
||||
return deleteBucketTagging(ctx, fs, bucket)
|
||||
}
|
||||
|
||||
// ListObjectsV2 lists all blobs in bucket filtered by prefix
|
||||
func (fs *FSObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
|
||||
marker := continuationToken
|
||||
|
|
|
@@ -147,8 +147,9 @@ var (
	// globalEnvTargetList has list of targets configured via env.
	globalEnvTargetList *event.TargetList

	globalPolicySys *PolicySys
	globalIAMSys    *IAMSys
	globalBucketMetadataSys *BucketMetadataSys
	globalPolicySys         *PolicySys
	globalIAMSys            *IAMSys

	globalLifecycleSys       *LifecycleSys
	globalBucketSSEConfigSys *BucketSSEConfigSys
@ -20,11 +20,10 @@ import (
|
|||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
@ -34,9 +33,6 @@ import (
|
|||
"github.com/minio/minio-go/v6/pkg/set"
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
|
@ -546,182 +542,46 @@ func (sys *NotificationSys) GetLocks(ctx context.Context) []*PeerLocks {
|
|||
return locksResp
|
||||
}
|
||||
|
||||
// SetBucketPolicy - calls SetBucketPolicy RPC call on all peers.
|
||||
func (sys *NotificationSys) SetBucketPolicy(ctx context.Context, bucketName string, bucketPolicy *policy.Policy) {
|
||||
go func() {
|
||||
ng := WithNPeers(len(sys.peerClients))
|
||||
for idx, client := range sys.peerClients {
|
||||
if client == nil {
|
||||
continue
|
||||
}
|
||||
client := client
|
||||
ng.Go(ctx, func() error {
|
||||
return client.SetBucketPolicy(bucketName, bucketPolicy)
|
||||
}, idx, *client.host)
|
||||
// LoadBucketMetadata - calls LoadBucketMetadata call on all peers
|
||||
func (sys *NotificationSys) LoadBucketMetadata(ctx context.Context, bucketName string) {
|
||||
ng := WithNPeers(len(sys.peerClients))
|
||||
for idx, client := range sys.peerClients {
|
||||
if client == nil {
|
||||
continue
|
||||
}
|
||||
ng.Wait()
|
||||
}()
|
||||
client := client
|
||||
ng.Go(ctx, func() error {
|
||||
return client.LoadBucketMetadata(bucketName)
|
||||
}, idx, *client.host)
|
||||
}
|
||||
for _, nErr := range ng.Wait() {
|
||||
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String())
|
||||
if nErr.Err != nil {
|
||||
logger.LogIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteBucket - calls DeleteBucket RPC call on all peers.
|
||||
func (sys *NotificationSys) DeleteBucket(ctx context.Context, bucketName string) {
|
||||
globalNotificationSys.RemoveNotification(bucketName)
|
||||
globalBucketObjectLockSys.Remove(bucketName)
|
||||
globalBucketQuotaSys.Remove(bucketName)
|
||||
globalPolicySys.Remove(bucketName)
|
||||
globalLifecycleSys.Remove(bucketName)
|
||||
// DeleteBucketMetadata - calls DeleteBucketMetadata call on all peers
|
||||
func (sys *NotificationSys) DeleteBucketMetadata(ctx context.Context, bucketName string) {
|
||||
globalBucketMetadataSys.Remove(bucketName)
|
||||
|
||||
go func() {
|
||||
ng := WithNPeers(len(sys.peerClients))
|
||||
for idx, client := range sys.peerClients {
|
||||
if client == nil {
|
||||
continue
|
||||
}
|
||||
client := client
|
||||
ng.Go(ctx, func() error {
|
||||
return client.DeleteBucket(bucketName)
|
||||
}, idx, *client.host)
|
||||
ng := WithNPeers(len(sys.peerClients))
|
||||
for idx, client := range sys.peerClients {
|
||||
if client == nil {
|
||||
continue
|
||||
}
|
||||
ng.Wait()
|
||||
}()
|
||||
}
|
||||
|
||||
// RemoveBucketPolicy - calls RemoveBucketPolicy RPC call on all peers.
|
||||
func (sys *NotificationSys) RemoveBucketPolicy(ctx context.Context, bucketName string) {
|
||||
go func() {
|
||||
ng := WithNPeers(len(sys.peerClients))
|
||||
for idx, client := range sys.peerClients {
|
||||
if client == nil {
|
||||
continue
|
||||
}
|
||||
client := client
|
||||
ng.Go(ctx, func() error {
|
||||
return client.RemoveBucketPolicy(bucketName)
|
||||
}, idx, *client.host)
|
||||
client := client
|
||||
ng.Go(ctx, func() error {
|
||||
return client.DeleteBucketMetadata(bucketName)
|
||||
}, idx, *client.host)
|
||||
}
|
||||
for _, nErr := range ng.Wait() {
|
||||
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String())
|
||||
if nErr.Err != nil {
|
||||
logger.LogIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err)
|
||||
}
|
||||
ng.Wait()
|
||||
}()
|
||||
}
|
||||
|
||||
// RemoveBucketObjectLockConfig - calls RemoveBucketObjectLockConfig RPC call on all peers.
|
||||
func (sys *NotificationSys) RemoveBucketObjectLockConfig(ctx context.Context, bucketName string) {
|
||||
go func() {
|
||||
ng := WithNPeers(len(sys.peerClients))
|
||||
for idx, client := range sys.peerClients {
|
||||
if client == nil {
|
||||
continue
|
||||
}
|
||||
client := client
|
||||
ng.Go(ctx, func() error {
|
||||
return client.RemoveBucketObjectLockConfig(bucketName)
|
||||
}, idx, *client.host)
|
||||
}
|
||||
ng.Wait()
|
||||
}()
|
||||
}
|
||||
|
||||
// RemoveBucketQuotaConfig - calls RemoveBucketQuotaConfig RPC call on all peers.
|
||||
func (sys *NotificationSys) RemoveBucketQuotaConfig(ctx context.Context, bucketName string) {
|
||||
go func() {
|
||||
ng := WithNPeers(len(sys.peerClients))
|
||||
for idx, client := range sys.peerClients {
|
||||
if client == nil {
|
||||
continue
|
||||
}
|
||||
client := client
|
||||
ng.Go(ctx, func() error {
|
||||
return client.RemoveBucketQuotaConfig(bucketName)
|
||||
}, idx, *client.host)
|
||||
}
|
||||
ng.Wait()
|
||||
}()
|
||||
}
|
||||
|
||||
// SetBucketLifecycle - calls SetBucketLifecycle on all peers.
|
||||
func (sys *NotificationSys) SetBucketLifecycle(ctx context.Context, bucketName string,
|
||||
bucketLifecycle *lifecycle.Lifecycle) {
|
||||
go func() {
|
||||
ng := WithNPeers(len(sys.peerClients))
|
||||
for idx, client := range sys.peerClients {
|
||||
if client == nil {
|
||||
continue
|
||||
}
|
||||
client := client
|
||||
ng.Go(ctx, func() error {
|
||||
return client.SetBucketLifecycle(bucketName, bucketLifecycle)
|
||||
}, idx, *client.host)
|
||||
}
|
||||
ng.Wait()
|
||||
}()
|
||||
}
|
||||
|
||||
// RemoveBucketLifecycle - calls RemoveLifecycle on all peers.
|
||||
func (sys *NotificationSys) RemoveBucketLifecycle(ctx context.Context, bucketName string) {
|
||||
go func() {
|
||||
ng := WithNPeers(len(sys.peerClients))
|
||||
for idx, client := range sys.peerClients {
|
||||
if client == nil {
|
||||
continue
|
||||
}
|
||||
client := client
|
||||
ng.Go(ctx, func() error {
|
||||
return client.RemoveBucketLifecycle(bucketName)
|
||||
}, idx, *client.host)
|
||||
}
|
||||
ng.Wait()
|
||||
}()
|
||||
}
|
||||
|
||||
// SetBucketSSEConfig - calls SetBucketSSEConfig on all peers.
|
||||
func (sys *NotificationSys) SetBucketSSEConfig(ctx context.Context, bucketName string,
|
||||
encConfig *bucketsse.BucketSSEConfig) {
|
||||
go func() {
|
||||
ng := WithNPeers(len(sys.peerClients))
|
||||
for idx, client := range sys.peerClients {
|
||||
if client == nil {
|
||||
continue
|
||||
}
|
||||
client := client
|
||||
ng.Go(ctx, func() error {
|
||||
return client.SetBucketSSEConfig(bucketName, encConfig)
|
||||
}, idx, *client.host)
|
||||
}
|
||||
ng.Wait()
|
||||
}()
|
||||
}
|
||||
|
||||
// RemoveBucketSSEConfig - calls RemoveBucketSSEConfig on all peers.
|
||||
func (sys *NotificationSys) RemoveBucketSSEConfig(ctx context.Context, bucketName string) {
|
||||
go func() {
|
||||
ng := WithNPeers(len(sys.peerClients))
|
||||
for idx, client := range sys.peerClients {
|
||||
if client == nil {
|
||||
continue
|
||||
}
|
||||
client := client
|
||||
ng.Go(ctx, func() error {
|
||||
return client.RemoveBucketSSEConfig(bucketName)
|
||||
}, idx, *client.host)
|
||||
}
|
||||
ng.Wait()
|
||||
}()
|
||||
}
|
||||
|
||||
// PutBucketNotification - calls PutBucketNotification RPC call on all peers.
|
||||
func (sys *NotificationSys) PutBucketNotification(ctx context.Context, bucketName string, rulesMap event.RulesMap) {
|
||||
go func() {
|
||||
ng := WithNPeers(len(sys.peerClients))
|
||||
for idx, client := range sys.peerClients {
|
||||
if client == nil {
|
||||
continue
|
||||
}
|
||||
client := client
|
||||
ng.Go(ctx, func() error {
|
||||
return client.PutBucketNotification(bucketName, rulesMap)
|
||||
}, idx, *client.host)
|
||||
}
|
||||
ng.Wait()
|
||||
}()
|
||||
}
|
||||
}
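The net effect of the block above is that the many per-config peer broadcasts collapse into two: LoadBucketMetadata after a bucket's configuration changes, and DeleteBucketMetadata when a bucket goes away. A small illustrative wrapper (hypothetical, using only the NotificationSys methods defined above):

// Illustrative only: the two propagation paths that replace the removed
// per-config RPC fan-outs (SetBucketPolicy, SetBucketLifecycle, ...).
func propagateBucketMetadata(ctx context.Context, bucket string, deleted bool) {
	if deleted {
		// Drops the cached metadata locally and on every peer.
		globalNotificationSys.DeleteBucketMetadata(ctx, bucket)
		return
	}
	// Assumes the caller has already persisted the updated metadata
	// (for example via meta.Save in cmd/fs-v1.go above); peers then
	// reload their in-memory copy.
	globalNotificationSys.LoadBucketMetadata(ctx, bucket)
}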
|
||||
|
||||
// AddRemoteTarget - adds event rules map, HTTP/PeerRPC client target to bucket name.
|
||||
|
@ -753,14 +613,18 @@ func (sys *NotificationSys) AddRemoteTarget(bucketName string, target event.Targ
|
|||
func (sys *NotificationSys) load(buckets []BucketInfo, objAPI ObjectLayer) error {
|
||||
for _, bucket := range buckets {
|
||||
ctx := logger.SetReqInfo(GlobalContext, &logger.ReqInfo{BucketName: bucket.Name})
|
||||
config, err := readNotificationConfig(ctx, objAPI, bucket.Name)
|
||||
if err != nil && err != errNoSuchNotifications {
|
||||
if _, ok := err.(*event.ErrARNNotFound); ok {
|
||||
configData, err := globalBucketMetadataSys.GetConfig(bucket.Name, bucketNotificationConfig)
|
||||
if err != nil {
|
||||
if errors.Is(err, errConfigNotFound) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
if err == errNoSuchNotifications {
|
||||
config, err := event.ParseConfig(bytes.NewReader(configData), globalServerRegion, globalNotificationSys.targetList)
|
||||
if err != nil {
|
||||
if _, ok := err.(*event.ErrARNNotFound); !ok {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
sys.AddRulesMap(bucket.Name, config.ToRulesMap())
|
||||
|
@ -904,46 +768,6 @@ func (sys *NotificationSys) Send(args eventArgs) {
|
|||
sys.targetList.Send(args.ToEvent(true), targetIDSet, sys.targetResCh)
|
||||
}
|
||||
|
||||
// PutBucketObjectLockConfig - put bucket object lock configuration to all peers.
|
||||
func (sys *NotificationSys) PutBucketObjectLockConfig(ctx context.Context, bucketName string, retention *objectlock.Retention) {
|
||||
g := errgroup.WithNErrs(len(sys.peerClients))
|
||||
for index, client := range sys.peerClients {
|
||||
if client == nil {
|
||||
continue
|
||||
}
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
return sys.peerClients[index].PutBucketObjectLockConfig(bucketName, retention)
|
||||
}, index)
|
||||
}
|
||||
for i, err := range g.Wait() {
|
||||
if err != nil {
|
||||
logger.GetReqInfo(ctx).AppendTags("remotePeer", sys.peerClients[i].host.String())
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PutBucketQuotaConfig - put bucket quota configuration to all peers.
|
||||
func (sys *NotificationSys) PutBucketQuotaConfig(ctx context.Context, bucketName string, q madmin.BucketQuota) {
|
||||
g := errgroup.WithNErrs(len(sys.peerClients))
|
||||
for index, client := range sys.peerClients {
|
||||
if client == nil {
|
||||
continue
|
||||
}
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
return sys.peerClients[index].PutBucketQuotaConfig(bucketName, q)
|
||||
}, index)
|
||||
}
|
||||
for i, err := range g.Wait() {
|
||||
if err != nil {
|
||||
logger.GetReqInfo(ctx).AppendTags("remotePeer", sys.peerClients[i].host.String())
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NetOBDInfo - Net OBD information
|
||||
func (sys *NotificationSys) NetOBDInfo(ctx context.Context) madmin.ServerNetOBDInfo {
|
||||
var sortedGlobalEndpoints []string
|
||||
|
@ -1387,30 +1211,3 @@ func sendEvent(args eventArgs) {
|
|||
|
||||
globalNotificationSys.Send(args)
|
||||
}
|
||||
|
||||
func readNotificationConfig(ctx context.Context, objAPI ObjectLayer, bucketName string) (*event.Config, error) {
|
||||
// Construct path to notification.xml for the given bucket.
|
||||
configFile := path.Join(bucketConfigPrefix, bucketName, bucketNotificationConfig)
|
||||
configData, err := readConfig(ctx, objAPI, configFile)
|
||||
if err != nil {
|
||||
if err == errConfigNotFound {
|
||||
err = errNoSuchNotifications
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config, err := event.ParseConfig(bytes.NewReader(configData), globalServerRegion, globalNotificationSys.targetList)
|
||||
logger.LogIf(ctx, err)
|
||||
return config, err
|
||||
}
|
||||
|
||||
func saveNotificationConfig(ctx context.Context, objAPI ObjectLayer, bucketName string, config *event.Config) error {
|
||||
data, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
configFile := path.Join(bucketConfigPrefix, bucketName, bucketNotificationConfig)
|
||||
return saveConfig(ctx, objAPI, configFile, data)
|
||||
}
|
||||
|
|
|
@ -18,7 +18,6 @@ package cmd
|
|||
|
||||
import (
|
||||
"context"
|
||||
"path"
|
||||
"sync"
|
||||
|
||||
"strings"
|
||||
|
@ -80,17 +79,6 @@ func dirObjectInfo(bucket, object string, size int64, metadata map[string]string
|
|||
}
|
||||
}
|
||||
|
||||
func deleteBucketMetadata(ctx context.Context, bucket string, objAPI ObjectLayer) {
|
||||
// Delete bucket access policy, if present - ignore any errors.
|
||||
removePolicyConfig(ctx, objAPI, bucket)
|
||||
|
||||
// Delete notification config, if present - ignore any errors.
|
||||
logger.LogIf(ctx, removeNotificationConfig(ctx, objAPI, bucket))
|
||||
|
||||
// Delete bucket meta config, if present - ignore any errors.
|
||||
logger.LogIf(ctx, removeBucketMeta(ctx, objAPI, bucket))
|
||||
}
|
||||
|
||||
// Depending on the disk type network or local, initialize storage API.
|
||||
func newStorageAPI(endpoint Endpoint) (storage StorageAPI, err error) {
|
||||
if endpoint.IsLocal {
|
||||
|
@ -145,15 +133,6 @@ func cleanupDir(ctx context.Context, storage StorageAPI, volume, dirPath string)
|
|||
return err
|
||||
}
|
||||
|
||||
// Removes notification.xml for a given bucket, only used during DeleteBucket.
|
||||
func removeNotificationConfig(ctx context.Context, objAPI ObjectLayer, bucket string) error {
|
||||
ncPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)
|
||||
if err := deleteConfig(ctx, objAPI, ncPath); err != nil && err != errConfigNotFound {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int, tpool *TreeWalkPool, listDir ListDirFunc, getObjInfo func(context.Context, string, string) (ObjectInfo, error), getObjectInfoDirs ...func(context.Context, string, string) (ObjectInfo, error)) (loi ListObjectsInfo, err error) {
|
||||
endWalkCh := make(chan struct{})
|
||||
defer close(endWalkCh)
|
||||
|
|
|
@ -23,11 +23,8 @@ import (
|
|||
|
||||
"github.com/minio/minio-go/v6/pkg/encrypt"
|
||||
"github.com/minio/minio-go/v6/pkg/tags"
|
||||
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
)
|
||||
|
||||
|
@ -105,7 +102,6 @@ type ObjectLayer interface {
|
|||
HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (madmin.HealResultItem, error)
|
||||
HealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (madmin.HealResultItem, error)
|
||||
HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn healObjectFn) error
|
||||
|
||||
ListBucketsHeal(ctx context.Context) (buckets []BucketInfo, err error)
|
||||
|
||||
// Policy operations
|
||||
|
@ -121,25 +117,6 @@ type ObjectLayer interface {
|
|||
// Compression support check.
|
||||
IsCompressionSupported() bool
|
||||
|
||||
// Lifecycle operations
|
||||
SetBucketLifecycle(context.Context, string, *lifecycle.Lifecycle) error
|
||||
GetBucketLifecycle(context.Context, string) (*lifecycle.Lifecycle, error)
|
||||
DeleteBucketLifecycle(context.Context, string) error
|
||||
|
||||
// Bucket Encryption operations
|
||||
SetBucketSSEConfig(context.Context, string, *bucketsse.BucketSSEConfig) error
|
||||
GetBucketSSEConfig(context.Context, string) (*bucketsse.BucketSSEConfig, error)
|
||||
DeleteBucketSSEConfig(context.Context, string) error
|
||||
|
||||
// Bucket locking configuration operations
|
||||
SetBucketObjectLockConfig(context.Context, string, *objectlock.Config) error
|
||||
GetBucketObjectLockConfig(context.Context, string) (*objectlock.Config, error)
|
||||
|
||||
// Bucket tagging operations
|
||||
SetBucketTagging(context.Context, string, *tags.Tags) error
|
||||
GetBucketTagging(context.Context, string) (*tags.Tags, error)
|
||||
DeleteBucketTagging(context.Context, string) error
|
||||
|
||||
// Backend related metrics
|
||||
GetMetrics(ctx context.Context) (*Metrics, error)
|
||||
|
||||
|
|
|
@ -1799,28 +1799,31 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
|
|||
// Expecting to fail due to bad
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
actualResult, actualErr := obj.CompleteMultipartUpload(context.Background(), testCase.bucket, testCase.object, testCase.uploadID, testCase.parts, ObjectOptions{})
|
||||
if actualErr != nil && testCase.shouldPass {
|
||||
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, actualErr)
|
||||
}
|
||||
if actualErr == nil && !testCase.shouldPass {
|
||||
t.Errorf("Test %d: %s: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, instanceType, testCase.expectedErr)
|
||||
}
|
||||
// Failed as expected, but does it fail for the expected reason.
|
||||
if actualErr != nil && !testCase.shouldPass {
|
||||
if reflect.TypeOf(actualErr) != reflect.TypeOf(testCase.expectedErr) {
|
||||
t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\"", i+1, instanceType, testCase.expectedErr, actualErr)
|
||||
for _, testCase := range testCases {
|
||||
testCase := testCase
|
||||
t.(*testing.T).Run("", func(t *testing.T) {
|
||||
actualResult, actualErr := obj.CompleteMultipartUpload(context.Background(), testCase.bucket, testCase.object, testCase.uploadID, testCase.parts, ObjectOptions{})
|
||||
if actualErr != nil && testCase.shouldPass {
|
||||
t.Errorf("%s: Expected to pass, but failed with: <ERROR> %s", instanceType, actualErr)
|
||||
}
|
||||
}
|
||||
// Passes as expected, but asserting the results.
|
||||
if actualErr == nil && testCase.shouldPass {
|
||||
if actualErr == nil && !testCase.shouldPass {
|
||||
t.Errorf("%s: Expected to fail with <ERROR> \"%s\", but passed instead", instanceType, testCase.expectedErr)
|
||||
}
|
||||
// Failed as expected, but does it fail for the expected reason.
|
||||
if actualErr != nil && !testCase.shouldPass {
|
||||
if reflect.TypeOf(actualErr) != reflect.TypeOf(testCase.expectedErr) {
|
||||
t.Errorf("%s: Expected to fail with error \"%s\", but instead failed with error \"%s\"", instanceType, testCase.expectedErr, actualErr)
|
||||
}
|
||||
}
|
||||
// Passes as expected, but asserting the results.
|
||||
if actualErr == nil && testCase.shouldPass {
|
||||
|
||||
// Asserting IsTruncated.
|
||||
if actualResult.ETag != testCase.expectedS3MD5 {
|
||||
t.Errorf("Test %d: %s: Expected the result to be \"%v\", but found it to \"%v\"", i+1, instanceType, testCase.expectedS3MD5, actualResult)
|
||||
// Asserting IsTruncated.
|
||||
if actualResult.ETag != testCase.expectedS3MD5 {
|
||||
t.Errorf("%s: Expected the result to be \"%v\", but found it to \"%v\"", instanceType, testCase.expectedS3MD5, actualResult)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
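The rewrite above converts an index-based assertion loop into t.Run subtests, so each case gets its own failure report. A minimal, standalone version of the same pattern (standard testing package only, unrelated to this suite):

// Table-driven subtests: capture the range variable, then run each case in
// its own subtest so failures are reported per case.
func TestDoubling(t *testing.T) {
	testCases := []struct {
		in, want int
	}{
		{in: 1, want: 2},
		{in: 3, want: 6},
	}
	for _, testCase := range testCases {
		testCase := testCase // capture for the closure
		t.Run("", func(t *testing.T) {
			if got := testCase.in * 2; got != testCase.want {
				t.Errorf("got %d, want %d", got, testCase.want)
			}
		})
	}
}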
|
||||
|
||||
|
|
|
@ -822,9 +822,9 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
|
|||
}
|
||||
|
||||
// Check if bucket encryption is enabled
|
||||
_, encEnabled := globalBucketSSEConfigSys.Get(dstBucket)
|
||||
_, err = globalBucketSSEConfigSys.Get(dstBucket)
|
||||
// This request header needs to be set prior to setting ObjectOptions
|
||||
if (globalAutoEncryption || encEnabled) && !crypto.SSEC.IsRequested(r.Header) {
|
||||
if (globalAutoEncryption || err == nil) && !crypto.SSEC.IsRequested(r.Header) {
|
||||
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
|
||||
}
|
||||
|
||||
|
@ -1349,10 +1349,11 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
|
|||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Check if bucket encryption is enabled
|
||||
_, encEnabled := globalBucketSSEConfigSys.Get(bucket)
|
||||
_, err = globalBucketSSEConfigSys.Get(bucket)
|
||||
// This request header needs to be set prior to setting ObjectOptions
|
||||
if (globalAutoEncryption || encEnabled) && !crypto.SSEC.IsRequested(r.Header) && !crypto.S3KMS.IsRequested(r.Header) {
|
||||
if (globalAutoEncryption || err == nil) && !crypto.SSEC.IsRequested(r.Header) && !crypto.S3KMS.IsRequested(r.Header) {
|
||||
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
|
||||
}
|
||||
|
||||
|
@ -1531,10 +1532,11 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
|
|||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Check if bucket encryption is enabled
|
||||
_, encEnabled := globalBucketSSEConfigSys.Get(bucket)
|
||||
_, err = globalBucketSSEConfigSys.Get(bucket)
|
||||
// This request header needs to be set prior to setting ObjectOptions
|
||||
if (globalAutoEncryption || encEnabled) && !crypto.SSEC.IsRequested(r.Header) && !crypto.S3KMS.IsRequested(r.Header) {
|
||||
if (globalAutoEncryption || err == nil) && !crypto.SSEC.IsRequested(r.Header) && !crypto.S3KMS.IsRequested(r.Header) {
|
||||
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
|
||||
}
|
||||
|
||||
|
@ -2585,7 +2587,7 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
|
|||
}
|
||||
|
||||
apiErr := ErrNone
|
||||
if _, ok := globalBucketObjectLockSys.Get(bucket); ok {
|
||||
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
|
||||
apiErr = enforceRetentionBypassForDelete(ctx, r, bucket, object, getObjectInfo)
|
||||
if apiErr != ErrNone && apiErr != ErrNoSuchKey {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErr), r.URL, guessIsBrowserReq(r))
|
||||
|
@ -2649,7 +2651,7 @@ func (api objectAPIHandlers) PutObjectLegalHoldHandler(w http.ResponseWriter, r
|
|||
return
|
||||
}
|
||||
|
||||
if _, ok := globalBucketObjectLockSys.Get(bucket); !ok {
|
||||
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); !rcfg.LockEnabled {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketObjectLockConfiguration), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
@ -2731,7 +2733,7 @@ func (api objectAPIHandlers) GetObjectLegalHoldHandler(w http.ResponseWriter, r
|
|||
getObjectInfo = api.CacheAPI().GetObjectInfo
|
||||
}
|
||||
|
||||
if _, ok := globalBucketObjectLockSys.Get(bucket); !ok {
|
||||
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); !rcfg.LockEnabled {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketObjectLockConfiguration), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
@ -2808,7 +2810,7 @@ func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r
|
|||
return
|
||||
}
|
||||
|
||||
if _, ok := globalBucketObjectLockSys.Get(bucket); !ok {
|
||||
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); !rcfg.LockEnabled {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketObjectLockConfiguration), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
|
|
@ -34,10 +34,6 @@ import (
|
|||
"github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/cmd/rest"
|
||||
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
|
@ -471,11 +467,23 @@ func (client *peerRESTClient) DownloadProfileData() (data map[string][]byte, err
|
|||
return data, err
|
||||
}
|
||||
|
||||
// DeleteBucket - Delete notification and policies related to the bucket.
|
||||
func (client *peerRESTClient) DeleteBucket(bucket string) error {
|
||||
// LoadBucketMetadata - load bucket metadata
|
||||
func (client *peerRESTClient) LoadBucketMetadata(bucket string) error {
|
||||
values := make(url.Values)
|
||||
values.Set(peerRESTBucket, bucket)
|
||||
respBody, err := client.call(peerRESTMethodDeleteBucket, values, nil, -1)
|
||||
respBody, err := client.call(peerRESTMethodLoadBucketMetadata, values, nil, -1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer http.DrainBody(respBody)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteBucketMetadata - Delete bucket metadata
|
||||
func (client *peerRESTClient) DeleteBucketMetadata(bucket string) error {
|
||||
values := make(url.Values)
|
||||
values.Set(peerRESTBucket, bucket)
|
||||
respBody, err := client.call(peerRESTMethodDeleteBucketMetadata, values, nil, -1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -500,30 +508,6 @@ func (client *peerRESTClient) ReloadFormat(dryRun bool) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// RemoveBucketPolicy - Remove bucket policy on the peer node.
|
||||
func (client *peerRESTClient) RemoveBucketPolicy(bucket string) error {
|
||||
values := make(url.Values)
|
||||
values.Set(peerRESTBucket, bucket)
|
||||
respBody, err := client.call(peerRESTMethodBucketPolicyRemove, values, nil, -1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer http.DrainBody(respBody)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveBucketObjectLockConfig - Remove bucket object lock config on the peer node.
|
||||
func (client *peerRESTClient) RemoveBucketObjectLockConfig(bucket string) error {
|
||||
values := make(url.Values)
|
||||
values.Set(peerRESTBucket, bucket)
|
||||
respBody, err := client.call(peerRESTMethodBucketObjectLockConfigRemove, values, nil, -1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer http.DrainBody(respBody)
|
||||
return nil
|
||||
}
|
||||
|
||||
// cycleServerBloomFilter will cycle the bloom filter to start recording to index y if not already.
|
||||
// The response will contain a bloom filter starting at index x up to, but not including index y.
|
||||
// If y is 0, the response will not update y, but return the currently recorded information
|
||||
|
@ -543,156 +527,6 @@ func (client *peerRESTClient) cycleServerBloomFilter(ctx context.Context, req bl
|
|||
return &resp, gob.NewDecoder(respBody).Decode(&resp)
|
||||
}
|
||||
|
||||
// RemoveBucketQuotaConfig - Remove bucket quota config on the peer node.
|
||||
func (client *peerRESTClient) RemoveBucketQuotaConfig(bucket string) error {
|
||||
values := make(url.Values)
|
||||
values.Set(peerRESTBucket, bucket)
|
||||
respBody, err := client.call(peerRESTMethodBucketQuotaConfigRemove, values, nil, -1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer http.DrainBody(respBody)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetBucketPolicy - Set bucket policy on the peer node.
|
||||
func (client *peerRESTClient) SetBucketPolicy(bucket string, bucketPolicy *policy.Policy) error {
|
||||
values := make(url.Values)
|
||||
values.Set(peerRESTBucket, bucket)
|
||||
|
||||
var reader bytes.Buffer
|
||||
err := gob.NewEncoder(&reader).Encode(bucketPolicy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
respBody, err := client.call(peerRESTMethodBucketPolicySet, values, &reader, -1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer http.DrainBody(respBody)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveBucketLifecycle - Remove bucket lifecycle configuration on the peer node
|
||||
func (client *peerRESTClient) RemoveBucketLifecycle(bucket string) error {
|
||||
values := make(url.Values)
|
||||
values.Set(peerRESTBucket, bucket)
|
||||
respBody, err := client.call(peerRESTMethodBucketLifecycleRemove, values, nil, -1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer http.DrainBody(respBody)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetBucketLifecycle - Set bucket lifecycle configuration on the peer node
|
||||
func (client *peerRESTClient) SetBucketLifecycle(bucket string, bucketLifecycle *lifecycle.Lifecycle) error {
|
||||
values := make(url.Values)
|
||||
values.Set(peerRESTBucket, bucket)
|
||||
|
||||
var reader bytes.Buffer
|
||||
err := gob.NewEncoder(&reader).Encode(bucketLifecycle)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
respBody, err := client.call(peerRESTMethodBucketLifecycleSet, values, &reader, -1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer http.DrainBody(respBody)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveBucketSSEConfig - Remove bucket encryption configuration on the peer node
|
||||
func (client *peerRESTClient) RemoveBucketSSEConfig(bucket string) error {
|
||||
values := make(url.Values)
|
||||
values.Set(peerRESTBucket, bucket)
|
||||
respBody, err := client.call(peerRESTMethodBucketEncryptionRemove, values, nil, -1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer http.DrainBody(respBody)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetBucketSSEConfig - Set bucket encryption configuration on the peer node
|
||||
func (client *peerRESTClient) SetBucketSSEConfig(bucket string, encConfig *bucketsse.BucketSSEConfig) error {
|
||||
values := make(url.Values)
|
||||
values.Set(peerRESTBucket, bucket)
|
||||
|
||||
var reader bytes.Buffer
|
||||
err := gob.NewEncoder(&reader).Encode(encConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
respBody, err := client.call(peerRESTMethodBucketEncryptionSet, values, &reader, -1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer http.DrainBody(respBody)
|
||||
return nil
|
||||
}
|
||||
|
||||
// PutBucketNotification - Put bucket notification on the peer node.
|
||||
func (client *peerRESTClient) PutBucketNotification(bucket string, rulesMap event.RulesMap) error {
|
||||
values := make(url.Values)
|
||||
values.Set(peerRESTBucket, bucket)
|
||||
|
||||
var reader bytes.Buffer
|
||||
err := gob.NewEncoder(&reader).Encode(&rulesMap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
respBody, err := client.call(peerRESTMethodBucketNotificationPut, values, &reader, -1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer http.DrainBody(respBody)
|
||||
return nil
|
||||
}
|
||||
|
||||
// PutBucketObjectLockConfig - PUT bucket object lock configuration.
|
||||
func (client *peerRESTClient) PutBucketObjectLockConfig(bucket string, retention *objectlock.Retention) error {
|
||||
values := make(url.Values)
|
||||
values.Set(peerRESTBucket, bucket)
|
||||
|
||||
var reader bytes.Buffer
|
||||
err := gob.NewEncoder(&reader).Encode(retention)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
respBody, err := client.call(peerRESTMethodPutBucketObjectLockConfig, values, &reader, -1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer http.DrainBody(respBody)
|
||||
return nil
|
||||
}
|
||||
|
||||
// PutBucketQuotaConfig - PUT bucket quota configuration.
|
||||
func (client *peerRESTClient) PutBucketQuotaConfig(bucket string, q madmin.BucketQuota) error {
|
||||
values := make(url.Values)
|
||||
values.Set(peerRESTBucket, bucket)
|
||||
|
||||
var reader bytes.Buffer
|
||||
err := gob.NewEncoder(&reader).Encode(&q)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
respBody, err := client.call(peerRESTMethodPutBucketQuotaConfig, values, &reader, -1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer http.DrainBody(respBody)
|
||||
return nil
|
||||
}
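Together with NotificationSys.PutBucketQuotaConfig earlier in this diff and the peer server handler further below, the quota client method above completes a simple fan-out round trip. A hedged sketch of that flow; the enclosing function is hypothetical:

// Illustrative only: end-to-end propagation of a bucket quota update.
func propagateBucketQuota(ctx context.Context, bucket string, q madmin.BucketQuota) {
	// Fans out to every peer: each peerRESTClient gob-encodes q and POSTs it
	// to peerRESTMethodPutBucketQuotaConfig ("/putbucketquotaconfig").
	globalNotificationSys.PutBucketQuotaConfig(ctx, bucket, q)
	// On each peer, PutBucketQuotaConfigHandler decodes the payload and
	// caches it via globalBucketQuotaSys.Set(bucket, q).
}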
|
||||
|
||||
// DeletePolicy - delete a specific canned policy.
|
||||
func (client *peerRESTClient) DeletePolicy(policyName string) (err error) {
|
||||
values := make(url.Values)
|
||||
|
|
|
@ -17,53 +17,43 @@
|
|||
package cmd
|
||||
|
||||
const (
|
||||
peerRESTVersion = "v8"
|
||||
peerRESTVersion = "v9"
|
||||
peerRESTVersionPrefix = SlashSeparator + peerRESTVersion
|
||||
peerRESTPrefix = minioReservedBucketPath + "/peer"
|
||||
peerRESTPath = peerRESTPrefix + peerRESTVersionPrefix
|
||||
)
|
||||
|
||||
const (
|
||||
peerRESTMethodServerInfo = "/serverinfo"
|
||||
peerRESTMethodDriveOBDInfo = "/driveobdinfo"
|
||||
peerRESTMethodNetOBDInfo = "/netobdinfo"
|
||||
peerRESTMethodCPUOBDInfo = "/cpuobdinfo"
|
||||
peerRESTMethodDiskHwOBDInfo = "/diskhwobdinfo"
|
||||
peerRESTMethodOsInfoOBDInfo = "/osinfoobdinfo"
|
||||
peerRESTMethodMemOBDInfo = "/memobdinfo"
|
||||
peerRESTMethodProcOBDInfo = "/procobdinfo"
|
||||
peerRESTMethodDispatchNetOBDInfo = "/dispatchnetobdinfo"
|
||||
peerRESTMethodDeleteBucket = "/deletebucket"
|
||||
peerRESTMethodServerUpdate = "/serverupdate"
|
||||
peerRESTMethodSignalService = "/signalservice"
|
||||
peerRESTMethodBackgroundHealStatus = "/backgroundhealstatus"
|
||||
peerRESTMethodGetLocks = "/getlocks"
|
||||
peerRESTMethodBucketPolicyRemove = "/removebucketpolicy"
|
||||
peerRESTMethodLoadUser = "/loaduser"
|
||||
peerRESTMethodLoadServiceAccount = "/loadserviceaccount"
|
||||
peerRESTMethodDeleteUser = "/deleteuser"
|
||||
peerRESTMethodDeleteServiceAccount = "/deleteserviceaccount"
|
||||
peerRESTMethodLoadPolicy = "/loadpolicy"
|
||||
peerRESTMethodLoadPolicyMapping = "/loadpolicymapping"
|
||||
peerRESTMethodDeletePolicy = "/deletepolicy"
|
||||
peerRESTMethodLoadGroup = "/loadgroup"
|
||||
peerRESTMethodStartProfiling = "/startprofiling"
|
||||
peerRESTMethodDownloadProfilingData = "/downloadprofilingdata"
|
||||
peerRESTMethodBucketPolicySet = "/setbucketpolicy"
|
||||
peerRESTMethodBucketNotificationPut = "/putbucketnotification"
|
||||
peerRESTMethodReloadFormat = "/reloadformat"
|
||||
peerRESTMethodCycleBloom = "/cyclebloom"
|
||||
peerRESTMethodTrace = "/trace"
|
||||
peerRESTMethodListen = "/listen"
|
||||
peerRESTMethodLog = "/log"
|
||||
peerRESTMethodBucketLifecycleSet = "/setbucketlifecycle"
|
||||
peerRESTMethodBucketLifecycleRemove = "/removebucketlifecycle"
|
||||
peerRESTMethodBucketEncryptionSet = "/setbucketencryption"
|
||||
peerRESTMethodBucketEncryptionRemove = "/removebucketencryption"
|
||||
peerRESTMethodPutBucketObjectLockConfig = "/putbucketobjectlockconfig"
|
||||
peerRESTMethodBucketObjectLockConfigRemove = "/removebucketobjectlockconfig"
|
||||
peerRESTMethodPutBucketQuotaConfig = "/putbucketquotaconfig"
|
||||
peerRESTMethodBucketQuotaConfigRemove = "/removebucketquotaconfig"
|
||||
peerRESTMethodServerInfo = "/serverinfo"
|
||||
peerRESTMethodDriveOBDInfo = "/driveobdinfo"
|
||||
peerRESTMethodNetOBDInfo = "/netobdinfo"
|
||||
peerRESTMethodCPUOBDInfo = "/cpuobdinfo"
|
||||
peerRESTMethodDiskHwOBDInfo = "/diskhwobdinfo"
|
||||
peerRESTMethodOsInfoOBDInfo = "/osinfoobdinfo"
|
||||
peerRESTMethodMemOBDInfo = "/memobdinfo"
|
||||
peerRESTMethodProcOBDInfo = "/procobdinfo"
|
||||
peerRESTMethodDispatchNetOBDInfo = "/dispatchnetobdinfo"
|
||||
peerRESTMethodDeleteBucketMetadata = "/deletebucketmetadata"
|
||||
peerRESTMethodLoadBucketMetadata = "/loadbucketmetadata"
|
||||
peerRESTMethodServerUpdate = "/serverupdate"
|
||||
peerRESTMethodSignalService = "/signalservice"
|
||||
peerRESTMethodBackgroundHealStatus = "/backgroundhealstatus"
|
||||
peerRESTMethodGetLocks = "/getlocks"
|
||||
peerRESTMethodLoadUser = "/loaduser"
|
||||
peerRESTMethodLoadServiceAccount = "/loadserviceaccount"
|
||||
peerRESTMethodDeleteUser = "/deleteuser"
|
||||
peerRESTMethodDeleteServiceAccount = "/deleteserviceaccount"
|
||||
peerRESTMethodLoadPolicy = "/loadpolicy"
|
||||
peerRESTMethodLoadPolicyMapping = "/loadpolicymapping"
|
||||
peerRESTMethodDeletePolicy = "/deletepolicy"
|
||||
peerRESTMethodLoadGroup = "/loadgroup"
|
||||
peerRESTMethodStartProfiling = "/startprofiling"
|
||||
peerRESTMethodDownloadProfilingData = "/downloadprofilingdata"
|
||||
peerRESTMethodReloadFormat = "/reloadformat"
|
||||
peerRESTMethodCycleBloom = "/cyclebloom"
|
||||
peerRESTMethodTrace = "/trace"
|
||||
peerRESTMethodListen = "/listen"
|
||||
peerRESTMethodLog = "/log"
|
||||
)
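Each peerRESTMethod* constant above pairs a client call with a registered route; for the two new metadata endpoints the wiring elsewhere in this diff is, condensed as a comment map (no new code):

// peerRESTMethodLoadBucketMetadata   -> client: peerRESTClient.LoadBucketMetadata
//                                       server: LoadBucketMetadataHandler (registered below)
// peerRESTMethodDeleteBucketMetadata -> client: peerRESTClient.DeleteBucketMetadata
//                                       server: DeleteBucketMetadataHandler (registered below)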
|
||||
|
||||
const (
|
||||
|
|
|
@ -30,18 +30,13 @@ import (
|
|||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
trace "github.com/minio/minio/pkg/trace"
|
||||
)
|
||||
|
||||
// To abstract a node over network.
|
||||
type peerRESTServer struct {
|
||||
}
|
||||
type peerRESTServer struct{}
|
||||
|
||||
// GetLocksHandler - returns list of older lock from the server.
|
||||
func (s *peerRESTServer) GetLocksHandler(w http.ResponseWriter, r *http.Request) {
|
||||
|
@ -566,8 +561,8 @@ func (s *peerRESTServer) MemOBDInfoHandler(w http.ResponseWriter, r *http.Reques
|
|||
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
|
||||
}
|
||||
|
||||
// DeleteBucketHandler - Delete notification and policies related to the bucket.
|
||||
func (s *peerRESTServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// DeleteBucketMetadataHandler - Delete in memory bucket metadata
|
||||
func (s *peerRESTServer) DeleteBucketMetadataHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if !s.IsValid(w, r) {
|
||||
s.writeErrorResponse(w, errors.New("Invalid request"))
|
||||
return
|
||||
|
@@ -580,15 +575,41 @@ func (s *peerRESTServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Requ
		return
	}

	globalNotificationSys.RemoveNotification(bucketName)
	globalPolicySys.Remove(bucketName)
	globalBucketObjectLockSys.Remove(bucketName)
	globalBucketQuotaSys.Remove(bucketName)
	globalLifecycleSys.Remove(bucketName)

	globalBucketMetadataSys.Remove(bucketName)
	w.(http.Flusher).Flush()
}

// LoadBucketMetadataHandler - reloads in memory bucket metadata
func (s *peerRESTServer) LoadBucketMetadataHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		s.writeErrorResponse(w, errors.New("Invalid request"))
		return
	}

	vars := mux.Vars(r)
	bucketName := vars[peerRESTBucket]
	if bucketName == "" {
		s.writeErrorResponse(w, errors.New("Bucket name is missing"))
		return
	}

	objAPI := newObjectLayerWithoutSafeModeFn()
	if objAPI == nil {
		s.writeErrorResponse(w, errServerNotInitialized)
		return
	}

	meta, err := loadBucketMetadata(r.Context(), objAPI, bucketName)
	if err != nil {
		// A missing config is tolerated here: the bucket may simply have no
		// stored metadata yet, so the loaded value is cached as-is below.
		if !errors.Is(err, errConfigNotFound) {
			s.writeErrorResponse(w, err)
			return
		}
	}

	globalBucketMetadataSys.Set(bucketName, meta)
}
// ReloadFormatHandler - Reload Format.
|
||||
func (s *peerRESTServer) ReloadFormatHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if !s.IsValid(w, r) {
|
||||
|
@ -628,137 +649,6 @@ func (s *peerRESTServer) ReloadFormatHandler(w http.ResponseWriter, r *http.Requ
|
|||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// RemoveBucketPolicyHandler - Remove bucket policy.
|
||||
func (s *peerRESTServer) RemoveBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if !s.IsValid(w, r) {
|
||||
s.writeErrorResponse(w, errors.New("Invalid request"))
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucketName := vars[peerRESTBucket]
|
||||
if bucketName == "" {
|
||||
s.writeErrorResponse(w, errors.New("Bucket name is missing"))
|
||||
return
|
||||
}
|
||||
|
||||
globalPolicySys.Remove(bucketName)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// SetBucketPolicyHandler - Set bucket policy.
|
||||
func (s *peerRESTServer) SetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if !s.IsValid(w, r) {
|
||||
s.writeErrorResponse(w, errors.New("Invalid request"))
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucketName := vars[peerRESTBucket]
|
||||
if bucketName == "" {
|
||||
s.writeErrorResponse(w, errors.New("Bucket name is missing"))
|
||||
return
|
||||
}
|
||||
|
||||
if r.ContentLength < 0 {
|
||||
s.writeErrorResponse(w, errInvalidArgument)
|
||||
return
|
||||
}
|
||||
|
||||
var policyData = &policy.Policy{}
|
||||
err := gob.NewDecoder(r.Body).Decode(policyData)
|
||||
if err != nil {
|
||||
s.writeErrorResponse(w, err)
|
||||
return
|
||||
}
|
||||
globalPolicySys.Set(bucketName, policyData)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// RemoveBucketLifecycleHandler - Remove bucket lifecycle.
|
||||
func (s *peerRESTServer) RemoveBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if !s.IsValid(w, r) {
|
||||
s.writeErrorResponse(w, errors.New("Invalid request"))
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucketName := vars[peerRESTBucket]
|
||||
if bucketName == "" {
|
||||
s.writeErrorResponse(w, errors.New("Bucket name is missing"))
|
||||
return
|
||||
}
|
||||
|
||||
globalLifecycleSys.Remove(bucketName)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// SetBucketLifecycleHandler - Set bucket lifecycle.
|
||||
func (s *peerRESTServer) SetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
bucketName := vars[peerRESTBucket]
|
||||
if bucketName == "" {
|
||||
s.writeErrorResponse(w, errors.New("Bucket name is missing"))
|
||||
return
|
||||
}
|
||||
if r.ContentLength < 0 {
|
||||
s.writeErrorResponse(w, errInvalidArgument)
|
||||
return
|
||||
}
|
||||
|
||||
var lifecycleData = &lifecycle.Lifecycle{}
|
||||
err := gob.NewDecoder(r.Body).Decode(lifecycleData)
|
||||
if err != nil {
|
||||
s.writeErrorResponse(w, err)
|
||||
return
|
||||
}
|
||||
globalLifecycleSys.Set(bucketName, lifecycleData)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// RemoveBucketSSEConfigHandler - Remove bucket encryption.
|
||||
func (s *peerRESTServer) RemoveBucketSSEConfigHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if !s.IsValid(w, r) {
|
||||
s.writeErrorResponse(w, errors.New("Invalid request"))
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucketName := vars[peerRESTBucket]
|
||||
if bucketName == "" {
|
||||
s.writeErrorResponse(w, errors.New("Bucket name is missing"))
|
||||
return
|
||||
}
|
||||
|
||||
globalBucketSSEConfigSys.Remove(bucketName)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// SetBucketSSEConfigHandler - Set bucket encryption.
|
||||
func (s *peerRESTServer) SetBucketSSEConfigHandler(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
bucketName := vars[peerRESTBucket]
|
||||
if bucketName == "" {
|
||||
s.writeErrorResponse(w, errors.New("Bucket name is missing"))
|
||||
return
|
||||
}
|
||||
|
||||
if r.ContentLength < 0 {
|
||||
s.writeErrorResponse(w, errInvalidArgument)
|
||||
return
|
||||
}
|
||||
|
||||
var encConfig = &bucketsse.BucketSSEConfig{}
|
||||
err := gob.NewDecoder(r.Body).Decode(encConfig)
|
||||
if err != nil {
|
||||
s.writeErrorResponse(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
globalBucketSSEConfigSys.Set(bucketName, encConfig)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// CycleServerBloomFilterHandler cycles bloom filter on server.
|
||||
func (s *peerRESTServer) CycleServerBloomFilterHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if !s.IsValid(w, r) {
|
||||
|
@ -813,102 +703,6 @@ func (s *peerRESTServer) PutBucketNotificationHandler(w http.ResponseWriter, r *
|
|||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// RemoveBucketObjectLockConfigHandler - handles DELETE bucket object lock configuration.
|
||||
func (s *peerRESTServer) RemoveBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if !s.IsValid(w, r) {
|
||||
s.writeErrorResponse(w, errors.New("Invalid request"))
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucketName := vars[peerRESTBucket]
|
||||
if bucketName == "" {
|
||||
s.writeErrorResponse(w, errors.New("Bucket name is missing"))
|
||||
return
|
||||
}
|
||||
|
||||
globalBucketObjectLockSys.Remove(bucketName)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// PutBucketObjectLockConfigHandler - handles PUT bucket object lock configuration.
|
||||
func (s *peerRESTServer) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if !s.IsValid(w, r) {
|
||||
s.writeErrorResponse(w, errors.New("Invalid request"))
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucketName := vars[peerRESTBucket]
|
||||
if bucketName == "" {
|
||||
s.writeErrorResponse(w, errors.New("Bucket name is missing"))
|
||||
return
|
||||
}
|
||||
|
||||
if r.ContentLength < 0 {
|
||||
s.writeErrorResponse(w, errInvalidArgument)
|
||||
return
|
||||
}
|
||||
|
||||
var retention = &objectlock.Retention{}
|
||||
err := gob.NewDecoder(r.Body).Decode(retention)
|
||||
if err != nil {
|
||||
s.writeErrorResponse(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
globalBucketObjectLockSys.Set(bucketName, retention)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// PutBucketQuotaConfigHandler - handles PUT bucket quota configuration.
|
||||
func (s *peerRESTServer) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if !s.IsValid(w, r) {
|
||||
s.writeErrorResponse(w, errors.New("Invalid request"))
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucketName := vars[peerRESTBucket]
|
||||
if bucketName == "" {
|
||||
s.writeErrorResponse(w, errors.New("Bucket name is missing"))
|
||||
return
|
||||
}
|
||||
|
||||
var quota madmin.BucketQuota
|
||||
if r.ContentLength < 0 {
|
||||
s.writeErrorResponse(w, errInvalidArgument)
|
||||
return
|
||||
}
|
||||
|
||||
err := gob.NewDecoder(r.Body).Decode(&quota)
|
||||
if err != nil {
|
||||
s.writeErrorResponse(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
globalBucketQuotaSys.Set(bucketName, quota)
|
||||
w.(http.Flusher).Flush()
|
||||
}
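
The quota handler above follows the same pattern as the other peer REST handlers: reject bodies without a content length, gob-decode the payload, then update the in-memory system. Below is a minimal, self-contained sketch of that gob-over-HTTP round trip; the BucketQuota struct and its fields here are illustrative stand-ins, not the madmin type the handler actually decodes.

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"
)

// BucketQuota is a hypothetical payload; field names are illustrative only.
type BucketQuota struct {
	Quota uint64
	Type  string
}

func main() {
	// Server side: decode the gob body, mirroring the peer handler's shape.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.ContentLength < 0 {
			http.Error(w, "content length required", http.StatusBadRequest)
			return
		}
		var quota BucketQuota
		if err := gob.NewDecoder(r.Body).Decode(&quota); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		fmt.Fprintf(w, "quota=%d type=%s", quota.Quota, quota.Type)
	}))
	defer srv.Close()

	// Client side: gob-encode the payload into the request body.
	var body bytes.Buffer
	if err := gob.NewEncoder(&body).Encode(BucketQuota{Quota: 1 << 30, Type: "hard"}); err != nil {
		log.Fatal(err)
	}
	resp, err := http.Post(srv.URL, "application/octet-stream", &body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```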
|
||||
|
||||
// RemoveBucketQuotaConfigHandler - handles DELETE bucket quota configuration.
|
||||
func (s *peerRESTServer) RemoveBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if !s.IsValid(w, r) {
|
||||
s.writeErrorResponse(w, errors.New("Invalid request"))
|
||||
return
|
||||
}
|
||||
|
||||
vars := mux.Vars(r)
|
||||
bucketName := vars[peerRESTBucket]
|
||||
if bucketName == "" {
|
||||
s.writeErrorResponse(w, errors.New("Bucket name is missing"))
|
||||
return
|
||||
}
|
||||
|
||||
globalBucketQuotaSys.Remove(bucketName)
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// ServerUpdateHandler - updates the current server.
|
||||
func (s *peerRESTServer) ServerUpdateHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if !s.IsValid(w, r) {
|
||||
|
@ -1183,13 +977,11 @@ func registerPeerRESTHandlers(router *mux.Router) {
|
|||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodNetOBDInfo).HandlerFunc(httpTraceHdrs(server.NetOBDInfoHandler))
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDispatchNetOBDInfo).HandlerFunc(httpTraceHdrs(server.DispatchNetOBDInfoHandler))
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodCycleBloom).HandlerFunc(httpTraceHdrs(server.CycleServerBloomFilterHandler))
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDeleteBucket).HandlerFunc(httpTraceHdrs(server.DeleteBucketHandler)).Queries(restQueries(peerRESTBucket)...)
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDeleteBucketMetadata).HandlerFunc(httpTraceHdrs(server.DeleteBucketMetadataHandler)).Queries(restQueries(peerRESTBucket)...)
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodLoadBucketMetadata).HandlerFunc(httpTraceHdrs(server.LoadBucketMetadataHandler)).Queries(restQueries(peerRESTBucket)...)
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSignalService).HandlerFunc(httpTraceHdrs(server.SignalServiceHandler)).Queries(restQueries(peerRESTSignal)...)
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodServerUpdate).HandlerFunc(httpTraceHdrs(server.ServerUpdateHandler)).Queries(restQueries(peerRESTUpdateURL, peerRESTSha256Hex, peerRESTLatestRelease)...)
|
||||
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodBucketPolicyRemove).HandlerFunc(httpTraceAll(server.RemoveBucketPolicyHandler)).Queries(restQueries(peerRESTBucket)...)
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodBucketPolicySet).HandlerFunc(httpTraceHdrs(server.SetBucketPolicyHandler)).Queries(restQueries(peerRESTBucket)...)
|
||||
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDeletePolicy).HandlerFunc(httpTraceAll(server.DeletePolicyHandler)).Queries(restQueries(peerRESTPolicy)...)
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodLoadPolicy).HandlerFunc(httpTraceAll(server.LoadPolicyHandler)).Queries(restQueries(peerRESTPolicy)...)
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodLoadPolicyMapping).HandlerFunc(httpTraceAll(server.LoadPolicyMappingHandler)).Queries(restQueries(peerRESTUserOrGroup)...)
|
||||
|
@ -1201,20 +993,9 @@ func registerPeerRESTHandlers(router *mux.Router) {
|
|||
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodStartProfiling).HandlerFunc(httpTraceAll(server.StartProfilingHandler)).Queries(restQueries(peerRESTProfiler)...)
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDownloadProfilingData).HandlerFunc(httpTraceHdrs(server.DownloadProfilingDataHandler))
|
||||
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodBucketNotificationPut).HandlerFunc(httpTraceHdrs(server.PutBucketNotificationHandler)).Queries(restQueries(peerRESTBucket)...)
|
||||
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodReloadFormat).HandlerFunc(httpTraceHdrs(server.ReloadFormatHandler)).Queries(restQueries(peerRESTDryRun)...)
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodBucketLifecycleSet).HandlerFunc(httpTraceHdrs(server.SetBucketLifecycleHandler)).Queries(restQueries(peerRESTBucket)...)
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodBucketLifecycleRemove).HandlerFunc(httpTraceHdrs(server.RemoveBucketLifecycleHandler)).Queries(restQueries(peerRESTBucket)...)
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodBucketEncryptionSet).HandlerFunc(httpTraceHdrs(server.SetBucketSSEConfigHandler)).Queries(restQueries(peerRESTBucket)...)
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodBucketEncryptionRemove).HandlerFunc(httpTraceHdrs(server.RemoveBucketSSEConfigHandler)).Queries(restQueries(peerRESTBucket)...)
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodTrace).HandlerFunc(server.TraceHandler)
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodListen).HandlerFunc(httpTraceHdrs(server.ListenHandler))
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodBackgroundHealStatus).HandlerFunc(server.BackgroundHealStatusHandler)
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodLog).HandlerFunc(server.ConsoleLogHandler)
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodPutBucketObjectLockConfig).HandlerFunc(httpTraceHdrs(server.PutBucketObjectLockConfigHandler)).Queries(restQueries(peerRESTBucket)...)
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodBucketObjectLockConfigRemove).HandlerFunc(httpTraceHdrs(server.RemoveBucketObjectLockConfigHandler)).Queries(restQueries(peerRESTBucket)...)
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodPutBucketQuotaConfig).HandlerFunc(httpTraceHdrs(server.PutBucketQuotaConfigHandler)).Queries(restQueries(peerRESTBucket)...)
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodBucketQuotaConfigRemove).HandlerFunc(httpTraceHdrs(server.RemoveBucketQuotaConfigHandler)).Queries(restQueries(peerRESTBucket)...)
|
||||
}
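
registerPeerRESTHandlers wires each handler onto a gorilla/mux subrouter keyed by method, path, and required query parameters. The sketch below isolates that matching behaviour with a single hypothetical route (the path and parameter name are illustrative, not MinIO's actual peer REST constants): a request missing the required query parameter simply fails to match and returns 404.

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	router := mux.NewRouter()
	// Matches POST /v1/load-bucket-metadata?bucket=<name>; the query-defined
	// variable is available through mux.Vars.
	router.Methods(http.MethodPost).
		Path("/v1/load-bucket-metadata").
		Queries("bucket", "{bucket:.*}").
		HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintf(w, "bucket=%s\n", mux.Vars(r)["bucket"])
		})

	srv := httptest.NewServer(router)
	defer srv.Close()

	for _, url := range []string{
		srv.URL + "/v1/load-bucket-metadata?bucket=mybucket", // matches
		srv.URL + "/v1/load-bucket-metadata",                 // missing query: 404
	} {
		resp, err := http.Post(url, "application/octet-stream", nil)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(url, resp.StatusCode)
		resp.Body.Close()
	}
}
```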
|
||||
|
|
|
@ -26,129 +26,9 @@ import (
|
|||
"github.com/minio/minio/pkg/bucket/policy/condition"
|
||||
)
|
||||
|
||||
func TestPolicySysSet(t *testing.T) {
|
||||
case1PolicySys := NewPolicySys()
|
||||
case1Policy := policy.Policy{
|
||||
Version: policy.DefaultVersion,
|
||||
Statements: []policy.Statement{
|
||||
policy.NewStatement(
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(policy.PutObjectAction),
|
||||
policy.NewResourceSet(policy.NewResource("mybucket", "/myobject*")),
|
||||
condition.NewFunctions(),
|
||||
),
|
||||
},
|
||||
}
|
||||
case1Result := NewPolicySys()
|
||||
case1Result.bucketPolicyMap["mybucket"] = &case1Policy
|
||||
|
||||
case2PolicySys := NewPolicySys()
|
||||
case2PolicySys.bucketPolicyMap["mybucket"] = &case1Policy
|
||||
case2Policy := policy.Policy{
|
||||
Version: policy.DefaultVersion,
|
||||
Statements: []policy.Statement{
|
||||
policy.NewStatement(
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(policy.GetObjectAction),
|
||||
policy.NewResourceSet(policy.NewResource("mybucket", "/myobject*")),
|
||||
condition.NewFunctions(),
|
||||
),
|
||||
},
|
||||
}
|
||||
case2Result := NewPolicySys()
|
||||
case2Result.bucketPolicyMap["mybucket"] = &case2Policy
|
||||
|
||||
case3PolicySys := NewPolicySys()
|
||||
case3PolicySys.bucketPolicyMap["mybucket"] = &case2Policy
|
||||
case3Policy := policy.Policy{
|
||||
ID: "MyPolicyForMyBucket",
|
||||
Version: policy.DefaultVersion,
|
||||
}
|
||||
case3Result := NewPolicySys()
|
||||
|
||||
testCases := []struct {
|
||||
policySys *PolicySys
|
||||
bucketName string
|
||||
bucketPolicy policy.Policy
|
||||
expectedResult *PolicySys
|
||||
}{
|
||||
{case1PolicySys, "mybucket", case1Policy, case1Result},
|
||||
{case2PolicySys, "mybucket", case2Policy, case2Result},
|
||||
{case3PolicySys, "mybucket", case3Policy, case3Result},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.policySys
|
||||
result.Set(testCase.bucketName, &testCase.bucketPolicy)
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPolicySysRemove(t *testing.T) {
|
||||
case1Policy := policy.Policy{
|
||||
Version: policy.DefaultVersion,
|
||||
Statements: []policy.Statement{
|
||||
policy.NewStatement(
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(policy.PutObjectAction),
|
||||
policy.NewResourceSet(policy.NewResource("mybucket", "/myobject*")),
|
||||
condition.NewFunctions(),
|
||||
),
|
||||
},
|
||||
}
|
||||
case1PolicySys := NewPolicySys()
|
||||
case1PolicySys.bucketPolicyMap["mybucket"] = &case1Policy
|
||||
case1Result := NewPolicySys()
|
||||
|
||||
case2Policy := policy.Policy{
|
||||
Version: policy.DefaultVersion,
|
||||
Statements: []policy.Statement{
|
||||
policy.NewStatement(
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(policy.GetObjectAction),
|
||||
policy.NewResourceSet(policy.NewResource("mybucket", "/myobject*")),
|
||||
condition.NewFunctions(),
|
||||
),
|
||||
},
|
||||
}
|
||||
case2PolicySys := NewPolicySys()
|
||||
case2PolicySys.bucketPolicyMap["mybucket"] = &case2Policy
|
||||
case2Result := NewPolicySys()
|
||||
case2Result.bucketPolicyMap["mybucket"] = &case2Policy
|
||||
|
||||
case3PolicySys := NewPolicySys()
|
||||
case3Result := NewPolicySys()
|
||||
|
||||
testCases := []struct {
|
||||
policySys *PolicySys
|
||||
bucketName string
|
||||
expectedResult *PolicySys
|
||||
}{
|
||||
{case1PolicySys, "mybucket", case1Result},
|
||||
{case2PolicySys, "yourbucket", case2Result},
|
||||
{case3PolicySys, "mybucket", case3Result},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.policySys
|
||||
result.Remove(testCase.bucketName)
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPolicySysIsAllowed(t *testing.T) {
|
||||
policySys := NewPolicySys()
|
||||
policySys.Set("mybucket", &policy.Policy{
|
||||
policySys.bucketPolicyMap["mybucket"] = &policy.Policy{
|
||||
Version: policy.DefaultVersion,
|
||||
Statements: []policy.Statement{
|
||||
policy.NewStatement(
|
||||
|
@ -166,7 +46,7 @@ func TestPolicySysIsAllowed(t *testing.T) {
|
|||
condition.NewFunctions(),
|
||||
),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
anonGetBucketLocationArgs := policy.Args{
|
||||
AccountName: "Q3AM3UQ867SPQQA43P2F",
|
||||
|
|
|
@ -141,6 +141,9 @@ func newAllSubsystems() {
|
|||
// Create new notification system and initialize notification targets
|
||||
globalNotificationSys = NewNotificationSys(globalEndpoints)
|
||||
|
||||
// Create new bucket metadata system.
|
||||
globalBucketMetadataSys = NewBucketMetadataSys()
|
||||
|
||||
// Create a new config system.
|
||||
globalConfigSys = NewConfigSys()
|
||||
|
||||
|
@ -322,6 +325,11 @@ func initAllSubsystems(newObject ObjectLayer) (err error) {
|
|||
return fmt.Errorf("Unable to initialize IAM system: %w", err)
|
||||
}
|
||||
|
||||
// Initialize bucket metadata sub-system.
|
||||
if err = globalBucketMetadataSys.Init(GlobalContext, buckets, newObject); err != nil {
|
||||
return fmt.Errorf("Unable to initialize bucket metadata sub-system: %w", err)
|
||||
}
|
||||
|
||||
// Initialize notification system.
|
||||
if err = globalNotificationSys.Init(buckets, newObject); err != nil {
|
||||
return fmt.Errorf("Unable to initialize notification system: %w", err)
|
||||
|
|
|
@ -174,33 +174,7 @@ func prepareFS() (ObjectLayer, string, error) {
|
|||
}
|
||||
|
||||
func prepareXLSets32(ctx context.Context) (ObjectLayer, []string, error) {
|
||||
fsDirs1, err := getRandomDisks(16)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
endpoints1 := mustGetNewEndpoints(fsDirs1...)
|
||||
fsDirs2, err := getRandomDisks(16)
|
||||
if err != nil {
|
||||
removeRoots(fsDirs1)
|
||||
return nil, nil, err
|
||||
}
|
||||
endpoints2 := mustGetNewEndpoints(fsDirs2...)
|
||||
|
||||
endpoints := append(endpoints1, endpoints2...)
|
||||
fsDirs := append(fsDirs1, fsDirs2...)
|
||||
storageDisks, format, err := waitForFormatXL(true, endpoints, 1, 2, 16, "")
|
||||
if err != nil {
|
||||
removeRoots(fsDirs)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
objAPI, err := newXLSets(ctx, endpoints, storageDisks, format)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return objAPI, fsDirs, nil
|
||||
return prepareXL(ctx, 32)
|
||||
}
|
||||
|
||||
func prepareXL(ctx context.Context, nDisks int) (ObjectLayer, []string, error) {
|
||||
|
@ -1580,16 +1554,9 @@ func newTestObjectLayer(ctx context.Context, endpointZones EndpointZones) (newOb
|
|||
return nil, err
|
||||
}
|
||||
|
||||
globalConfigSys = NewConfigSys()
|
||||
newAllSubsystems()
|
||||
|
||||
globalIAMSys = NewIAMSys()
|
||||
globalIAMSys.Init(GlobalContext, z)
|
||||
|
||||
globalPolicySys = NewPolicySys()
|
||||
globalPolicySys.Init(nil, z)
|
||||
|
||||
globalNotificationSys = NewNotificationSys(endpointZones)
|
||||
globalNotificationSys.Init(nil, z)
|
||||
initAllSubsystems(z)
|
||||
|
||||
return z, nil
|
||||
}
|
||||
|
@ -1633,6 +1600,10 @@ func removeDiskN(disks []string, n int) {
|
|||
// initializes the root and returns its path.
|
||||
// return credentials.
|
||||
func initAPIHandlerTest(obj ObjectLayer, endpoints []string) (string, http.Handler, error) {
|
||||
newAllSubsystems()
|
||||
|
||||
initAllSubsystems(obj)
|
||||
|
||||
// get random bucket name.
|
||||
bucketName := getRandomBucketName()
|
||||
|
||||
|
@ -1682,8 +1653,6 @@ func prepareTestBackend(ctx context.Context, instanceType string) (ObjectLayer,
|
|||
// policyFunc - function to return bucketPolicy statement which would permit the anonymous request to be served.
|
||||
// The test works in 2 steps, here is the description of the steps.
|
||||
// STEP 1: Call the handler with the unsigned HTTP request (anonReq), assert for the `ErrAccessDenied` error response.
|
||||
// STEP 2: Set the policy to allow the unsigned request, use the policyFunc to obtain the relevant statement and call
|
||||
// the handler again to verify its success.
|
||||
func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketName, objectName, instanceType string, apiRouter http.Handler,
|
||||
anonReq *http.Request, bucketPolicy *policy.Policy) {
|
||||
|
||||
|
@ -1708,7 +1677,6 @@ func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketN
|
|||
// creating 2 read closer (to set as request body) from the body content.
|
||||
readerOne := ioutil.NopCloser(bytes.NewBuffer(buf))
|
||||
readerTwo := ioutil.NopCloser(bytes.NewBuffer(buf))
|
||||
readerThree := ioutil.NopCloser(bytes.NewBuffer(buf))
|
||||
|
||||
anonReq.Body = readerOne
|
||||
|
||||
|
@ -1744,39 +1712,8 @@ func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketN
|
|||
}
|
||||
}
|
||||
|
||||
if err := obj.SetBucketPolicy(context.Background(), bucketName, bucketPolicy); err != nil {
|
||||
t.Fatalf("unexpected error. %v", err)
|
||||
}
|
||||
globalPolicySys.Set(bucketName, bucketPolicy)
|
||||
defer globalPolicySys.Remove(bucketName)
|
||||
|
||||
// now call the handler again with the unsigned/anonymous request, it should be accepted.
|
||||
rec = httptest.NewRecorder()
|
||||
|
||||
anonReq.Body = readerTwo
|
||||
|
||||
apiRouter.ServeHTTP(rec, anonReq)
|
||||
|
||||
var expectedHTTPStatus int
|
||||
// expectedHTTPStatus returns 204 (http.StatusNoContent) on success.
|
||||
if testName == "TestAPIDeleteObjectHandler" || testName == "TestAPIAbortMultipartHandler" {
|
||||
expectedHTTPStatus = http.StatusNoContent
|
||||
} else if strings.Contains(testName, "BucketPolicyHandler") || testName == "ListBucketsHandler" {
|
||||
// BucketPolicyHandlers and `ListBucketsHandler` don't support anonymous requests; policy changes should allow unsigned requests.
|
||||
expectedHTTPStatus = http.StatusForbidden
|
||||
} else {
|
||||
// other API handlers return 200OK on success.
|
||||
expectedHTTPStatus = http.StatusOK
|
||||
}
|
||||
|
||||
// compare the HTTP response status code with the expected one.
|
||||
if rec.Code != expectedHTTPStatus {
|
||||
t.Fatal(failTestStr(anonTestStr, fmt.Sprintf("Expected the anonymous HTTP request to be served after the policy changes\n,Expected response HTTP status code to be %d, got %d",
|
||||
expectedHTTPStatus, rec.Code)))
|
||||
}
|
||||
|
||||
// test for unknown auth case.
|
||||
anonReq.Body = readerThree
|
||||
anonReq.Body = readerTwo
|
||||
// Setting the `Authorization` header to a random value so that the signature falls into unknown auth case.
|
||||
anonReq.Header.Set("Authorization", "nothingElse")
|
||||
// initialize new response recorder.
|
||||
|
@ -1877,6 +1814,7 @@ func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints [
|
|||
if err != nil {
|
||||
t.Fatalf("Initialization of object layer failed for single node setup: %s", err)
|
||||
}
|
||||
|
||||
bucketFS, fsAPIRouter, err := initAPIHandlerTest(objLayer, endpoints)
|
||||
if err != nil {
|
||||
t.Fatalf("Initialization of API handler tests failed: <ERROR> %s", err)
|
||||
|
@ -1888,17 +1826,6 @@ func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints [
|
|||
t.Fatalf("Unable to initialize server config. %s", err)
|
||||
}
|
||||
|
||||
newAllSubsystems()
|
||||
|
||||
globalIAMSys.Init(GlobalContext, objLayer)
|
||||
|
||||
buckets, err := objLayer.ListBuckets(context.Background())
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to list buckets on backend %s", err)
|
||||
}
|
||||
|
||||
globalPolicySys.Init(buckets, objLayer)
|
||||
|
||||
credentials := globalActiveCred
|
||||
|
||||
// Executing the object layer tests for single node setup.
|
||||
|
@ -1908,6 +1835,7 @@ func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints [
|
|||
if err != nil {
|
||||
t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
|
||||
}
|
||||
|
||||
bucketXL, xlAPIRouter, err := initAPIHandlerTest(objLayer, endpoints)
|
||||
if err != nil {
|
||||
t.Fatalf("Initialzation of API handler tests failed: <ERROR> %s", err)
|
||||
|
@ -1942,33 +1870,28 @@ func ExecObjectLayerTest(t TestErrHandler, objTest objTestType) {
|
|||
t.Fatalf("Initialization of object layer failed for single node setup: %s", err)
|
||||
}
|
||||
|
||||
newAllSubsystems()
|
||||
|
||||
// initialize the server and obtain the credentials and root.
|
||||
// credentials are necessary to sign the HTTP request.
|
||||
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
|
||||
t.Fatal("Unexpected error", err)
|
||||
}
|
||||
|
||||
globalIAMSys = NewIAMSys()
|
||||
globalIAMSys.Init(ctx, objLayer)
|
||||
|
||||
buckets, err := objLayer.ListBuckets(context.Background())
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to list buckets on backend %s", err)
|
||||
}
|
||||
|
||||
globalPolicySys = NewPolicySys()
|
||||
globalPolicySys.Init(buckets, objLayer)
|
||||
|
||||
globalBucketSSEConfigSys = NewBucketSSEConfigSys()
|
||||
globalBucketSSEConfigSys.Init(buckets, objLayer)
|
||||
initAllSubsystems(objLayer)
|
||||
|
||||
// Executing the object layer tests for single node setup.
|
||||
objTest(objLayer, FSTestStr, t)
|
||||
|
||||
newAllSubsystems()
|
||||
|
||||
objLayer, fsDirs, err := prepareXLSets32(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
|
||||
}
|
||||
|
||||
initAllSubsystems(objLayer)
|
||||
|
||||
defer removeRoots(append(fsDirs, fsDir))
|
||||
// Executing the object layer tests for XL.
|
||||
objTest(objLayer, XLTestStr, t)
|
||||
|
|
|
@ -266,7 +266,7 @@ func (web *webAPIHandlers) DeleteBucket(r *http.Request, args *RemoveBucketArgs,
|
|||
}
|
||||
}
|
||||
|
||||
globalNotificationSys.DeleteBucket(ctx, args.BucketName)
|
||||
globalNotificationSys.DeleteBucketMetadata(ctx, args.BucketName)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -1034,8 +1034,8 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
// Check if bucket encryption is enabled
|
||||
_, encEnabled := globalBucketSSEConfigSys.Get(bucket)
|
||||
if (globalAutoEncryption || encEnabled) && !crypto.SSEC.IsRequested(r.Header) {
|
||||
_, err = globalBucketSSEConfigSys.Get(bucket)
|
||||
if (globalAutoEncryption || err == nil) && !crypto.SSEC.IsRequested(r.Header) {
|
||||
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
|
||||
}
|
||||
|
||||
|
@ -1655,12 +1655,11 @@ func (web *webAPIHandlers) GetBucketPolicy(r *http.Request, args *GetBucketPolic
|
|||
return toJSONError(ctx, err, args.BucketName)
|
||||
}
|
||||
} else {
|
||||
bucketPolicy, err := objectAPI.GetBucketPolicy(ctx, args.BucketName)
|
||||
bucketPolicy, err := globalPolicySys.Get(args.BucketName)
|
||||
if err != nil {
|
||||
if _, ok := err.(BucketPolicyNotFound); !ok {
|
||||
return toJSONError(ctx, err, args.BucketName)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
policyInfo, err = PolicyToBucketAccessPolicy(bucketPolicy)
|
||||
|
@ -1750,7 +1749,7 @@ func (web *webAPIHandlers) ListAllBucketPolicies(r *http.Request, args *ListAllB
|
|||
}
|
||||
}
|
||||
} else {
|
||||
bucketPolicy, err := objectAPI.GetBucketPolicy(ctx, args.BucketName)
|
||||
bucketPolicy, err := globalPolicySys.Get(args.BucketName)
|
||||
if err != nil {
|
||||
if _, ok := err.(BucketPolicyNotFound); !ok {
|
||||
return toJSONError(ctx, err, args.BucketName)
|
||||
|
@ -1874,7 +1873,7 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic
|
|||
}
|
||||
|
||||
} else {
|
||||
bucketPolicy, err := objectAPI.GetBucketPolicy(ctx, args.BucketName)
|
||||
bucketPolicy, err := globalPolicySys.Get(args.BucketName)
|
||||
if err != nil {
|
||||
if _, ok := err.(BucketPolicyNotFound); !ok {
|
||||
return toJSONError(ctx, err, args.BucketName)
|
||||
|
@ -1888,11 +1887,10 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic
|
|||
|
||||
policyInfo.Statements = miniogopolicy.SetPolicy(policyInfo.Statements, policyType, args.BucketName, args.Prefix)
|
||||
if len(policyInfo.Statements) == 0 {
|
||||
if err = objectAPI.DeleteBucketPolicy(ctx, args.BucketName); err != nil {
|
||||
if err = globalBucketMetadataSys.Update(args.BucketName, bucketPolicyConfig, nil); err != nil {
|
||||
return toJSONError(ctx, err, args.BucketName)
|
||||
}
|
||||
|
||||
globalPolicySys.Remove(args.BucketName)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1902,13 +1900,15 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic
|
|||
return toJSONError(ctx, err, args.BucketName)
|
||||
}
|
||||
|
||||
// Parse validate and save bucket policy.
|
||||
if err := objectAPI.SetBucketPolicy(ctx, args.BucketName, bucketPolicy); err != nil {
|
||||
configData, err := json.Marshal(bucketPolicy)
|
||||
if err != nil {
|
||||
return toJSONError(ctx, err, args.BucketName)
|
||||
}
|
||||
|
||||
globalPolicySys.Set(args.BucketName, bucketPolicy)
|
||||
globalNotificationSys.SetBucketPolicy(ctx, args.BucketName, bucketPolicy)
|
||||
// Parse validate and save bucket policy.
|
||||
if err = globalBucketMetadataSys.Update(args.BucketName, bucketPolicyConfig, configData); err != nil {
|
||||
return toJSONError(ctx, err, args.BucketName)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
|
@ -35,13 +35,8 @@ import (
|
|||
|
||||
jwtgo "github.com/dgrijalva/jwt-go"
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
miniogopolicy "github.com/minio/minio-go/v6/pkg/policy"
|
||||
xjwt "github.com/minio/minio/cmd/jwt"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
"github.com/minio/minio/pkg/bucket/policy/condition"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
)
|
||||
|
||||
// Implement a dummy flush writer.
|
||||
|
@ -533,30 +528,6 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa
|
|||
if err == nil {
|
||||
t.Fatalf("Expected error `%s`", err)
|
||||
}
|
||||
|
||||
bucketPolicy := &policy.Policy{
|
||||
Version: policy.DefaultVersion,
|
||||
Statements: []policy.Statement{policy.NewStatement(
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(policy.ListBucketAction),
|
||||
policy.NewResourceSet(policy.NewResource(bucketName, "")),
|
||||
condition.NewFunctions(),
|
||||
)},
|
||||
}
|
||||
|
||||
if err = obj.SetBucketPolicy(context.Background(), bucketName, bucketPolicy); err != nil {
|
||||
t.Fatalf("unexpected error. %v", err)
|
||||
}
|
||||
globalPolicySys.Set(bucketName, bucketPolicy)
|
||||
defer globalPolicySys.Remove(bucketName)
|
||||
|
||||
// Unauthenticated ListObjects with READ bucket policy should succeed.
|
||||
reply, err = test("")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
verifyReply(reply)
|
||||
}
|
||||
|
||||
// Wrapper for calling RemoveObject Web Handler
|
||||
|
@ -687,69 +658,6 @@ func testGenerateAuthWebHandler(obj ObjectLayer, instanceType string, t TestErrH
|
|||
}
|
||||
}
|
||||
|
||||
// Wrapper for calling Set Auth Handler
|
||||
func TestWebHandlerSetAuth(t *testing.T) {
|
||||
ExecObjectLayerTest(t, testSetAuthWebHandler)
|
||||
}
|
||||
|
||||
// testSetAuthWebHandler - Test SetAuth web handler
|
||||
func testSetAuthWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
// Register the API end points with XL/FS object layer.
|
||||
apiRouter := initTestWebRPCEndPoint(obj)
|
||||
credentials, err := auth.GetNewCredentials()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
globalIAMSys.SetUser(credentials.AccessKey, madmin.UserInfo{
|
||||
SecretKey: credentials.SecretKey,
|
||||
Status: madmin.AccountEnabled,
|
||||
})
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey)
|
||||
if err != nil {
|
||||
t.Fatal("Cannot authenticate")
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
currentAccessKey string
|
||||
currentSecretKey string
|
||||
newAccessKey string
|
||||
newSecretKey string
|
||||
success bool
|
||||
}{
|
||||
{"", "", "", "", false},
|
||||
{"1", "1", "1", "1", false},
|
||||
{credentials.AccessKey, credentials.SecretKey, "azerty", "bar", false},
|
||||
{credentials.AccessKey, credentials.SecretKey, "azerty", "foooooooooooooo", true},
|
||||
}
|
||||
|
||||
// Iterating over the test cases, calling the function under test and asserting the response.
|
||||
for i, testCase := range testCases {
|
||||
setAuthRequest := SetAuthArgs{CurrentAccessKey: testCase.currentAccessKey, CurrentSecretKey: testCase.currentSecretKey, NewAccessKey: testCase.newAccessKey, NewSecretKey: testCase.newSecretKey}
|
||||
setAuthReply := &SetAuthReply{}
|
||||
req, err := newTestWebRPCRequest("Web.SetAuth", authorization, setAuthRequest)
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: Failed to create HTTP request: <ERROR> %v", i+1, err)
|
||||
}
|
||||
apiRouter.ServeHTTP(rec, req)
|
||||
if rec.Code != http.StatusOK {
|
||||
t.Fatalf("Test %d: Expected the response status to be 200, but instead found `%d`", i+1, rec.Code)
|
||||
}
|
||||
err = getTestWebRPCResponse(rec, &setAuthReply)
|
||||
if testCase.success && err != nil {
|
||||
t.Fatalf("Test %d: Supposed to succeed but failed, %v", i+1, err)
|
||||
}
|
||||
if !testCase.success && err == nil {
|
||||
t.Fatalf("Test %d: Supposed to fail but succeeded, %v", i+1, err)
|
||||
}
|
||||
if testCase.success && setAuthReply.Token == "" {
|
||||
t.Fatalf("Test %d: Failed to set auth keys", i+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWebCreateURLToken(t *testing.T) {
|
||||
ExecObjectLayerTest(t, testCreateURLToken)
|
||||
}
|
||||
|
@ -892,28 +800,6 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler
|
|||
t.Fatalf("Expected the response status to be 403, but instead found `%d`", code)
|
||||
}
|
||||
|
||||
bucketPolicy := &policy.Policy{
|
||||
Version: policy.DefaultVersion,
|
||||
Statements: []policy.Statement{policy.NewStatement(
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(policy.PutObjectAction),
|
||||
policy.NewResourceSet(policy.NewResource(bucketName, "*")),
|
||||
condition.NewFunctions(),
|
||||
)},
|
||||
}
|
||||
|
||||
if err := obj.SetBucketPolicy(context.Background(), bucketName, bucketPolicy); err != nil {
|
||||
t.Fatalf("unexpected error. %v", err)
|
||||
}
|
||||
globalPolicySys.Set(bucketName, bucketPolicy)
|
||||
defer globalPolicySys.Remove(bucketName)
|
||||
|
||||
// Unauthenticated upload with WRITE policy should succeed.
|
||||
code = test("", true)
|
||||
if code != http.StatusOK {
|
||||
t.Fatalf("Expected the response status to be 200, but instead found `%d`", code)
|
||||
}
|
||||
}
|
||||
|
||||
// Wrapper for calling Download Handler
|
||||
|
@ -1010,33 +896,6 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl
|
|||
if code != http.StatusForbidden {
|
||||
t.Fatalf("Expected the response status to be 403, but instead found `%d`", code)
|
||||
}
|
||||
|
||||
bucketPolicy := &policy.Policy{
|
||||
Version: policy.DefaultVersion,
|
||||
Statements: []policy.Statement{policy.NewStatement(
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(policy.GetObjectAction),
|
||||
policy.NewResourceSet(policy.NewResource(bucketName, "*")),
|
||||
condition.NewFunctions(),
|
||||
)},
|
||||
}
|
||||
|
||||
if err := obj.SetBucketPolicy(context.Background(), bucketName, bucketPolicy); err != nil {
|
||||
t.Fatalf("unexpected error. %v", err)
|
||||
}
|
||||
globalPolicySys.Set(bucketName, bucketPolicy)
|
||||
defer globalPolicySys.Remove(bucketName)
|
||||
|
||||
// Unauthenticated download with READ policy should succeed.
|
||||
code, bodyContent = test("")
|
||||
if code != http.StatusOK {
|
||||
t.Fatalf("Expected the response status to be 200, but instead found `%d`", code)
|
||||
}
|
||||
|
||||
if !bytes.Equal(bodyContent, content) {
|
||||
t.Fatalf("The downloaded file is corrupted")
|
||||
}
|
||||
}
|
||||
|
||||
// Test web.DownloadZip
|
||||
|
@ -1229,251 +1088,6 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrH
|
|||
}
|
||||
}
|
||||
|
||||
// Wrapper for calling GetBucketPolicy Handler
|
||||
func TestWebHandlerGetBucketPolicyHandler(t *testing.T) {
|
||||
ExecObjectLayerTest(t, testWebGetBucketPolicyHandler)
|
||||
}
|
||||
|
||||
// testWebGetBucketPolicyHandler - Test GetBucketPolicy web handler
|
||||
func testWebGetBucketPolicyHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
// Register the API end points with XL/FS object layer.
|
||||
apiRouter := initTestWebRPCEndPoint(obj)
|
||||
credentials := globalActiveCred
|
||||
|
||||
authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey)
|
||||
if err != nil {
|
||||
t.Fatal("Cannot authenticate")
|
||||
}
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
bucketName := getRandomBucketName()
|
||||
if err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false); err != nil {
|
||||
t.Fatal("Unexpected error: ", err)
|
||||
}
|
||||
|
||||
bucketPolicy := &policy.Policy{
|
||||
Version: policy.DefaultVersion,
|
||||
Statements: []policy.Statement{
|
||||
policy.NewStatement(
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(policy.GetBucketLocationAction, policy.ListBucketAction),
|
||||
policy.NewResourceSet(policy.NewResource(bucketName, "")),
|
||||
condition.NewFunctions(),
|
||||
),
|
||||
policy.NewStatement(
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(policy.GetObjectAction),
|
||||
policy.NewResourceSet(policy.NewResource(bucketName, "*")),
|
||||
condition.NewFunctions(),
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
if err = savePolicyConfig(context.Background(), obj, bucketName, bucketPolicy); err != nil {
|
||||
t.Fatal("Unexpected error: ", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
bucketName string
|
||||
prefix string
|
||||
expectedResult miniogopolicy.BucketPolicy
|
||||
}{
|
||||
{bucketName, "", miniogopolicy.BucketPolicyReadOnly},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
args := &GetBucketPolicyArgs{BucketName: testCase.bucketName, Prefix: testCase.prefix}
|
||||
reply := &GetBucketPolicyRep{}
|
||||
req, err := newTestWebRPCRequest("Web.GetBucketPolicy", authorization, args)
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: Failed to create HTTP request: <ERROR> %v", i+1, err)
|
||||
}
|
||||
apiRouter.ServeHTTP(rec, req)
|
||||
if rec.Code != http.StatusOK {
|
||||
t.Fatalf("Test %d: Expected the response status to be 200, but instead found `%d`", i+1, rec.Code)
|
||||
}
|
||||
if err = getTestWebRPCResponse(rec, &reply); err != nil {
|
||||
t.Fatalf("Test %d: Should succeed but it didn't, %v", i+1, err)
|
||||
}
|
||||
if testCase.expectedResult != reply.Policy {
|
||||
t.Fatalf("Test %d: expected: %v, got: %v", i+1, testCase.expectedResult, reply.Policy)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Wrapper for calling ListAllBucketPolicies Handler
|
||||
func TestWebHandlerListAllBucketPoliciesHandler(t *testing.T) {
|
||||
ExecObjectLayerTest(t, testWebListAllBucketPoliciesHandler)
|
||||
}
|
||||
|
||||
// testWebListAllBucketPoliciesHandler - Test ListAllBucketPolicies web handler
|
||||
func testWebListAllBucketPoliciesHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
// Register the API end points with XL/FS object layer.
|
||||
apiRouter := initTestWebRPCEndPoint(obj)
|
||||
credentials := globalActiveCred
|
||||
|
||||
authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey)
|
||||
if err != nil {
|
||||
t.Fatal("Cannot authenticate")
|
||||
}
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
bucketName := getRandomBucketName()
|
||||
if err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false); err != nil {
|
||||
t.Fatal("Unexpected error: ", err)
|
||||
}
|
||||
|
||||
func1, err := condition.NewStringEqualsFunc(condition.S3Prefix, "hello")
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to create string equals condition function. %v", err)
|
||||
}
|
||||
|
||||
bucketPolicy := &policy.Policy{
|
||||
Version: policy.DefaultVersion,
|
||||
Statements: []policy.Statement{
|
||||
policy.NewStatement(
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(policy.GetBucketLocationAction),
|
||||
policy.NewResourceSet(policy.NewResource(bucketName, "")),
|
||||
condition.NewFunctions(),
|
||||
),
|
||||
policy.NewStatement(
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(policy.ListBucketAction),
|
||||
policy.NewResourceSet(policy.NewResource(bucketName, "")),
|
||||
condition.NewFunctions(func1),
|
||||
),
|
||||
policy.NewStatement(
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(policy.ListBucketMultipartUploadsAction),
|
||||
policy.NewResourceSet(policy.NewResource(bucketName, "")),
|
||||
condition.NewFunctions(),
|
||||
),
|
||||
policy.NewStatement(
|
||||
policy.Allow,
|
||||
policy.NewPrincipal("*"),
|
||||
policy.NewActionSet(
|
||||
policy.AbortMultipartUploadAction,
|
||||
policy.DeleteObjectAction,
|
||||
policy.GetObjectAction,
|
||||
policy.ListMultipartUploadPartsAction,
|
||||
policy.PutObjectAction,
|
||||
),
|
||||
policy.NewResourceSet(policy.NewResource(bucketName, "hello*")),
|
||||
condition.NewFunctions(),
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
if err = savePolicyConfig(context.Background(), obj, bucketName, bucketPolicy); err != nil {
|
||||
t.Fatal("Unexpected error: ", err)
|
||||
}
|
||||
|
||||
testCaseResult1 := []BucketAccessPolicy{{
|
||||
Bucket: bucketName,
|
||||
Prefix: "hello",
|
||||
Policy: miniogopolicy.BucketPolicyReadWrite,
|
||||
}}
|
||||
testCases := []struct {
|
||||
bucketName string
|
||||
expectedResult []BucketAccessPolicy
|
||||
}{
|
||||
{bucketName, testCaseResult1},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
args := &ListAllBucketPoliciesArgs{BucketName: testCase.bucketName}
|
||||
reply := &ListAllBucketPoliciesRep{}
|
||||
req, err := newTestWebRPCRequest("Web.ListAllBucketPolicies", authorization, args)
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: Failed to create HTTP request: <ERROR> %v", i+1, err)
|
||||
}
|
||||
apiRouter.ServeHTTP(rec, req)
|
||||
if rec.Code != http.StatusOK {
|
||||
t.Fatalf("Test %d: Expected the response status to be 200, but instead found `%d`", i+1, rec.Code)
|
||||
}
|
||||
if err = getTestWebRPCResponse(rec, &reply); err != nil {
|
||||
t.Fatalf("Test %d: Should succeed but it didn't, %v", i+1, err)
|
||||
}
|
||||
if !reflect.DeepEqual(testCase.expectedResult, reply.Policies) {
|
||||
t.Fatalf("Test %d: expected: %v, got: %v", i+1, testCase.expectedResult, reply.Policies)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Wrapper for calling SetBucketPolicy Handler
|
||||
func TestWebHandlerSetBucketPolicyHandler(t *testing.T) {
|
||||
ExecObjectLayerTest(t, testWebSetBucketPolicyHandler)
|
||||
}
|
||||
|
||||
// testWebSetBucketPolicyHandler - Test SetBucketPolicy web handler
|
||||
func testWebSetBucketPolicyHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
// Register the API end points with XL/FS object layer.
|
||||
apiRouter := initTestWebRPCEndPoint(obj)
|
||||
credentials := globalActiveCred
|
||||
|
||||
authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey)
|
||||
if err != nil {
|
||||
t.Fatal("Cannot authenticate")
|
||||
}
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
|
||||
// Create a bucket
|
||||
bucketName := getRandomBucketName()
|
||||
if err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false); err != nil {
|
||||
t.Fatal("Unexpected error: ", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
bucketName string
|
||||
prefix string
|
||||
policy string
|
||||
pass bool
|
||||
}{
|
||||
// Invalid bucket name
|
||||
{"", "", "readonly", false},
|
||||
// Invalid policy
|
||||
{bucketName, "", "foo", false},
|
||||
// Valid parameters
|
||||
{bucketName, "", "readwrite", true},
|
||||
// None is valid and policy should be removed.
|
||||
{bucketName, "", "none", true},
|
||||
// Setting none again should return an error.
|
||||
{bucketName, "", "none", false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
args := &SetBucketPolicyWebArgs{BucketName: testCase.bucketName, Prefix: testCase.prefix, Policy: testCase.policy}
|
||||
reply := &WebGenericRep{}
|
||||
// Call SetBucketPolicy RPC
|
||||
req, err := newTestWebRPCRequest("Web.SetBucketPolicy", authorization, args)
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: Failed to create HTTP request: <ERROR> %v", i+1, err)
|
||||
}
|
||||
apiRouter.ServeHTTP(rec, req)
|
||||
// Check if we have 200 OK
|
||||
if testCase.pass && rec.Code != http.StatusOK {
|
||||
t.Fatalf("Test %d: Expected the response status to be 200, but instead found `%d`", i+1, rec.Code)
|
||||
}
|
||||
// Parse RPC response
|
||||
err = getTestWebRPCResponse(rec, &reply)
|
||||
if testCase.pass && err != nil {
|
||||
t.Fatalf("Test %d: Should succeed but it didn't, %#v", i+1, err)
|
||||
}
|
||||
if !testCase.pass && err == nil {
|
||||
t.Fatalf("Test %d: Should fail it didn't", i+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestWebCheckAuthorization - Test Authorization for all web handlers
|
||||
func TestWebCheckAuthorization(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
|
|
@ -32,10 +32,6 @@ import (
|
|||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/bpool"
|
||||
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
"github.com/minio/minio/pkg/dsync"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
"github.com/minio/minio/pkg/sync/errgroup"
|
||||
|
@ -64,6 +60,8 @@ type diskConnectInfo struct {
|
|||
// object sets. NOTE: There is no dynamic scaling allowed or intended in
|
||||
// current design.
|
||||
type xlSets struct {
|
||||
GatewayUnsupported
|
||||
|
||||
sets []*xlObjects
|
||||
|
||||
// Reference format.
|
||||
|
@ -573,76 +571,6 @@ func (s *xlSets) ListObjectsV2(ctx context.Context, bucket, prefix, continuation
|
|||
return listObjectsV2Info, err
|
||||
}
|
||||
|
||||
// SetBucketPolicy persist the new policy on the bucket.
|
||||
func (s *xlSets) SetBucketPolicy(ctx context.Context, bucket string, policy *policy.Policy) error {
|
||||
return savePolicyConfig(ctx, s, bucket, policy)
|
||||
}
|
||||
|
||||
// GetBucketPolicy will return a policy on a bucket
|
||||
func (s *xlSets) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) {
|
||||
return getPolicyConfig(s, bucket)
|
||||
}
|
||||
|
||||
// DeleteBucketPolicy deletes all policies on bucket
|
||||
func (s *xlSets) DeleteBucketPolicy(ctx context.Context, bucket string) error {
|
||||
return removePolicyConfig(ctx, s, bucket)
|
||||
}
|
||||
|
||||
// SetBucketLifecycle sets lifecycle on bucket
|
||||
func (s *xlSets) SetBucketLifecycle(ctx context.Context, bucket string, lifecycle *lifecycle.Lifecycle) error {
|
||||
return saveLifecycleConfig(ctx, s, bucket, lifecycle)
|
||||
}
|
||||
|
||||
// GetBucketLifecycle will get lifecycle on bucket
|
||||
func (s *xlSets) GetBucketLifecycle(ctx context.Context, bucket string) (*lifecycle.Lifecycle, error) {
|
||||
return getLifecycleConfig(s, bucket)
|
||||
}
|
||||
|
||||
// DeleteBucketLifecycle deletes all lifecycle on bucket
|
||||
func (s *xlSets) DeleteBucketLifecycle(ctx context.Context, bucket string) error {
|
||||
return removeLifecycleConfig(ctx, s, bucket)
|
||||
}
|
||||
|
||||
// GetBucketSSEConfig returns bucket encryption config on given bucket
|
||||
func (s *xlSets) GetBucketSSEConfig(ctx context.Context, bucket string) (*bucketsse.BucketSSEConfig, error) {
|
||||
return getBucketSSEConfig(s, bucket)
|
||||
}
|
||||
|
||||
// SetBucketSSEConfig sets bucket encryption config on given bucket
|
||||
func (s *xlSets) SetBucketSSEConfig(ctx context.Context, bucket string, config *bucketsse.BucketSSEConfig) error {
|
||||
return saveBucketSSEConfig(ctx, s, bucket, config)
|
||||
}
|
||||
|
||||
// DeleteBucketSSEConfig deletes bucket encryption config on given bucket
|
||||
func (s *xlSets) DeleteBucketSSEConfig(ctx context.Context, bucket string) error {
|
||||
return removeBucketSSEConfig(ctx, s, bucket)
|
||||
}
|
||||
|
||||
// SetBucketObjectLockConfig enables/clears default object lock configuration
|
||||
func (s *xlSets) SetBucketObjectLockConfig(ctx context.Context, bucket string, config *objectlock.Config) error {
|
||||
return saveBucketObjectLockConfig(ctx, s, bucket, config)
|
||||
}
|
||||
|
||||
// GetBucketObjectLockConfig - returns current defaults for object lock configuration
|
||||
func (s *xlSets) GetBucketObjectLockConfig(ctx context.Context, bucket string) (*objectlock.Config, error) {
|
||||
return readBucketObjectLockConfig(ctx, s, bucket)
|
||||
}
|
||||
|
||||
// SetBucketTagging sets bucket tags on given bucket
|
||||
func (s *xlSets) SetBucketTagging(ctx context.Context, bucket string, t *tags.Tags) error {
|
||||
return saveBucketTagging(ctx, s, bucket, t)
|
||||
}
|
||||
|
||||
// GetBucketTagging get bucket tags set on given bucket
|
||||
func (s *xlSets) GetBucketTagging(ctx context.Context, bucket string) (*tags.Tags, error) {
|
||||
return readBucketTagging(ctx, s, bucket)
|
||||
}
|
||||
|
||||
// DeleteBucketTagging delete bucket tags set if any.
|
||||
func (s *xlSets) DeleteBucketTagging(ctx context.Context, bucket string) error {
|
||||
return deleteBucketTagging(ctx, s, bucket)
|
||||
}
|
||||
|
||||
// IsNotificationSupported returns whether bucket notification is applicable for this layer.
|
||||
func (s *xlSets) IsNotificationSupported() bool {
|
||||
return s.getHashedSet("").IsNotificationSupported()
|
||||
|
@ -688,7 +616,7 @@ func (s *xlSets) DeleteBucket(ctx context.Context, bucket string, forceDelete bo
|
|||
}
|
||||
|
||||
// Delete all bucket metadata.
|
||||
deleteBucketMetadata(ctx, bucket, s)
|
||||
deleteBucketMetadata(ctx, s, bucket)
|
||||
|
||||
// Success.
|
||||
return nil
|
||||
|
|
|
@ -21,12 +21,7 @@ import (
|
|||
"sort"
|
||||
|
||||
"github.com/minio/minio-go/v6/pkg/s3utils"
|
||||
"github.com/minio/minio-go/v6/pkg/tags"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
|
||||
"github.com/minio/minio/pkg/sync/errgroup"
|
||||
)
|
||||
|
@ -255,76 +250,6 @@ func (xl xlObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete
|
|||
return nil
|
||||
}
|
||||
|
||||
// SetBucketPolicy sets policy on bucket
|
||||
func (xl xlObjects) SetBucketPolicy(ctx context.Context, bucket string, policy *policy.Policy) error {
|
||||
return savePolicyConfig(ctx, xl, bucket, policy)
|
||||
}
|
||||
|
||||
// GetBucketPolicy will get policy on bucket
|
||||
func (xl xlObjects) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) {
|
||||
return getPolicyConfig(xl, bucket)
|
||||
}
|
||||
|
||||
// DeleteBucketPolicy deletes all policies on bucket
|
||||
func (xl xlObjects) DeleteBucketPolicy(ctx context.Context, bucket string) error {
|
||||
return removePolicyConfig(ctx, xl, bucket)
|
||||
}
|
||||
|
||||
// SetBucketLifecycle sets lifecycle on bucket
|
||||
func (xl xlObjects) SetBucketLifecycle(ctx context.Context, bucket string, lifecycle *lifecycle.Lifecycle) error {
|
||||
return saveLifecycleConfig(ctx, xl, bucket, lifecycle)
|
||||
}
|
||||
|
||||
// GetBucketLifecycle will get lifecycle on bucket
|
||||
func (xl xlObjects) GetBucketLifecycle(ctx context.Context, bucket string) (*lifecycle.Lifecycle, error) {
|
||||
return getLifecycleConfig(xl, bucket)
|
||||
}
|
||||
|
||||
// DeleteBucketLifecycle deletes all lifecycle on bucket
|
||||
func (xl xlObjects) DeleteBucketLifecycle(ctx context.Context, bucket string) error {
|
||||
return removeLifecycleConfig(ctx, xl, bucket)
|
||||
}
|
||||
|
||||
// GetBucketSSEConfig returns bucket encryption config on given bucket
|
||||
func (xl xlObjects) GetBucketSSEConfig(ctx context.Context, bucket string) (*bucketsse.BucketSSEConfig, error) {
|
||||
return getBucketSSEConfig(xl, bucket)
|
||||
}
|
||||
|
||||
// SetBucketSSEConfig sets bucket encryption config on given bucket
|
||||
func (xl xlObjects) SetBucketSSEConfig(ctx context.Context, bucket string, config *bucketsse.BucketSSEConfig) error {
|
||||
return saveBucketSSEConfig(ctx, xl, bucket, config)
|
||||
}
|
||||
|
||||
// DeleteBucketSSEConfig deletes bucket encryption config on given bucket
|
||||
func (xl xlObjects) DeleteBucketSSEConfig(ctx context.Context, bucket string) error {
|
||||
return removeBucketSSEConfig(ctx, xl, bucket)
|
||||
}
|
||||
|
||||
// SetBucketObjectLockConfig enables/clears default object lock configuration
|
||||
func (xl xlObjects) SetBucketObjectLockConfig(ctx context.Context, bucket string, config *objectlock.Config) error {
|
||||
return saveBucketObjectLockConfig(ctx, xl, bucket, config)
|
||||
}
|
||||
|
||||
// GetBucketObjectLockConfig - returns current defaults for object lock configuration
|
||||
func (xl xlObjects) GetBucketObjectLockConfig(ctx context.Context, bucket string) (*objectlock.Config, error) {
|
||||
return readBucketObjectLockConfig(ctx, xl, bucket)
|
||||
}
|
||||
|
||||
// SetBucketTagging sets bucket tags on given bucket
|
||||
func (xl xlObjects) SetBucketTagging(ctx context.Context, bucket string, t *tags.Tags) error {
|
||||
return saveBucketTagging(ctx, xl, bucket, t)
|
||||
}
|
||||
|
||||
// GetBucketTagging get bucket tags set on given bucket
|
||||
func (xl xlObjects) GetBucketTagging(ctx context.Context, bucket string) (*tags.Tags, error) {
|
||||
return readBucketTagging(ctx, xl, bucket)
|
||||
}
|
||||
|
||||
// DeleteBucketTagging delete bucket tags set if any.
|
||||
func (xl xlObjects) DeleteBucketTagging(ctx context.Context, bucket string) error {
|
||||
return deleteBucketTagging(ctx, xl, bucket)
|
||||
}
|
||||
|
||||
// IsNotificationSupported returns whether bucket notification is applicable for this layer.
|
||||
func (xl xlObjects) IsNotificationSupported() bool {
|
||||
return true
|
||||
|
|
|
@ -106,10 +106,6 @@ func commitXLMetadata(ctx context.Context, disks []StorageAPI, srcBucket, srcPre
|
|||
// towards simplification of multipart APIs.
|
||||
// The resulting ListMultipartsInfo structure is unmarshalled directly as XML.
|
||||
func (xl xlObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) {
|
||||
if err := checkListMultipartArgs(ctx, bucket, object, keyMarker, uploadIDMarker, delimiter, xl); err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
result.MaxUploads = maxUploads
|
||||
result.KeyMarker = keyMarker
|
||||
result.Prefix = object
|
||||
|
@ -210,9 +206,6 @@ func (xl xlObjects) newMultipartUpload(ctx context.Context, bucket string, objec
|
|||
//
|
||||
// Implements S3 compatible initiate multipart API.
|
||||
func (xl xlObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) {
|
||||
if err := checkNewMultipartArgs(ctx, bucket, object, xl); err != nil {
|
||||
return "", err
|
||||
}
|
||||
// No metadata is set, allocate a new one.
|
||||
if opts.UserDefined == nil {
|
||||
opts.UserDefined = make(map[string]string)
|
||||
|
@ -226,11 +219,6 @@ func (xl xlObjects) NewMultipartUpload(ctx context.Context, bucket, object strin
|
|||
//
|
||||
// Implements S3 compatible Upload Part Copy API.
|
||||
func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {
|
||||
|
||||
if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, xl); err != nil {
|
||||
return pi, err
|
||||
}
|
||||
|
||||
partInfo, err := xl.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, NewPutObjReader(srcInfo.Reader, nil, nil), dstOpts)
|
||||
if err != nil {
|
||||
return pi, toObjectErr(err, dstBucket, dstObject)
|
||||
|
@ -432,9 +420,7 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
|
|||
// ListPartsInfo structure is marshaled directly into XML and
|
||||
// replied back to the client.
|
||||
func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, e error) {
|
||||
if err := checkListPartsArgs(ctx, bucket, object, xl); err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
|
||||
return result, toObjectErr(err, bucket, object, uploadID)
|
||||
}
|
||||
|
@ -523,10 +509,6 @@ func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadI
|
|||
//
|
||||
// Implements S3 compatible Complete multipart API.
|
||||
func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, e error) {
|
||||
if err := checkCompleteMultipartArgs(ctx, bucket, object, xl); err != nil {
|
||||
return oi, err
|
||||
}
|
||||
|
||||
if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
|
||||
return oi, toObjectErr(err, bucket, object, uploadID)
|
||||
}
|
||||
|
@ -723,9 +705,6 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
|
|||
// that this is an atomic idempotent operation. Subsequent calls have
|
||||
// no effect and further requests to the same uploadID would not be honored.
|
||||
func (xl xlObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
|
||||
if err := checkAbortMultipartArgs(ctx, bucket, object, xl); err != nil {
|
||||
return err
|
||||
}
|
||||
// Validates if upload ID exists.
|
||||
if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
|
||||
return toObjectErr(err, bucket, object, uploadID)
|
||||
|
|
|
@ -51,6 +51,8 @@ type partialUpload struct {
|
|||
|
||||
// xlObjects - Implements XL object layer.
|
||||
type xlObjects struct {
|
||||
GatewayUnsupported
|
||||
|
||||
// getDisks returns list of storageAPIs.
|
||||
getDisks func() []StorageAPI
|
||||
|
||||
|
|
160
cmd/xl-zones.go
|
@ -29,15 +29,13 @@ import (
|
|||
"github.com/minio/minio-go/v6/pkg/tags"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
"github.com/minio/minio/pkg/sync/errgroup"
|
||||
)
|
||||
|
||||
type xlZones struct {
|
||||
GatewayUnsupported
|
||||
|
||||
zones []*xlSets
|
||||
}
|
||||
|
||||
|
@ -323,13 +321,14 @@ func (z *xlZones) MakeBucketWithLocation(ctx context.Context, bucket, location s
|
|||
if err := z.zones[0].MakeBucketWithLocation(ctx, bucket, location, lockEnabled); err != nil {
|
||||
return err
|
||||
}
|
||||
if lockEnabled {
|
||||
meta := newBucketMetadata(bucket)
|
||||
meta.LockEnabled = lockEnabled
|
||||
if err := meta.save(ctx, z); err != nil {
|
||||
return toObjectErr(err, bucket)
|
||||
}
|
||||
|
||||
// If it doesn't exist we get a new one, so ignore the error
|
||||
meta, _ := globalBucketMetadataSys.Get(bucket)
|
||||
meta.ObjectLockConfigurationXML = defaultBucketObjectLockConfig
|
||||
if err := meta.Save(ctx, z); err != nil {
|
||||
return toObjectErr(err, bucket)
|
||||
}
|
||||
globalBucketMetadataSys.Set(bucket, meta)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -351,13 +350,13 @@ func (z *xlZones) MakeBucketWithLocation(ctx context.Context, bucket, location s
|
|||
}
|
||||
}
|
||||
|
||||
if lockEnabled {
|
||||
meta := newBucketMetadata(bucket)
|
||||
meta.LockEnabled = lockEnabled
|
||||
if err := meta.save(ctx, z); err != nil {
|
||||
return toObjectErr(err, bucket)
|
||||
}
|
||||
// If it doesn't exist we get a new one, so ignore the error
|
||||
meta, _ := globalBucketMetadataSys.Get(bucket)
|
||||
meta.ObjectLockConfigurationXML = defaultBucketObjectLockConfig
|
||||
if err := meta.Save(ctx, z); err != nil {
|
||||
return toObjectErr(err, bucket)
|
||||
}
|
||||
globalBucketMetadataSys.Set(bucket, meta)
|
||||
|
||||
// Success.
|
||||
return nil
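
Both MakeBucketWithLocation paths above use the same read-modify-write flow against the bucket metadata system: Get a (possibly fresh) record, mutate it, persist it, then refresh the in-memory cache with Set. The following is a minimal, self-contained sketch of that flow using stand-in types; it is not the repository's BucketMetadataSys, and it models only the cache, not the on-disk Save step.

```go
package main

import (
	"fmt"
	"sync"
)

// bucketMetadata is a hypothetical consolidated per-bucket record.
type bucketMetadata struct {
	Name                       string
	ObjectLockConfigurationXML []byte
}

// metadataSys is an in-memory stand-in for the global metadata cache.
type metadataSys struct {
	mu sync.RWMutex
	m  map[string]bucketMetadata
}

func newMetadataSys() *metadataSys {
	return &metadataSys{m: make(map[string]bucketMetadata)}
}

// Get returns the cached record, or a fresh zero-value record if none exists,
// so a lookup miss is not treated as an error.
func (s *metadataSys) Get(bucket string) bucketMetadata {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if meta, ok := s.m[bucket]; ok {
		return meta
	}
	return bucketMetadata{Name: bucket}
}

// Set refreshes the cache after the record has been persisted elsewhere.
func (s *metadataSys) Set(bucket string, meta bucketMetadata) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.m[bucket] = meta
}

func main() {
	sys := newMetadataSys()

	// Read (or start) the record, mutate it, then refresh the cache.
	meta := sys.Get("mybucket")
	meta.ObjectLockConfigurationXML = []byte("<ObjectLockConfiguration/>")
	// In the real code the record is persisted (meta.Save) before Set is called.
	sys.Set("mybucket", meta)

	fmt.Printf("%s\n", sys.Get("mybucket").ObjectLockConfigurationXML)
}
```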
|
||||
|
@ -980,9 +979,14 @@ func (z *xlZones) ListObjects(ctx context.Context, bucket, prefix, marker, delim
}

func (z *xlZones) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
    if err := checkListMultipartArgs(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, z); err != nil {
        return ListMultipartsInfo{}, err
    }

    if z.SingleZone() {
        return z.zones[0].ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
    }

    var zoneResult = ListMultipartsInfo{}
    zoneResult.MaxUploads = maxUploads
    zoneResult.KeyMarker = keyMarker
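The multipart methods in the hunks that follow all share one dispatch shape: validate the arguments, short-circuit to zones[0] when only a single zone is configured, otherwise fan out over z.zones and merge the per-zone results. A hedged skeleton of that shape; SingleZone, zones and the per-zone ListMultipartUploads call come from the diff, while the Uploads merge is an assumption about the aggregation, which this excerpt does not show in full:

    // Skeleton only, not the committed implementation.
    func (z *xlZones) listMultipartUploadsSketch(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
        if z.SingleZone() {
            // Fast path: a single zone answers directly.
            return z.zones[0].ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
        }
        zoneResult := ListMultipartsInfo{MaxUploads: maxUploads, KeyMarker: keyMarker}
        for _, zone := range z.zones {
            result, err := zone.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
            if err != nil {
                return zoneResult, err
            }
            // Assumed merge step: collect each zone's uploads into one listing.
            zoneResult.Uploads = append(zoneResult.Uploads, result.Uploads...)
        }
        return zoneResult, nil
    }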
@ -1001,6 +1005,10 @@ func (z *xlZones) ListMultipartUploads(ctx context.Context, bucket, prefix, keyM

// Initiate a new multipart upload on a hashedSet based on object name.
func (z *xlZones) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) {
    if err := checkNewMultipartArgs(ctx, bucket, object, z); err != nil {
        return "", err
    }

    if z.SingleZone() {
        return z.zones[0].NewMultipartUpload(ctx, bucket, object, opts)
    }
@ -1009,12 +1017,20 @@ func (z *xlZones) NewMultipartUpload(ctx context.Context, bucket, object string,

// Copies a part of an object from source hashedSet to destination hashedSet.
func (z *xlZones) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (PartInfo, error) {
    if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, z); err != nil {
        return PartInfo{}, err
    }

    return z.PutObjectPart(ctx, destBucket, destObject, uploadID, partID,
        NewPutObjReader(srcInfo.Reader, nil, nil), dstOpts)
}

// PutObjectPart - writes part of an object to hashedSet based on the object name.
func (z *xlZones) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (PartInfo, error) {
    if err := checkPutObjectPartArgs(ctx, bucket, object, z); err != nil {
        return PartInfo{}, err
    }

    uploadIDLock := z.NewNSLock(ctx, bucket, pathJoin(object, uploadID))
    if err := uploadIDLock.GetLock(globalOperationTimeout); err != nil {
        return PartInfo{}, err
@ -1043,6 +1059,10 @@ func (z *xlZones) PutObjectPart(ctx context.Context, bucket, object, uploadID st

// ListObjectParts - lists all uploaded parts to an object in hashedSet.
func (z *xlZones) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (ListPartsInfo, error) {
    if err := checkListPartsArgs(ctx, bucket, object, z); err != nil {
        return ListPartsInfo{}, err
    }

    uploadIDLock := z.NewNSLock(ctx, bucket, pathJoin(object, uploadID))
    if err := uploadIDLock.GetRLock(globalOperationTimeout); err != nil {
        return ListPartsInfo{}, err
@ -1070,6 +1090,10 @@ func (z *xlZones) ListObjectParts(ctx context.Context, bucket, object, uploadID

// Aborts an in-progress multipart operation on hashedSet based on the object name.
func (z *xlZones) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
    if err := checkAbortMultipartArgs(ctx, bucket, object, z); err != nil {
        return err
    }

    uploadIDLock := z.NewNSLock(ctx, bucket, pathJoin(object, uploadID))
    if err := uploadIDLock.GetLock(globalOperationTimeout); err != nil {
        return err
@ -1079,6 +1103,7 @@ func (z *xlZones) AbortMultipartUpload(ctx context.Context, bucket, object, uplo
    if z.SingleZone() {
        return z.zones[0].AbortMultipartUpload(ctx, bucket, object, uploadID)
    }

    for _, zone := range z.zones {
        result, err := zone.ListMultipartUploads(ctx, bucket, object, "", "", "", maxObjectList)
        if err != nil {
@ -1097,6 +1122,10 @@ func (z *xlZones) AbortMultipartUpload(ctx context.Context, bucket, object, uplo

// CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name.
func (z *xlZones) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
    if err = checkCompleteMultipartArgs(ctx, bucket, object, z); err != nil {
        return objInfo, err
    }

    // Hold read-locks to verify uploaded parts, also disallows
    // parallel part uploads as well.
    uploadIDLock := z.NewNSLock(ctx, bucket, pathJoin(object, uploadID))
@ -1141,7 +1170,15 @@ func (z *xlZones) CompleteMultipartUpload(ctx context.Context, bucket, object, u

// GetBucketInfo - returns bucket info from one of the erasure coded zones.
func (z *xlZones) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
    if z.SingleZone() {
        return z.zones[0].GetBucketInfo(ctx, bucket)
        bucketInfo, err = z.zones[0].GetBucketInfo(ctx, bucket)
        if err != nil {
            return bucketInfo, err
        }
        meta, err := globalBucketMetadataSys.Get(bucket)
        if err == nil {
            bucketInfo.Created = meta.Created
        }
        return bucketInfo, nil
    }
    for _, zone := range z.zones {
        bucketInfo, err = zone.GetBucketInfo(ctx, bucket)
@ -1151,6 +1188,10 @@ func (z *xlZones) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo
            }
            return bucketInfo, err
        }
        meta, err := globalBucketMetadataSys.Get(bucket)
        if err == nil {
            bucketInfo.Created = meta.Created
        }
        return bucketInfo, nil
    }
    return bucketInfo, BucketNotFound{
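The GetBucketInfo hunks above, like the ListBuckets and ListBucketsHeal hunks further down, decorate the returned bucket info with the creation time recorded in the unified bucket metadata. A small hedged helper showing that pattern; the helper name is invented for illustration, while the Get call and the Created field come from the diff:

    // Illustrative only: the committed code inlines this at each call site.
    func withBucketCreatedSketch(info BucketInfo) BucketInfo {
        // A failed lookup leaves the original Created value untouched.
        if meta, err := globalBucketMetadataSys.Get(info.Name); err == nil {
            info.Created = meta.Created
        }
        return info
    }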
@ -1158,76 +1199,6 @@ func (z *xlZones) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo
    }
}

// SetBucketPolicy persist the new policy on the bucket.
func (z *xlZones) SetBucketPolicy(ctx context.Context, bucket string, policy *policy.Policy) error {
    return savePolicyConfig(ctx, z, bucket, policy)
}

// GetBucketPolicy will return a policy on a bucket
func (z *xlZones) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) {
    return getPolicyConfig(z, bucket)
}

// DeleteBucketPolicy deletes all policies on bucket
func (z *xlZones) DeleteBucketPolicy(ctx context.Context, bucket string) error {
    return removePolicyConfig(ctx, z, bucket)
}

// SetBucketLifecycle zones lifecycle on bucket
func (z *xlZones) SetBucketLifecycle(ctx context.Context, bucket string, lifecycle *lifecycle.Lifecycle) error {
    return saveLifecycleConfig(ctx, z, bucket, lifecycle)
}

// GetBucketLifecycle will get lifecycle on bucket
func (z *xlZones) GetBucketLifecycle(ctx context.Context, bucket string) (*lifecycle.Lifecycle, error) {
    return getLifecycleConfig(z, bucket)
}

// DeleteBucketLifecycle deletes all lifecycle on bucket
func (z *xlZones) DeleteBucketLifecycle(ctx context.Context, bucket string) error {
    return removeLifecycleConfig(ctx, z, bucket)
}

// GetBucketSSEConfig returns bucket encryption config on given bucket
func (z *xlZones) GetBucketSSEConfig(ctx context.Context, bucket string) (*bucketsse.BucketSSEConfig, error) {
    return getBucketSSEConfig(z, bucket)
}

// SetBucketSSEConfig sets bucket encryption config on given bucket
func (z *xlZones) SetBucketSSEConfig(ctx context.Context, bucket string, config *bucketsse.BucketSSEConfig) error {
    return saveBucketSSEConfig(ctx, z, bucket, config)
}

// SetBucketObjectLockConfig enables/clears default object lock configuration
func (z *xlZones) SetBucketObjectLockConfig(ctx context.Context, bucket string, config *objectlock.Config) error {
    return saveBucketObjectLockConfig(ctx, z, bucket, config)
}

// GetBucketObjectLockConfig - returns current defaults for object lock configuration
func (z *xlZones) GetBucketObjectLockConfig(ctx context.Context, bucket string) (*objectlock.Config, error) {
    return readBucketObjectLockConfig(ctx, z, bucket)
}

// SetBucketTagging sets bucket tags on given bucket
func (z *xlZones) SetBucketTagging(ctx context.Context, bucket string, t *tags.Tags) error {
    return saveBucketTagging(ctx, z, bucket, t)
}

// GetBucketTagging get bucket tags set on given bucket
func (z *xlZones) GetBucketTagging(ctx context.Context, bucket string) (*tags.Tags, error) {
    return readBucketTagging(ctx, z, bucket)
}

// DeleteBucketTagging delete bucket tags set if any.
func (z *xlZones) DeleteBucketTagging(ctx context.Context, bucket string) error {
    return deleteBucketTagging(ctx, z, bucket)
}

// DeleteBucketSSEConfig deletes bucket encryption config on given bucket
func (z *xlZones) DeleteBucketSSEConfig(ctx context.Context, bucket string) error {
    return removeBucketSSEConfig(ctx, z, bucket)
}

// IsNotificationSupported returns whether bucket notification is applicable for this layer.
func (z *xlZones) IsNotificationSupported() bool {
    return true
@ -1321,13 +1292,11 @@ func (z *xlZones) ListBuckets(ctx context.Context) (buckets []BucketInfo, err er
        return nil, err
    }
    for i := range buckets {
        meta, err := loadBucketMetadata(ctx, z, buckets[i].Name)
        meta, err := globalBucketMetadataSys.Get(buckets[i].Name)
        if err == nil {
            buckets[i].Created = meta.Created
        }
        if err != errMetaDataConverted {
            logger.LogIf(ctx, err)
        }
        globalBucketMetadataSys.Set(buckets[i].Name, meta)
    }
    return buckets, nil
}
@ -1544,6 +1513,15 @@ func (z *xlZones) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) {
        }
        healBuckets = append(healBuckets, bucketsInfo...)
    }

    for i := range healBuckets {
        meta, err := globalBucketMetadataSys.Get(healBuckets[i].Name)
        if err == nil {
            healBuckets[i].Created = meta.Created
        }
        globalBucketMetadataSys.Set(healBuckets[i].Name, meta)
    }

    return healBuckets, nil
}
@ -87,7 +87,7 @@ func ParseBucketSSEConfig(r io.Reader) (*BucketSSEConfig, error) {
    // Validates server-side encryption config rules
    // Only one rule is allowed on AWS S3
    if len(config.Rules) != 1 {
        return nil, errors.New("Only one server-side encryption rule is allowed")
        return nil, errors.New("only one server-side encryption rule is allowed at a time")
    }

    for _, rule := range config.Rules {

@ -98,7 +98,7 @@ func ParseBucketSSEConfig(r io.Reader) (*BucketSSEConfig, error) {
            }
        case AWSKms:
            if rule.DefaultEncryptionAction.MasterKeyID == "" {
                return nil, errors.New("MasterKeyID is missing")
                return nil, errors.New("MasterKeyID is missing with aws:kms")
            }
        }
    }
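For context on the two error messages touched above, a hedged usage sketch of the parser follows; only the ParseBucketSSEConfig signature from the hunk header and the package import path from the xl-zones.go import block are taken from the source, everything else is illustrative:

    // Usage sketch, not code from this commit.
    package main

    import (
        "fmt"
        "strings"

        bucketsse "github.com/minio/minio/pkg/bucket/encryption"
    )

    func main() {
        payload := `<ServerSideEncryptionConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><ApplyServerSideEncryptionByDefault><SSEAlgorithm>AES256</SSEAlgorithm></ApplyServerSideEncryptionByDefault></Rule></ServerSideEncryptionConfiguration>`
        cfg, err := bucketsse.ParseBucketSSEConfig(strings.NewReader(payload))
        if err != nil {
            // A payload with two <Rule> elements now fails with
            // "only one server-side encryption rule is allowed at a time".
            fmt.Println("invalid config:", err)
            return
        }
        fmt.Printf("parsed %d rule(s)\n", len(cfg.Rules))
    }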
@ -90,7 +90,7 @@ func TestParseBucketSSEConfig(t *testing.T) {
    // 3. Invalid - more than one rule
    {
        inputXML:    `<ServerSideEncryptionConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><ApplyServerSideEncryptionByDefault><SSEAlgorithm>AES256</SSEAlgorithm></ApplyServerSideEncryptionByDefault></Rule><Rule><ApplyServerSideEncryptionByDefault><SSEAlgorithm>AES256</SSEAlgorithm></ApplyServerSideEncryptionByDefault></Rule></ServerSideEncryptionConfiguration>`,
        expectedErr: errors.New("Only one server-side encryption rule is allowed"),
        expectedErr: errors.New("only one server-side encryption rule is allowed at a time"),
        shouldPass:  false,
    },
    // 4. Invalid XML - master key ID present along with AES256

@ -102,7 +102,7 @@ func TestParseBucketSSEConfig(t *testing.T) {
    // 5. Invalid XML - master key ID not provided when algorithm is set to aws:kms algorithm
    {
        inputXML:    `<ServerSideEncryptionConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><ApplyServerSideEncryptionByDefault><SSEAlgorithm>aws:kms</SSEAlgorithm></ApplyServerSideEncryptionByDefault></Rule></ServerSideEncryptionConfiguration>`,
        expectedErr: errors.New("MasterKeyID is missing"),
        expectedErr: errors.New("MasterKeyID is missing with aws:kms"),
        shouldPass:  false,
    },
    // 6. Invalid Algorithm
@ -137,13 +137,9 @@ func UTCNowNTP() (time.Time, error) {

// Retention - bucket level retention configuration.
type Retention struct {
    Mode     RetMode
    Validity time.Duration
}

// IsEmpty - returns whether retention is empty or not.
func (r Retention) IsEmpty() bool {
    return !r.Mode.Valid() || r.Validity == 0
    Mode        RetMode
    Validity    time.Duration
    LockEnabled bool
}

// Retain - check whether given date is retainable by validity time.
@ -236,7 +232,7 @@ func (config *Config) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error
    }

    if parsedConfig.ObjectLockEnabled != "Enabled" {
        return fmt.Errorf("only 'Enabled' value is allowd to ObjectLockEnabled element")
        return fmt.Errorf("only 'Enabled' value is allowed to ObjectLockEnabled element")
    }

    *config = Config(parsedConfig)
@ -244,8 +240,10 @@ func (config *Config) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error
}

// ToRetention - convert to Retention type.
func (config *Config) ToRetention() (r *Retention) {
    r = &Retention{}
func (config *Config) ToRetention() Retention {
    r := Retention{
        LockEnabled: config.ObjectLockEnabled == "Enabled",
    }
    if config.Rule != nil {
        r.Mode = config.Rule.DefaultRetention.Mode
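The ToRetention hunk above is cut off right after the Mode assignment; a hedged reconstruction of the new value-returning shape is sketched below. Retention, Config and LockEnabled are as shown in this diff, while the validity computation is deliberately left as a comment because it is not visible in the excerpt:

    // Sketch of the new signature only, not the committed body.
    func (config *Config) ToRetention() Retention {
        r := Retention{
            LockEnabled: config.ObjectLockEnabled == "Enabled",
        }
        if config.Rule != nil {
            r.Mode = config.Rule.DefaultRetention.Mode
            // The real code derives r.Validity from the rule's Days/Years here;
            // that part is not shown in this excerpt.
        }
        return r
    }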
@ -154,17 +154,17 @@ func TestParseObjectLockConfig(t *testing.T) {
        expectErr   bool
    }{
        {
            value:       "<ObjectLockConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\" ><ObjectLockEnabled>yes</ObjectLockEnabled></ObjectLockConfiguration>",
            expectedErr: fmt.Errorf("only 'Enabled' value is allowd to ObjectLockEnabled element"),
            value:       `<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><ObjectLockEnabled>yes</ObjectLockEnabled></ObjectLockConfiguration>`,
            expectedErr: fmt.Errorf("only 'Enabled' value is allowed to ObjectLockEnabled element"),
            expectErr:   true,
        },
        {
            value:       "<ObjectLockConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><ObjectLockEnabled>Enabled</ObjectLockEnabled><Rule><DefaultRetention><Mode>COMPLIANCE</Mode><Days>0</Days></DefaultRetention></Rule></ObjectLockConfiguration>",
            value:       `<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><ObjectLockEnabled>Enabled</ObjectLockEnabled><Rule><DefaultRetention><Mode>COMPLIANCE</Mode><Days>0</Days></DefaultRetention></Rule></ObjectLockConfiguration>`,
            expectedErr: fmt.Errorf("Default retention period must be a positive integer value for 'Days'"),
            expectErr:   true,
        },
        {
            value:       "<ObjectLockConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><ObjectLockEnabled>Enabled</ObjectLockEnabled><Rule><DefaultRetention><Mode>COMPLIANCE</Mode><Days>30</Days></DefaultRetention></Rule></ObjectLockConfiguration>",
            value:       `<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><ObjectLockEnabled>Enabled</ObjectLockEnabled><Rule><DefaultRetention><Mode>COMPLIANCE</Mode><Days>30</Days></DefaultRetention></Rule></ObjectLockConfiguration>`,
            expectedErr: nil,
            expectErr:   false,
        },
@ -190,17 +190,17 @@ func TestParseObjectRetention(t *testing.T) {
        expectErr   bool
    }{
        {
            value:       "<?xml version=\"1.0\" encoding=\"UTF-8\"?><Retention xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Mode>string</Mode><RetainUntilDate>2020-01-02T15:04:05Z</RetainUntilDate></Retention>",
            value:       `<?xml version="1.0" encoding="UTF-8"?><Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Mode>string</Mode><RetainUntilDate>2020-01-02T15:04:05Z</RetainUntilDate></Retention>`,
            expectedErr: ErrUnknownWORMModeDirective,
            expectErr:   true,
        },
        {
            value:       "<?xml version=\"1.0\" encoding=\"UTF-8\"?><Retention xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Mode>COMPLIANCE</Mode><RetainUntilDate>2017-01-02T15:04:05Z</RetainUntilDate></Retention>",
            value:       `<?xml version="1.0" encoding="UTF-8"?><Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Mode>COMPLIANCE</Mode><RetainUntilDate>2017-01-02T15:04:05Z</RetainUntilDate></Retention>`,
            expectedErr: ErrPastObjectLockRetainDate,
            expectErr:   true,
        },
        {
            value:       "<?xml version=\"1.0\" encoding=\"UTF-8\"?><Retention xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Mode>GOVERNANCE</Mode><RetainUntilDate>2057-01-02T15:04:05Z</RetainUntilDate></Retention>",
            value:       `<?xml version="1.0" encoding="UTF-8"?><Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Mode>GOVERNANCE</Mode><RetainUntilDate>2057-01-02T15:04:05Z</RetainUntilDate></Retention>`,
            expectedErr: nil,
            expectErr:   false,
        },
@ -458,17 +458,17 @@ func TestParseObjectLegalHold(t *testing.T) {
        expectErr   bool
    }{
        {
            value:       "<?xml version=\"1.0\" encoding=\"UTF-8\"?><LegalHold xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Status>string</Status></LegalHold>",
            value:       `<?xml version="1.0" encoding="UTF-8"?><LegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Status>string</Status></LegalHold>`,
            expectedErr: ErrMalformedXML,
            expectErr:   true,
        },
        {
            value:       "<?xml version=\"1.0\" encoding=\"UTF-8\"?><LegalHold xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Status>ON</Status></LegalHold>",
            value:       `<?xml version="1.0" encoding="UTF-8"?><LegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Status>ON</Status></LegalHold>`,
            expectedErr: nil,
            expectErr:   false,
        },
        {
            value:       "<?xml version=\"1.0\" encoding=\"UTF-8\"?><LegalHold xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Status>On</Status></LegalHold>",
            value:       `<?xml version="1.0" encoding="UTF-8"?><LegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Status>On</Status></LegalHold>`,
            expectedErr: ErrMalformedXML,
            expectErr:   true,
        },