Mirror of https://github.com/minio/minio.git (synced 2025-11-10 14:09:48 -05:00)
remove FIFO bucket quota, use ILM expiration instead (#14206)
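The commit drops the FIFO quota type entirely; the equivalent "expire old data" behavior is expected to come from bucket lifecycle (ILM) expiration rules instead. For reference, a minimal sketch of adding such an expiration rule with the minio-go v7 client follows; the endpoint, credentials, bucket name and 30-day window are illustrative placeholders, not values taken from this change.

package main

import (
    "context"
    "log"

    "github.com/minio/minio-go/v7"
    "github.com/minio/minio-go/v7/pkg/credentials"
    "github.com/minio/minio-go/v7/pkg/lifecycle"
)

func main() {
    // Hypothetical endpoint and credentials, for illustration only.
    client, err := minio.New("localhost:9000", &minio.Options{
        Creds:  credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
        Secure: false,
    })
    if err != nil {
        log.Fatalln(err)
    }

    // Expire objects older than 30 days instead of relying on a FIFO quota.
    cfg := lifecycle.NewConfiguration()
    cfg.Rules = []lifecycle.Rule{
        {
            ID:         "expire-after-30d",
            Status:     "Enabled",
            Expiration: lifecycle.Expiration{Days: lifecycle.ExpirationDays(30)},
        },
    }
    if err := client.SetBucketLifecycle(context.Background(), "mybucket", cfg); err != nil {
        log.Fatalln(err)
    }
}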
@@ -24,8 +24,6 @@ import (
    "time"

    "github.com/minio/madmin-go"
    "github.com/minio/minio/internal/event"
    "github.com/minio/minio/internal/logger"
)

// BucketQuotaSys - map of bucket and quota configuration.
@@ -51,6 +49,35 @@ func NewBucketQuotaSys() *BucketQuotaSys {
    return &BucketQuotaSys{}
}

// Init initialize bucket quota.
func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
    sys.bucketStorageCache.Once.Do(func() {
        sys.bucketStorageCache.TTL = 1 * time.Second
        sys.bucketStorageCache.Update = func() (interface{}, error) {
            ctx, done := context.WithTimeout(context.Background(), 5*time.Second)
            defer done()

            return loadDataUsageFromBackend(ctx, objAPI)
        }
    })
}

// GetBucketUsageInfo return bucket usage info for a given bucket
func (sys *BucketQuotaSys) GetBucketUsageInfo(bucket string) (BucketUsageInfo, error) {
    v, err := sys.bucketStorageCache.Get()
    if err != nil {
        return BucketUsageInfo{}, err
    }

    dui, ok := v.(DataUsageInfo)
    if !ok {
        return BucketUsageInfo{}, fmt.Errorf("internal error: Unexpected DUI data type: %T", v)
    }

    bui := dui.BucketsUsage[bucket]
    return bui, nil
}

// parseBucketQuota parses BucketQuota from json
func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota, err error) {
    quotaCfg = &madmin.BucketQuota{}
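Init and GetBucketUsageInfo above wire up a small TTL cache: Once.Do installs an Update callback with a 1-second TTL, and Get serves the last DataUsageInfo loaded from the backend until it goes stale. A self-contained sketch of that caching pattern follows; the cachedValue type is illustrative only and is not MinIO's internal cache implementation.

package main

import (
    "fmt"
    "sync"
    "time"
)

// cachedValue is an illustrative stand-in for the TTL cache used above:
// Get returns the last value until TTL expires, then calls Update again.
type cachedValue struct {
    Once   sync.Once
    TTL    time.Duration
    Update func() (interface{}, error)

    mu    sync.Mutex
    last  time.Time
    value interface{}
}

func (c *cachedValue) Get() (interface{}, error) {
    c.mu.Lock()
    defer c.mu.Unlock()
    if c.value != nil && time.Since(c.last) < c.TTL {
        return c.value, nil // still fresh, serve the cached value
    }
    v, err := c.Update()
    if err != nil {
        return nil, err
    }
    c.value, c.last = v, time.Now()
    return v, nil
}

func main() {
    var cache cachedValue
    cache.Once.Do(func() {
        cache.TTL = time.Second
        cache.Update = func() (interface{}, error) {
            return fmt.Sprintf("loaded at %s", time.Now().Format(time.RFC3339)), nil
        }
    })
    v, _ := cache.Get()
    fmt.Println(v)
}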
@@ -63,45 +90,23 @@ func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota,
    return
}

func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64) error {
    objAPI := newObjectLayerFn()
    if objAPI == nil {
        return errServerNotInitialized
func (sys *BucketQuotaSys) enforceQuotaHard(ctx context.Context, bucket string, size int64) error {
    if size < 0 {
        return nil
    }

    sys.bucketStorageCache.Once.Do(func() {
        sys.bucketStorageCache.TTL = 1 * time.Second
        sys.bucketStorageCache.Update = func() (interface{}, error) {
            ctx, done := context.WithTimeout(context.Background(), 5*time.Second)
            defer done()
            return loadDataUsageFromBackend(ctx, objAPI)
        }
    })

    q, err := sys.Get(bucket)
    if err != nil {
        return err
    }

    if q != nil && q.Type == madmin.HardQuota && q.Quota > 0 {
        v, err := sys.bucketStorageCache.Get()
        bui, err := sys.GetBucketUsageInfo(bucket)
        if err != nil {
            return err
        }

        dui, ok := v.(DataUsageInfo)
        if !ok {
            return fmt.Errorf("internal error: Unexpected DUI data type: %T", v)
        }

        bui, ok := dui.BucketsUsage[bucket]
        if !ok {
            // bucket not found, cannot enforce quota
            // call will fail anyways later.
            return nil
        }

        if (bui.Size + uint64(size)) >= q.Quota {
        if bui.Size > 0 && ((bui.Size + uint64(size)) >= q.Quota) {
            return BucketQuotaExceeded{Bucket: bucket}
        }
    }
@@ -109,118 +114,9 @@ func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64)
    return nil
}

func enforceBucketQuota(ctx context.Context, bucket string, size int64) error {
    if size < 0 {
func enforceBucketQuotaHard(ctx context.Context, bucket string, size int64) error {
    if globalBucketQuotaSys == nil {
        return nil
    }

    return globalBucketQuotaSys.check(ctx, bucket, size)
}

// enforceFIFOQuota deletes objects in FIFO order until sufficient objects
// have been deleted so as to bring bucket usage within quota.
func enforceFIFOQuotaBucket(ctx context.Context, objectAPI ObjectLayer, bucket string, bui BucketUsageInfo) {
    // Check if the current bucket has quota restrictions, if not skip it
    cfg, err := globalBucketQuotaSys.Get(bucket)
    if err != nil {
        return
    }

    if cfg.Type != madmin.FIFOQuota {
        return
    }

    var toFree uint64
    if bui.Size > cfg.Quota && cfg.Quota > 0 {
        toFree = bui.Size - cfg.Quota
    }

    if toFree <= 0 {
        return
    }

    // Allocate new results channel to receive ObjectInfo.
    objInfoCh := make(chan ObjectInfo)

    versioned := globalBucketVersioningSys.Enabled(bucket)

    // Walk through all objects
    if err := objectAPI.Walk(ctx, bucket, "", objInfoCh, ObjectOptions{WalkVersions: versioned}); err != nil {
        logger.LogIf(ctx, err)
        return
    }

    // reuse the fileScorer used by disk cache to score entries by
    // ModTime to find the oldest objects in bucket to delete. In
    // the context of bucket quota enforcement - number of hits are
    // irrelevant.
    scorer, err := newFileScorer(toFree, UTCNow().Unix(), 1)
    if err != nil {
        logger.LogIf(ctx, err)
        return
    }

    rcfg, _ := globalBucketObjectLockSys.Get(bucket)
    for obj := range objInfoCh {
        // skip objects currently under retention
        if rcfg.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
            continue
        }
        scorer.addFileWithObjInfo(obj, 1)
    }

    // If we saw less than quota we are good.
    if scorer.seenBytes <= cfg.Quota {
        return
    }

    // Calculate how much we want to delete now.
    toFreeNow := scorer.seenBytes - cfg.Quota
    // We were less over quota than we thought. Adjust so we delete less.
    // If we are more over, leave it for the next run to pick up.
    if toFreeNow < toFree {
        if !scorer.adjustSaveBytes(int64(toFreeNow) - int64(toFree)) {
            // We got below or at quota.
            return
        }
    }

    var objects []ObjectToDelete
    numKeys := len(scorer.fileObjInfos())
    for i, obj := range scorer.fileObjInfos() {
        objects = append(objects, ObjectToDelete{
            ObjectV: ObjectV{
                ObjectName: obj.Name,
                VersionID:  obj.VersionID,
            },
        })
        if len(objects) < maxDeleteList && (i < numKeys-1) {
            // skip deletion until maxDeleteList or end of slice
            continue
        }

        if len(objects) == 0 {
            break
        }

        // Deletes a list of objects.
        _, deleteErrs := objectAPI.DeleteObjects(ctx, bucket, objects, ObjectOptions{
            Versioned: versioned,
        })
        for j := range deleteErrs {
            if deleteErrs[j] != nil {
                logger.LogIf(ctx, deleteErrs[j])
                continue
            }

            // Notify object deleted event.
            sendEvent(eventArgs{
                EventName:  event.ObjectRemovedDelete,
                BucketName: bucket,
                Object:     obj,
                Host:       "Internal: [FIFO-QUOTA-EXPIRY]",
            })
        }
        objects = nil
    }
    return globalBucketQuotaSys.enforceQuotaHard(ctx, bucket, size)
}
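With the FIFO path removed, enforceBucketQuotaHard above only ever acts on hard quotas configured through the admin API. A hedged sketch of setting such a quota with the madmin-go client follows; the endpoint, credentials, bucket name and 1 GiB limit are placeholders, not values from this change.

package main

import (
    "context"
    "log"

    "github.com/minio/madmin-go"
)

func main() {
    // Hypothetical endpoint and credentials, for illustration only.
    admClient, err := madmin.New("localhost:9000", "ACCESS_KEY", "SECRET_KEY", false)
    if err != nil {
        log.Fatalln(err)
    }

    // Cap "mybucket" at 1 GiB; writes beyond this fail with BucketQuotaExceeded.
    err = admClient.SetBucketQuota(context.Background(), "mybucket", &madmin.BucketQuota{
        Quota: 1 << 30, // 1 GiB
        Type:  madmin.HardQuota,
    })
    if err != nil {
        log.Fatalln(err)
    }
}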
@@ -798,35 +798,6 @@ func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]Bucke
    return dst
}

// bucketUsageInfo returns the buckets usage info.
// If not found all values returned are zero values.
func (d *dataUsageCache) bucketUsageInfo(bucket string) BucketUsageInfo {
    e := d.find(bucket)
    if e == nil {
        return BucketUsageInfo{}
    }
    flat := d.flatten(*e)
    bui := BucketUsageInfo{
        Size:                 uint64(flat.Size),
        ObjectsCount:         flat.Objects,
        ObjectSizesHistogram: flat.ObjSizes.toMap(),
    }
    if flat.ReplicationStats != nil {
        bui.ReplicaSize = flat.ReplicationStats.ReplicaSize
        bui.ReplicationInfo = make(map[string]BucketTargetUsageInfo, len(flat.ReplicationStats.Targets))
        for arn, stat := range flat.ReplicationStats.Targets {
            bui.ReplicationInfo[arn] = BucketTargetUsageInfo{
                ReplicationPendingSize:  stat.PendingSize,
                ReplicatedSize:          stat.ReplicatedSize,
                ReplicationFailedSize:   stat.FailedSize,
                ReplicationPendingCount: stat.PendingCount,
                ReplicationFailedCount:  stat.FailedCount,
            }
        }
    }
    return bui
}

// sizeRecursive returns the path as a flattened entry.
func (d *dataUsageCache) sizeRecursive(path string) *dataUsageEntry {
    root := d.find(path)
@@ -477,22 +477,6 @@ func (f *fileScorer) trimQueue() {
    }
}

// fileObjInfos returns all queued file object infos
func (f *fileScorer) fileObjInfos() []ObjectInfo {
    res := make([]ObjectInfo, 0, f.queue.Len())
    e := f.queue.Front()
    for e != nil {
        qfile := e.Value.(queuedFile)
        res = append(res, ObjectInfo{
            Name:      qfile.name,
            Size:      int64(qfile.size),
            VersionID: qfile.versionID,
        })
        e = e.Next()
    }
    return res
}

func (f *fileScorer) purgeFunc(p func(qfile queuedFile)) {
    e := f.queue.Front()
    for e != nil {
@@ -619,12 +619,6 @@ func (z *erasureServerPools) NSScanner(ctx context.Context, bf *bloomFilter, upd
            return
        case v := <-updateCloser:
            update()
            // Enforce quotas when all is done.
            if firstErr == nil {
                for _, b := range allBuckets {
                    enforceFIFOQuotaBucket(ctx, z, b.Name, allMerged.bucketUsageInfo(b.Name))
                }
            }
            close(v)
            return
        case <-updateTicker.C:
@@ -329,7 +329,7 @@ func (fs *FSObjects) NSScanner(ctx context.Context, bf *bloomFilter, updates cha
        logger.LogIf(ctx, totalCache.save(ctx, fs, dataUsageCacheName))
        cloned := totalCache.clone()
        updates <- cloned.dui(dataUsageRoot, buckets)
        enforceFIFOQuotaBucket(ctx, fs, b.Name, cloned.bucketUsageInfo(b.Name))

    }

    return nil
@@ -46,7 +46,6 @@ type ObjectOptions struct {
    ServerSideEncryption encrypt.ServerSide
    VersionSuspended     bool      // indicates if the bucket was previously versioned but is currently suspended.
    Versioned            bool      // indicates if the bucket is versioned
    WalkVersions         bool      // indicates if the we are interested in walking versions
    VersionID            string    // Specifies the versionID which needs to be overwritten or read
    MTime                time.Time // Is only set in POST/PUT operations
    Expires              time.Time // Is only used in POST/PUT operations
@@ -1106,7 +1106,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
    length := actualSize

    if !cpSrcDstSame {
        if err := enforceBucketQuota(ctx, dstBucket, actualSize); err != nil {
        if err := enforceBucketQuotaHard(ctx, dstBucket, actualSize); err != nil {
            writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
            return
        }
@@ -1659,7 +1659,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
        }
    }

    if err := enforceBucketQuota(ctx, bucket, size); err != nil {
    if err := enforceBucketQuotaHard(ctx, bucket, size); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
@@ -1996,7 +1996,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
        return
    }

    if err := enforceBucketQuota(ctx, bucket, size); err != nil {
    if err := enforceBucketQuotaHard(ctx, bucket, size); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
@@ -2465,7 +2465,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
        return
    }

    if err := enforceBucketQuota(ctx, dstBucket, actualPartSize); err != nil {
    if err := enforceBucketQuotaHard(ctx, dstBucket, actualPartSize); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
@@ -2754,7 +2754,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
        }
    }

    if err := enforceBucketQuota(ctx, bucket, size); err != nil {
    if err := enforceBucketQuotaHard(ctx, bucket, size); err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
        return
    }
@@ -577,6 +577,9 @@ func serverMain(ctx *cli.Context) {
    // Initialize site replication manager.
    globalSiteReplicationSys.Init(GlobalContext, newObject)

    // Initialize quota manager.
    globalBucketQuotaSys.Init(newObject)

    // Initialize users credentials and policies in background right after config has initialized.
    go globalIAMSys.Init(GlobalContext, newObject, globalEtcdClient, globalNotificationSys, globalRefreshIAMInterval)