// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"time"

	"github.com/minio/madmin-go/v3"
	"github.com/minio/minio/internal/logger"
)

// BucketQuotaSys - bucket quota configuration and cached bucket usage.
type BucketQuotaSys struct {
bucketStorageCache timedValue
}

// Get - Get quota configuration.
func (sys *BucketQuotaSys) Get(ctx context.Context, bucketName string) (*madmin.BucketQuota, error) {
	qCfg, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucketName)
	return qCfg, err
}

// NewBucketQuotaSys returns initialized BucketQuotaSys
func NewBucketQuotaSys() *BucketQuotaSys {
	return &BucketQuotaSys{}
}

// Init initializes bucket quota.
func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
	sys.bucketStorageCache.Once.Do(func() {
		// Quota enforcement is allowed to rely on an older cached usage
		// value (see #17351): PUT calls cannot afford latency build-ups
		// from a contended usage.json, which may be concurrently updated
		// by the scanner or healed during a disk-replacement heal. On a
		// stressed cluster that contention adds up and can surface as
		// unexpected errors on PUT. Rather than failing such requests,
		// we log and proceed with the older cached value; the scanner
		// eventually reconciles usage.json.
		//
		// Set the TTL to 10 seconds; that is sufficient since the
		// scanner does not update bucket usage values frequently.
		sys.bucketStorageCache.TTL = 10 * time.Second
		// Rely on the older value if loading usage from disk fails.
		sys.bucketStorageCache.Relax = true
		sys.bucketStorageCache.Update = func() (interface{}, error) {
			ctx, done := context.WithTimeout(context.Background(), 1*time.Second)
			defer done()

			return loadDataUsageFromBackend(ctx, objAPI)
		}
	})
}
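
// A rough sketch (illustrative only, not part of the original file) of how the
// relaxed cache above behaves; fetchUsage is a hypothetical loader standing in
// for loadDataUsageFromBackend. With Relax set, Get() can hand back the last
// cached value alongside the refresh error instead of failing outright:
//
//	var cache timedValue
//	cache.Once.Do(func() {
//		cache.TTL = 10 * time.Second
//		cache.Relax = true
//		cache.Update = func() (interface{}, error) {
//			return fetchUsage() // hypothetical
//		}
//	})
//	v, err := cache.Get() // err may be non-nil while v still holds a stale value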

// GetBucketUsageInfo returns bucket usage info for a given bucket.
func (sys *BucketQuotaSys) GetBucketUsageInfo(bucket string) (BucketUsageInfo, error) {
	v, err := sys.bucketStorageCache.Get()
	if err != nil && v != nil {
		logger.LogIf(GlobalContext, fmt.Errorf("unable to retrieve usage information for bucket: %s, relying on older value cached in-memory: err(%v)", bucket, err))
	}
	if v == nil {
		logger.LogIf(GlobalContext, fmt.Errorf("unable to retrieve usage information for bucket: %s, no reliable usage value available - quota will not be enforced", bucket))
	}
var bui BucketUsageInfo
	dui, ok := v.(DataUsageInfo)
if ok {
		bui = dui.BucketsUsage[bucket]
}
	return bui, nil
}
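
// Failure-mode note (a sketch of the behavior implemented above): when no
// usage value is available at all, GetBucketUsageInfo returns a zero
// BucketUsageInfo with a nil error, so bui.Size is 0 and hard-quota
// enforcement effectively admits the write.
//
//	bui, _ := globalBucketQuotaSys.GetBucketUsageInfo("mybucket")
//	_ = bui.Size // 0 when usage could not be loaded - quota is not enforced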

// parseBucketQuota parses BucketQuota from JSON.
func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota, err error) {
	quotaCfg = &madmin.BucketQuota{}
	if err = json.Unmarshal(data, quotaCfg); err != nil {
		return quotaCfg, err
}
	if !quotaCfg.IsValid() {
		if quotaCfg.Type == "fifo" {
			logger.LogIf(GlobalContext, errors.New("detected older 'fifo' quota config, 'fifo' feature is removed and no longer supported, please clear your quota configs using 'mc admin bucket quota alias/bucket --clear' and use 'mc ilm add' for expiration of objects"))
			return quotaCfg, fmt.Errorf("invalid quota type 'fifo'")
		}
		return quotaCfg, fmt.Errorf("invalid quota config %#v", quotaCfg)
}
return
}
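
// For reference, a hard-quota configuration payload as consumed by
// parseBucketQuota looks roughly like the following (a sketch; field names
// assumed from madmin.BucketQuota's JSON tags), here a 1 GiB hard limit:
//
//	{"quota": 1073741824, "quotatype": "hard"}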

func (sys *BucketQuotaSys) enforceQuotaHard(ctx context.Context, bucket string, size int64) error {
	if size < 0 {
		return nil
}

	q, err := sys.Get(ctx, bucket)
if err != nil {
return err
}

	if q != nil && q.Type == madmin.HardQuota && q.Quota > 0 {
		if uint64(size) >= q.Quota { // check if file size already exceeds the quota
			return BucketQuotaExceeded{Bucket: bucket}
		}

		bui, err := sys.GetBucketUsageInfo(bucket)
if err != nil {
return err
}

		if bui.Size > 0 && ((bui.Size + uint64(size)) >= q.Quota) {
			return BucketQuotaExceeded{Bucket: bucket}
}
}
return nil
}
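
// Worked example for the checks above, with illustrative numbers:
//
//	q.Quota  = 100 MiB
//	bui.Size = 90 MiB
//	PUT of 100 MiB -> rejected by the first check (size alone >= quota)
//	PUT of  20 MiB -> rejected, 90 MiB + 20 MiB >= 100 MiB
//	PUT of   5 MiB -> admitted, 90 MiB + 5 MiB  <  100 MiB
//
// Note the >= comparison: a write that lands exactly on the quota is rejected.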

func enforceBucketQuotaHard(ctx context.Context, bucket string, size int64) error {
	if globalBucketQuotaSys == nil {
return nil
}
	return globalBucketQuotaSys.enforceQuotaHard(ctx, bucket, size)
}
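
// A caller on the object-write path would typically gate the request as in
// this illustrative sketch (the surrounding handler plumbing is assumed, not
// part of this file):
//
//	if err := enforceBucketQuotaHard(ctx, bucket, size); err != nil {
//		return err // surfaces to the client as BucketQuotaExceeded
//	}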