mirror of https://github.com/minio/minio.git
feat: allow retaining parity SLA to be configurable (#19260)
At scale, customers might start out with failed drives, which skews the overall usage ratio per EC set. Make the parity upgrade configurable so customers can turn it off as needed, depending on how comfortable they are with the trade-off.
parent 5c32058ff3
commit ce1c640ce0
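For context on the trade-off being made configurable: when availability optimized (the default), every offline drive raises the parity used for newly written objects, capped at half the erasure set; with optimize=capacity the configured parity is kept as-is. A minimal standalone sketch of that arithmetic, assuming an illustrative 8-drive set with EC:2 — not the MinIO source:

    package main

    import "fmt"

    // effectiveParity mirrors the upgrade logic in this commit (sketch only):
    // every offline drive bumps parity by one, capped at half the set, but the
    // bump happens only when the deployment is availability optimized.
    func effectiveParity(setSize, configured, offline int, availabilityOptimized bool) int {
        parity := configured
        if availabilityOptimized {
            parity += offline
            if parity >= setSize/2 {
                parity = setSize / 2
            }
        }
        return parity
    }

    func main() {
        // Illustrative: 8-drive erasure set, EC:2, one drive down at write time.
        fmt.Println(effectiveParity(8, 2, 1, true))  // 3 -> parity SLA retained
        fmt.Println(effectiveParity(8, 2, 1, false)) // 2 -> capacity optimized
    }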
@@ -401,36 +401,33 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
         parityDrives = er.defaultParityCount
     }
 
-    // If we have offline disks upgrade the number of erasure codes for this object.
-    parityOrig := parityDrives
-
-    var offlineDrives int
-    for _, disk := range onlineDisks {
-        if disk == nil {
-            parityDrives++
-            offlineDrives++
-            continue
-        }
-        if !disk.IsOnline() {
-            parityDrives++
-            offlineDrives++
-            continue
-        }
-    }
-
-    if offlineDrives >= (len(onlineDisks)+1)/2 {
-        // if offline drives are more than 50% of the drives
-        // we have no quorum, we shouldn't proceed just
-        // fail at that point.
-        return nil, toObjectErr(errErasureWriteQuorum, bucket, object)
-    }
-
-    if parityDrives >= len(onlineDisks)/2 {
-        parityDrives = len(onlineDisks) / 2
-    }
-
-    if parityOrig != parityDrives {
-        userDefined[minIOErasureUpgraded] = strconv.Itoa(parityOrig) + "->" + strconv.Itoa(parityDrives)
+    if globalStorageClass.AvailabilityOptimized() {
+        // If we have offline disks upgrade the number of erasure codes for this object.
+        parityOrig := parityDrives
+
+        var offlineDrives int
+        for _, disk := range onlineDisks {
+            if disk == nil || !disk.IsOnline() {
+                parityDrives++
+                offlineDrives++
+                continue
+            }
+        }
+
+        if offlineDrives >= (len(onlineDisks)+1)/2 {
+            // if offline drives are more than 50% of the drives
+            // we have no quorum, we shouldn't proceed just
+            // fail at that point.
+            return nil, toObjectErr(errErasureWriteQuorum, bucket, object)
+        }
+
+        if parityDrives >= len(onlineDisks)/2 {
+            parityDrives = len(onlineDisks) / 2
+        }
+
+        if parityOrig != parityDrives {
+            userDefined[minIOErasureUpgraded] = strconv.Itoa(parityOrig) + "->" + strconv.Itoa(parityDrives)
+        }
     }
 
     dataDrives := len(onlineDisks) - parityDrives
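One detail worth noting in the hunk above: the write-quorum guard now lives inside the availability-optimized branch, and it trips once offline drives reach (len(onlineDisks)+1)/2. A quick standalone sketch of that threshold for a few illustrative set sizes:

    package main

    import "fmt"

    func main() {
        // Offline-drive count at which the upload is refused, per the
        // (len(onlineDisks)+1)/2 guard above. Set sizes are illustrative.
        for _, n := range []int{4, 5, 8, 16} {
            fmt.Printf("set of %d drives: fail once >= %d are offline\n", n, (n+1)/2)
        }
    }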
@@ -1297,25 +1297,21 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 
     storageDisks := er.getDisks()
 
-    parityDrives := len(storageDisks) / 2
-    if !opts.MaxParity {
-        // Get parity and data drive count based on storage class metadata
-        parityDrives = globalStorageClass.GetParityForSC(userDefined[xhttp.AmzStorageClass])
-        if parityDrives < 0 {
-            parityDrives = er.defaultParityCount
-        }
-
+    // Get parity and data drive count based on storage class metadata
+    parityDrives := globalStorageClass.GetParityForSC(userDefined[xhttp.AmzStorageClass])
+    if parityDrives < 0 {
+        parityDrives = er.defaultParityCount
+    }
+    if opts.MaxParity {
+        parityDrives = len(storageDisks) / 2
+    }
+    if !opts.MaxParity && globalStorageClass.AvailabilityOptimized() {
         // If we have offline disks upgrade the number of erasure codes for this object.
         parityOrig := parityDrives
 
         var offlineDrives int
         for _, disk := range storageDisks {
-            if disk == nil {
-                parityDrives++
-                offlineDrives++
-                continue
-            }
-            if !disk.IsOnline() {
+            if disk == nil || !disk.IsOnline() {
                 parityDrives++
                 offlineDrives++
                 continue
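The putObject hunk reorders the decision: storage-class parity is resolved first, opts.MaxParity then overrides it to half the set, and the offline-drive upgrade runs only when MaxParity is off and the deployment is availability optimized. A standalone sketch of that precedence (pickParity is a hypothetical helper, not MinIO code):

    package main

    import "fmt"

    // pickParity sketches the reordered decision in putObject: storage-class
    // parity first, then the MaxParity override, then the availability upgrade
    // only when MaxParity is off. scParity < 0 stands in for GetParityForSC
    // reporting an unknown storage class.
    func pickParity(scParity, defaultParity, disks, offline int, maxParity, availOpt bool) int {
        parity := scParity
        if parity < 0 {
            parity = defaultParity
        }
        if maxParity {
            parity = disks / 2
        }
        if !maxParity && availOpt {
            parity += offline
            if parity >= disks/2 {
                parity = disks / 2
            }
        }
        return parity
    }

    func main() {
        fmt.Println(pickParity(-1, 4, 12, 2, false, true)) // 6: default 4 upgraded by 2 offline drives
        fmt.Println(pickParity(2, 4, 12, 2, false, false)) // 2: capacity optimized, no upgrade
        fmt.Println(pickParity(2, 4, 12, 2, true, true))   // 6: MaxParity pins parity to half the set
    }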
@@ -1,4 +1,4 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
+// Copyright (c) 2015-2024 MinIO, Inc.
 //
 // This file is part of MinIO Object Storage stack
 //
@@ -38,6 +38,12 @@ var (
             Optional:    true,
             Type:        "string",
         },
+        config.HelpKV{
+            Key:         ClassOptimize,
+            Description: `optimize parity calculation for standard storage class, set 'capacity' for capacity optimized (no additional parity)` + defaultHelpPostfix(ClassOptimize),
+            Optional:    true,
+            Type:        "string",
+        },
         config.HelpKV{
             Key:         config.Comment,
             Description: config.DefaultComment,
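Assuming the usual MinIO configuration surfaces, this help entry means the knob should be settable at runtime via something like mc admin config set ALIAS/ storage_class optimize=capacity, or before startup through the MINIO_STORAGE_CLASS_OPTIMIZE environment variable introduced below; the key and environment names come straight from this diff, while the exact mc invocation is illustrative.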
@@ -1,4 +1,4 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
+// Copyright (c) 2015-2024 MinIO, Inc.
 //
 // This file is part of MinIO Object Storage stack
 //
@@ -40,11 +40,14 @@ const (
 const (
     ClassStandard = "standard"
     ClassRRS      = "rrs"
+    ClassOptimize = "optimize"
 
     // Reduced redundancy storage class environment variable
     RRSEnv = "MINIO_STORAGE_CLASS_RRS"
     // Standard storage class environment variable
     StandardEnv = "MINIO_STORAGE_CLASS_STANDARD"
+    // Optimize storage class environment variable
+    OptimizeEnv = "MINIO_STORAGE_CLASS_OPTIMIZE"
 
     // Supported storage class scheme is EC
     schemePrefix = "EC"
@@ -67,6 +70,10 @@ var (
             Key:   ClassRRS,
             Value: "EC:1",
         },
+        config.KV{
+            Key:   ClassOptimize,
+            Value: "availability",
+        },
     }
 )
@@ -82,6 +89,7 @@ var ConfigLock sync.RWMutex
 type Config struct {
     Standard    StorageClass `json:"standard"`
     RRS         StorageClass `json:"rrs"`
+    Optimize    string       `json:"optimize"`
     initialized bool
 }
@@ -245,12 +253,40 @@ func (sCfg *Config) GetParityForSC(sc string) (parity int) {
     }
 }
 
+// CapacityOptimized - returns true if the storage-class is capacity optimized
+// meaning we will not use additional parities when drives are offline.
+//
+// Default is "availability" optimized, unless this is configured.
+func (sCfg *Config) CapacityOptimized() bool {
+    ConfigLock.RLock()
+    defer ConfigLock.RUnlock()
+    if !sCfg.initialized {
+        return false
+    }
+    return sCfg.Optimize == "capacity"
+}
+
+// AvailabilityOptimized - returns true if the storage-class is availability
+// optimized, meaning we will use additional parities when drives are offline
+// to retain parity SLA.
+//
+// Default is "availability" optimized.
+func (sCfg *Config) AvailabilityOptimized() bool {
+    ConfigLock.RLock()
+    defer ConfigLock.RUnlock()
+    if !sCfg.initialized {
+        return true
+    }
+    return sCfg.Optimize == "availability" || sCfg.Optimize == ""
+}
+
 // Update update storage-class with new config
 func (sCfg *Config) Update(newCfg Config) {
     ConfigLock.Lock()
     defer ConfigLock.Unlock()
     sCfg.RRS = newCfg.RRS
     sCfg.Standard = newCfg.Standard
+    sCfg.Optimize = newCfg.Optimize
     sCfg.initialized = true
 }
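A standalone sketch of the accessor semantics above (a local mirror, since internal/config/storageclass is module-internal): the zero value behaves availability optimized until a config has actually been loaded, so both the pre-upgrade default and unconfigured deployments keep today's behavior:

    package main

    import "fmt"

    // cfg mirrors only the fields the two accessors consult.
    type cfg struct {
        optimize    string
        initialized bool
    }

    func (c cfg) capacityOptimized() bool {
        return c.initialized && c.optimize == "capacity"
    }

    func (c cfg) availabilityOptimized() bool {
        if !c.initialized {
            return true // safe default before any config is loaded
        }
        return c.optimize == "availability" || c.optimize == ""
    }

    func main() {
        fmt.Println(cfg{}.availabilityOptimized())                                    // true: uninitialized default
        fmt.Println(cfg{optimize: "capacity", initialized: true}.capacityOptimized()) // true: upgrades disabled
        fmt.Println(cfg{optimize: "", initialized: true}.availabilityOptimized())     // true: empty keeps the default
    }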
@@ -320,5 +356,6 @@ func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) {
     }
 
     cfg.initialized = true
+    cfg.Optimize = env.Get(OptimizeEnv, kvs.Get(ClassOptimize))
     return cfg, nil
 }
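Note the lookup order in the last hunk: the environment variable wins over the stored config key. A small sketch of that precedence, with envGet standing in for MinIO's env.Get helper (hypothetical local stand-in):

    package main

    import (
        "fmt"
        "os"
    )

    // envGet stands in for MinIO's env.Get helper (hypothetical local version):
    // return the environment value when set, else fall back to the config KV.
    func envGet(key, fallback string) string {
        if v, ok := os.LookupEnv(key); ok {
            return v
        }
        return fallback
    }

    func main() {
        fmt.Println(envGet("MINIO_STORAGE_CLASS_OPTIMIZE", "availability")) // "availability": KV fallback
        os.Setenv("MINIO_STORAGE_CLASS_OPTIMIZE", "capacity")
        fmt.Println(envGet("MINIO_STORAGE_CLASS_OPTIMIZE", "availability")) // "capacity": env var wins
    }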