tolerate listing with only readQuorum disks (#10357)

We can reduce this further in the future, but this is a good
value to keep around. With the advent of continuous healing,
we can be assured that the namespace will eventually become
consistent, so it is okay to avoid the necessity of listing
across all drives on all sets.
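
To make the idea concrete, here is a minimal sketch in Go; it is
not MinIO's actual listing path, and listWithQuorum plus the
function-valued disks are assumptions for illustration. Once
readQuorum disks have answered, the listing returns instead of
waiting on every drive in every set, and continuous healing
reconciles the rest.

package main

import (
    "errors"
    "fmt"
)

// listWithQuorum fans a list call out to all disks but returns as
// soon as readQuorum of them answer successfully, instead of
// waiting on every drive. Entries are merged as-is; duplicates are
// not removed in this sketch.
func listWithQuorum(disks []func() ([]string, error), readQuorum int) ([]string, error) {
    type result struct {
        entries []string
        err     error
    }
    results := make(chan result, len(disks))
    for _, list := range disks {
        go func(list func() ([]string, error)) {
            entries, err := list()
            results <- result{entries, err}
        }(list)
    }

    var merged []string
    success := 0
    for range disks {
        r := <-results
        if r.err != nil {
            continue
        }
        merged = append(merged, r.entries...)
        success++
        if success >= readQuorum {
            // Enough disks responded; continuous healing will
            // eventually bring the remaining drives in line with
            // this view of the namespace.
            return merged, nil
        }
    }
    return nil, errors.New("listing failed: below read quorum")
}

func main() {
    disks := []func() ([]string, error){
        func() ([]string, error) { return []string{"a.txt"}, nil },
        func() ([]string, error) { return nil, errors.New("disk offline") },
        func() ([]string, error) { return []string{"b.txt"}, nil },
        func() ([]string, error) { return []string{"c.txt"}, nil },
    }
    entries, err := listWithQuorum(disks, 2)
    fmt.Println(entries, err)
}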

Bonus: Pop()'s in parallel seem to have the potential to wait
too long on large drive setups and cause more slowness instead
of gaining any performance, so remove them for now.

Also, implement a load-balanced reply for local disks,
ensuring that local disks have an affinity for the
following (a minimal sketch follows the list):
- cleanupStaleMultipartUploads()
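
A hedged sketch of the local-disk affinity idea, assuming a
hypothetical disk type with an isLocal flag rather than MinIO's
real storage types: background work such as
cleanupStaleMultipartUploads() is handed an ordering that puts
locally attached drives first.

package main

import "fmt"

// disk is a stand-in type for this sketch; the real code works on
// MinIO's storage layer. isLocal marks drives attached to this node.
type disk struct {
    endpoint string
    isLocal  bool
}

// preferLocal returns the disks ordered so that local drives come
// first, giving background jobs (such as stale multipart upload
// cleanup) an affinity for local disks before any remote ones.
func preferLocal(disks []disk) []disk {
    ordered := make([]disk, 0, len(disks))
    for _, d := range disks {
        if d.isLocal {
            ordered = append(ordered, d)
        }
    }
    for _, d := range disks {
        if !d.isLocal {
            ordered = append(ordered, d)
        }
    }
    return ordered
}

func main() {
    disks := []disk{
        {"http://node2/drive1", false},
        {"/drive1", true},
        {"http://node3/drive1", false},
        {"/drive2", true},
    }
    for _, d := range preferLocal(disks) {
        fmt.Println(d.endpoint, "local:", d.isLocal)
    }
}

Ordering local drives first lets each node run the cleanup against
its own disks, spreading the work across the cluster instead of
funnelling it over the network.
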
Harshavardhana authored on 2020-08-26 19:29:35 -07:00, committed by GitHub
parent 0a2e6d58a5
commit a359e36e35
14 changed files with 162 additions and 204 deletions

@@ -156,7 +156,7 @@ func parseStorageClass(storageClassEnv string) (sc StorageClass, err error) {
 }
 
 // Validates the parity disks.
-func validateParity(ssParity, rrsParity, drivesPerSet int) (err error) {
+func validateParity(ssParity, rrsParity, setDriveCount int) (err error) {
     if ssParity == 0 && rrsParity == 0 {
         return nil
     }
@@ -174,12 +174,12 @@ func validateParity(ssParity, rrsParity, drivesPerSet int) (err error) {
         return fmt.Errorf("Reduced redundancy storage class parity %d should be greater than or equal to %d", rrsParity, minParityDisks)
     }
 
-    if ssParity > drivesPerSet/2 {
-        return fmt.Errorf("Standard storage class parity %d should be less than or equal to %d", ssParity, drivesPerSet/2)
+    if ssParity > setDriveCount/2 {
+        return fmt.Errorf("Standard storage class parity %d should be less than or equal to %d", ssParity, setDriveCount/2)
     }
 
-    if rrsParity > drivesPerSet/2 {
-        return fmt.Errorf("Reduced redundancy storage class parity %d should be less than or equal to %d", rrsParity, drivesPerSet/2)
+    if rrsParity > setDriveCount/2 {
+        return fmt.Errorf("Reduced redundancy storage class parity %d should be less than or equal to %d", rrsParity, setDriveCount/2)
     }
 
     if ssParity > 0 && rrsParity > 0 {
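
The checks above enforce that parity for either storage class can
never exceed half of an erasure set's drives. A tiny illustrative
reimplementation of just that rule (not the package's exported API):

package main

import "fmt"

// maxParity mirrors the checks in validateParity above: parity for
// any storage class may not exceed half the drives in an erasure set.
func maxParity(setDriveCount int) int { return setDriveCount / 2 }

func main() {
    for _, drives := range []int{4, 6, 16} {
        fmt.Printf("set of %d drives -> max parity %d\n", drives, maxParity(drives))
    }
}
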
@@ -220,9 +220,9 @@ func Enabled(kvs config.KVS) bool {
 }
 
 // LookupConfig - lookup storage class config and override with valid environment settings if any.
-func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {
+func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) {
     cfg = Config{}
-    cfg.Standard.Parity = drivesPerSet / 2
+    cfg.Standard.Parity = setDriveCount / 2
     cfg.RRS.Parity = defaultRRSParity
 
     if err = config.CheckValidKeys(config.StorageClassSubSys, kvs, DefaultKVS); err != nil {
@@ -239,7 +239,7 @@ func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {
         }
     }
     if cfg.Standard.Parity == 0 {
-        cfg.Standard.Parity = drivesPerSet / 2
+        cfg.Standard.Parity = setDriveCount / 2
     }
 
     if rrsc != "" {
@@ -254,7 +254,7 @@ func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {
 
     // Validation is done after parsing both the storage classes. This is needed because we need one
     // storage class value to deduce the correct value of the other storage class.
-    if err = validateParity(cfg.Standard.Parity, cfg.RRS.Parity, drivesPerSet); err != nil {
+    if err = validateParity(cfg.Standard.Parity, cfg.RRS.Parity, setDriveCount); err != nil {
         return Config{}, err
     }
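
Tying the renamed pieces together, a hedged stand-alone sketch of
the flow above; the KVS/environment parsing is omitted and the
defaultRRSParity value here is an assumption, not necessarily the
package's constant. When no standard-class parity is configured it
defaults to setDriveCount/2, and the result is then checked against
the same setDriveCount/2 bound (validateParity performs additional
checks not shown here).

package main

import (
    "errors"
    "fmt"
)

// defaultRRSParity is assumed to be 2 for illustration only; the
// real value is defined in the storageclass package.
const defaultRRSParity = 2

// lookupParity sketches the defaulting done by LookupConfig above,
// without the KVS/environment parsing.
func lookupParity(configuredSS, setDriveCount int) (ss, rrs int, err error) {
    ss, rrs = configuredSS, defaultRRSParity
    if ss == 0 {
        ss = setDriveCount / 2
    }
    if ss > setDriveCount/2 || rrs > setDriveCount/2 {
        return 0, 0, errors.New("parity exceeds half the set drive count")
    }
    return ss, rrs, nil
}

func main() {
    ss, rrs, err := lookupParity(0, 12) // no explicit standard parity configured
    fmt.Println(ss, rrs, err)           // 6 2 <nil>
}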