askDisks for strict quorum to be equal to read quorum (#14623)
parent 4d2fc530d0
commit 5cfedcfe33
@@ -190,7 +190,7 @@ var (
 	globalBucketTargetSys *BucketTargetSys
 	// globalAPIConfig controls S3 API requests throttling,
 	// healthcheck readiness deadlines and cors settings.
-	globalAPIConfig = apiConfig{listQuorum: -1}
+	globalAPIConfig = apiConfig{listQuorum: "strict"}
 
 	globalStorageClass storageclass.Config
 	globalLDAPConfig   xldap.Config
@@ -38,7 +38,7 @@ type apiConfig struct {
 	requestsDeadline time.Duration
 	requestsPool     chan struct{}
 	clusterDeadline  time.Duration
-	listQuorum       int
+	listQuorum       string
 	corsAllowOrigins []string
 	// total drives per erasure set across pools.
 	totalDriveCount int
@@ -136,7 +136,7 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {
 		t.requestsPool = make(chan struct{}, apiRequestsMaxPerNode)
 	}
 	t.requestsDeadline = cfg.RequestsDeadline
-	t.listQuorum = cfg.GetListQuorum()
+	t.listQuorum = cfg.ListQuorum
 	if globalReplicationPool != nil &&
 		cfg.ReplicationWorkers != t.replicationWorkers {
 		globalReplicationPool.ResizeFailedWorkers(cfg.ReplicationFailedWorkers)
@@ -170,7 +170,7 @@ func (t *apiConfig) shouldGzipObjects() bool {
 	return t.gzipObjects
 }
 
-func (t *apiConfig) getListQuorum() int {
+func (t *apiConfig) getListQuorum() string {
 	t.mu.RLock()
 	defer t.mu.RUnlock()
 
@@ -42,7 +42,7 @@ func Benchmark_bucketMetacache_findCache(b *testing.B) {
 			FilterPrefix: "",
 			Marker:       "",
 			Limit:        0,
-			AskDisks:     0,
+			AskDisks:     "strict",
 			Recursive:    false,
 			Separator:    slashSeparator,
 			Create:       true,
@@ -59,7 +59,7 @@ func Benchmark_bucketMetacache_findCache(b *testing.B) {
 			FilterPrefix: "",
 			Marker:       "",
 			Limit:        0,
-			AskDisks:     0,
+			AskDisks:     "strict",
 			Recursive:    false,
 			Separator:    slashSeparator,
 			Create:       true,
@@ -65,10 +65,8 @@ type listPathOptions struct {
 	// Limit the number of results.
 	Limit int
 
-	// The number of disks to ask. Special values:
-	// 0 uses default number of disks.
-	// -1 use at least 50% of disks or at least the default number.
-	AskDisks int
+	// The number of disks to ask.
+	AskDisks string
 
 	// InclDeleted will keep all entries where latest version is a delete marker.
 	InclDeleted bool
@@ -541,19 +539,38 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
 	}
 }
 
+// getListQuorum interprets list quorum values and returns appropriate
+// acceptable quorum expected for list operations
+func getListQuorum(quorum string, driveCount int) int {
+	switch quorum {
+	case "disk":
+		// smallest possible value, generally meant for testing.
+		return 1
+	case "reduced":
+		return 2
+	case "strict":
+		return -1
+	}
+	// Defaults to (driveCount+1)/2 drives per set, defaults to "optimal" value
+	if driveCount > 0 {
+		return (driveCount + 1) / 2
+	} // "3" otherwise.
+	return 3
+}
+
 // Will return io.EOF if continuing would not yield more results.
 func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions, results chan<- metaCacheEntry) (err error) {
 	defer close(results)
 	o.debugf(color.Green("listPath:")+" with options: %#v", o)
 
-	askDisks := o.AskDisks
-	listingQuorum := o.AskDisks - 1
+	askDisks := getListQuorum(o.AskDisks, er.setDriveCount)
+	listingQuorum := askDisks - 1
 	disks := er.getDisks()
 	var fallbackDisks []StorageAPI
 
 	// Special case: ask all disks if the drive count is 4
 	if askDisks <= 0 || er.setDriveCount == 4 {
-		askDisks = len(disks) // with 'strict' quorum list on all online disks.
+		askDisks = len(disks) // with 'strict' quorum list on all drives.
 		listingQuorum = (len(disks) + 1) / 2 // keep this such that we can list all objects with different quorum ratio.
 	}
 	if askDisks > 0 && len(disks) > askDisks {
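For illustration, a small self-contained sketch of what the new helper does. The switch body is copied from the hunk above; the 16-drive erasure set and the program around it are hypothetical. "strict" returns -1, which listPath then expands to every drive in the set, while the default case now scales with the set size rather than returning a fixed value.

// Sketch only: mirrors the getListQuorum added in this commit; the
// 16-drive erasure-set size below is a hypothetical example.
package main

import "fmt"

func getListQuorum(quorum string, driveCount int) int {
	switch quorum {
	case "disk":
		// smallest possible value, generally meant for testing.
		return 1
	case "reduced":
		return 2
	case "strict":
		return -1 // listPath turns this into "ask every drive in the set"
	}
	// "optimal" (or any unrecognized value) scales with the set size.
	if driveCount > 0 {
		return (driveCount + 1) / 2
	}
	return 3
}

func main() {
	const setDriveCount = 16 // hypothetical erasure-set size
	for _, q := range []string{"disk", "reduced", "optimal", "strict"} {
		fmt.Printf("%-8s -> askDisks %d\n", q, getListQuorum(q, setDriveCount))
	}
	// disk     -> askDisks 1
	// reduced  -> askDisks 2
	// optimal  -> askDisks 8
	// strict   -> askDisks -1
}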
@@ -822,7 +839,7 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
		for _, fd := range fdsCopy {
			// Grab a fallback disk
			fds = fds[1:]
-			if fd != nil {
+			if fd != nil && fd.IsOnline() {
				return fd
			}
		}
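For context, a hypothetical, stripped-down stand-in for the fallback-drive selection in listPathRaw (the real StorageAPI types are not reproduced here): with the added IsOnline check, a nil or offline fallback drive is skipped instead of being handed back to the caller.

// Illustrative sketch with fake types, not the commit's real code.
package main

import "fmt"

type fakeDrive struct {
	name   string
	online bool
}

func (d *fakeDrive) IsOnline() bool { return d.online }

// nextFallback mirrors the loop in the hunk above: consume fallback drives
// until one that is non-nil and online is found, otherwise return nil.
func nextFallback(fds []*fakeDrive) (*fakeDrive, []*fakeDrive) {
	for len(fds) > 0 {
		fd := fds[0]
		fds = fds[1:]
		if fd != nil && fd.IsOnline() { // IsOnline() is the check added here
			return fd, fds
		}
	}
	return nil, fds
}

func main() {
	fds := []*fakeDrive{nil, {"d1", false}, {"d2", true}, {"d3", true}}
	fd, rest := nextFallback(fds)
	fmt.Println(fd.name, len(rest)) // "d2 1": offline and nil drives were skipped
}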
@@ -300,6 +300,8 @@ func configRetriableErrors(err error) bool {
 }
 
 func initServer(ctx context.Context, newObject ObjectLayer) error {
+	t1 := time.Now()
+
 	// Once the config is fully loaded, initialize the new object layer.
 	setObjectLayer(newObject)
 
@@ -352,7 +354,7 @@ func initServer(ctx context.Context, newObject ObjectLayer) error {
 	// All successful return.
 	if globalIsDistErasure {
 		// These messages only meant primarily for distributed setup, so only log during distributed setup.
-		logger.Info("All MinIO sub-systems initialized successfully")
+		logger.Info("All MinIO sub-systems initialized successfully in %s", time.Since(t1))
 	}
 	return nil
 }
@@ -162,22 +162,6 @@ func (sCfg *Config) UnmarshalJSON(data []byte) error {
 	return json.Unmarshal(data, &aux)
 }
 
-// GetListQuorum interprets list quorum values and returns appropriate
-// acceptable quorum expected for list operations
-func (sCfg Config) GetListQuorum() int {
-	switch sCfg.ListQuorum {
-	case "reduced":
-		return 2
-	case "disk":
-		// smallest possible value, generally meant for testing.
-		return 1
-	case "strict":
-		return -1
-	}
-	// Defaults to 3 drives per set, defaults to "optimal" value
-	return 3
-}
-
 // LookupConfig - lookup api config and override with valid environment settings if any.
 func LookupConfig(kvs config.KVS) (cfg Config, err error) {
 	// remove this since we have removed this already.
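A worked comparison of the defaults, purely illustrative (the set sizes are hypothetical): the removed Config.GetListQuorum always fell back to 3 drives for the default setting, whereas cmd's new getListQuorum above returns (driveCount+1)/2, so larger erasure sets now list against proportionally more drives.

// Illustrative only: the old default was a fixed 3; the new default is
// (driveCount+1)/2 as implemented by cmd's getListQuorum in this commit.
package main

import "fmt"

func main() {
	newDefault := func(driveCount int) int { return (driveCount + 1) / 2 }
	for _, drives := range []int{4, 8, 16} { // hypothetical erasure-set sizes
		fmt.Printf("%2d drives: old default = 3, new default = %d\n", drives, newDefault(drives))
	}
	//  4 drives: old default = 3, new default = 2
	//  8 drives: old default = 3, new default = 4
	// 16 drives: old default = 3, new default = 8
}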