Mirror of https://github.com/minio/minio.git
askDisks for strict quorum to be equal to read quorum (#14623)

Parent: 4d2fc530d0
Commit: 5cfedcfe33
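In short: the list quorum moves from integer sentinels (0 for the default, -1 for strict) to named levels ("disk", "reduced", "optimal", "strict"), carried as a string through apiConfig and listPathOptions and interpreted by a new getListQuorum helper placed next to the listing code. Under "strict", listing asks all drives while requiring agreement from (driveCount+1)/2 of them, i.e. the read quorum of the set. The commit also skips offline fallback disks in listPathRaw and logs how long server initialization took.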
@@ -190,7 +190,7 @@ var (
 	globalBucketTargetSys *BucketTargetSys
 	// globalAPIConfig controls S3 API requests throttling,
 	// healthcheck readiness deadlines and cors settings.
-	globalAPIConfig = apiConfig{listQuorum: -1}
+	globalAPIConfig = apiConfig{listQuorum: "strict"}

 	globalStorageClass storageclass.Config
 	globalLDAPConfig xldap.Config
@@ -38,7 +38,7 @@ type apiConfig struct {
 	requestsDeadline time.Duration
 	requestsPool chan struct{}
 	clusterDeadline time.Duration
-	listQuorum int
+	listQuorum string
 	corsAllowOrigins []string
 	// total drives per erasure set across pools.
 	totalDriveCount int
@@ -136,7 +136,7 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {
 		t.requestsPool = make(chan struct{}, apiRequestsMaxPerNode)
 	}
 	t.requestsDeadline = cfg.RequestsDeadline
-	t.listQuorum = cfg.GetListQuorum()
+	t.listQuorum = cfg.ListQuorum
 	if globalReplicationPool != nil &&
 		cfg.ReplicationWorkers != t.replicationWorkers {
 		globalReplicationPool.ResizeFailedWorkers(cfg.ReplicationFailedWorkers)
@@ -170,7 +170,7 @@ func (t *apiConfig) shouldGzipObjects() bool {
 	return t.gzipObjects
 }

-func (t *apiConfig) getListQuorum() int {
+func (t *apiConfig) getListQuorum() string {
 	t.mu.RLock()
 	defer t.mu.RUnlock()

@@ -42,7 +42,7 @@ func Benchmark_bucketMetacache_findCache(b *testing.B) {
 		FilterPrefix: "",
 		Marker: "",
 		Limit: 0,
-		AskDisks: 0,
+		AskDisks: "strict",
 		Recursive: false,
 		Separator: slashSeparator,
 		Create: true,
@@ -59,7 +59,7 @@ func Benchmark_bucketMetacache_findCache(b *testing.B) {
 		FilterPrefix: "",
 		Marker: "",
 		Limit: 0,
-		AskDisks: 0,
+		AskDisks: "strict",
 		Recursive: false,
 		Separator: slashSeparator,
 		Create: true,
@@ -65,10 +65,8 @@ type listPathOptions struct {
 	// Limit the number of results.
 	Limit int

-	// The number of disks to ask. Special values:
-	// 0 uses default number of disks.
-	// -1 use at least 50% of disks or at least the default number.
-	AskDisks int
+	// The number of disks to ask.
+	AskDisks string

 	// InclDeleted will keep all entries where latest version is a delete marker.
 	InclDeleted bool
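AskDisks now carries one of the named quorum levels; the integer special cases documented here previously (0 for the default drive count, -1 for at least 50% of drives) are instead resolved centrally by the getListQuorum helper added in the next hunk.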
@@ -541,19 +539,38 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
 	}
 }

+// getListQuorum interprets list quorum values and returns appropriate
+// acceptable quorum expected for list operations
+func getListQuorum(quorum string, driveCount int) int {
+	switch quorum {
+	case "disk":
+		// smallest possible value, generally meant for testing.
+		return 1
+	case "reduced":
+		return 2
+	case "strict":
+		return -1
+	}
+	// Defaults to (driveCount+1)/2 drives per set, defaults to "optimal" value
+	if driveCount > 0 {
+		return (driveCount + 1) / 2
+	} // "3" otherwise.
+	return 3
+}
+
 // Will return io.EOF if continuing would not yield more results.
 func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions, results chan<- metaCacheEntry) (err error) {
 	defer close(results)
 	o.debugf(color.Green("listPath:")+" with options: %#v", o)

-	askDisks := o.AskDisks
-	listingQuorum := o.AskDisks - 1
+	askDisks := getListQuorum(o.AskDisks, er.setDriveCount)
+	listingQuorum := askDisks - 1
 	disks := er.getDisks()
 	var fallbackDisks []StorageAPI

 	// Special case: ask all disks if the drive count is 4
 	if askDisks <= 0 || er.setDriveCount == 4 {
-		askDisks = len(disks) // with 'strict' quorum list on all online disks.
+		askDisks = len(disks) // with 'strict' quorum list on all drives.
 		listingQuorum = (len(disks) + 1) / 2 // keep this such that we can list all objects with different quorum ratio.
 	}
 	if askDisks > 0 && len(disks) > askDisks {
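To make the new resolution concrete, here is a minimal, self-contained Go sketch of how the named levels translate into askDisks and listingQuorum, mirroring the getListQuorum helper and the special case in listPath above. The 16-drive set size is an assumption for illustration only.

package main

import "fmt"

// getListQuorum mirrors the helper added above: it maps a named
// quorum level to a per-set drive count for listing.
func getListQuorum(quorum string, driveCount int) int {
	switch quorum {
	case "disk":
		return 1 // smallest possible value, generally meant for testing
	case "reduced":
		return 2
	case "strict":
		return -1 // sentinel: resolved to "all drives" in listPath
	}
	// "optimal" (or any other value) asks half the drives, rounded up.
	if driveCount > 0 {
		return (driveCount + 1) / 2
	}
	return 3
}

func main() {
	const setDriveCount = 16 // assumed erasure-set size for illustration
	for _, q := range []string{"disk", "reduced", "optimal", "strict"} {
		askDisks := getListQuorum(q, setDriveCount)
		listingQuorum := askDisks - 1
		// listPath above also takes this branch when setDriveCount == 4.
		if askDisks <= 0 {
			askDisks = setDriveCount
			listingQuorum = (setDriveCount + 1) / 2
		}
		fmt.Printf("%-8s askDisks=%d listingQuorum=%d\n", q, askDisks, listingQuorum)
	}
}

For a 16-drive set this prints askDisks=16 and listingQuorum=8 under "strict": all drives are consulted while (driveCount+1)/2 must agree, which is the read quorum the commit title refers to.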
@@ -822,7 +839,7 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
 		for _, fd := range fdsCopy {
 			// Grab a fallback disk
 			fds = fds[1:]
-			if fd != nil {
+			if fd != nil && fd.IsOnline() {
 				return fd
 			}
 		}
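With the added fd.IsOnline() check, a fallback disk is handed out only while it is still online; previously any non-nil entry was returned even if the drive had gone offline.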
@@ -300,6 +300,8 @@ func configRetriableErrors(err error) bool {
 }

 func initServer(ctx context.Context, newObject ObjectLayer) error {
+	t1 := time.Now()
+
 	// Once the config is fully loaded, initialize the new object layer.
 	setObjectLayer(newObject)

@@ -352,7 +354,7 @@ func initServer(ctx context.Context, newObject ObjectLayer) error {
 	// All successful return.
 	if globalIsDistErasure {
 		// These messages only meant primarily for distributed setup, so only log during distributed setup.
-		logger.Info("All MinIO sub-systems initialized successfully")
+		logger.Info("All MinIO sub-systems initialized successfully in %s", time.Since(t1))
 	}
 	return nil
 }
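initServer now records its start time in t1, so the distributed-setup success message can report the total initialization duration via time.Since(t1).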
@@ -162,22 +162,6 @@ func (sCfg *Config) UnmarshalJSON(data []byte) error {
 	return json.Unmarshal(data, &aux)
 }

-// GetListQuorum interprets list quorum values and returns appropriate
-// acceptable quorum expected for list operations
-func (sCfg Config) GetListQuorum() int {
-	switch sCfg.ListQuorum {
-	case "reduced":
-		return 2
-	case "disk":
-		// smallest possible value, generally meant for testing.
-		return 1
-	case "strict":
-		return -1
-	}
-	// Defaults to 3 drives per set, defaults to "optimal" value
-	return 3
-}
-
 // LookupConfig - lookup api config and override with valid environment settings if any.
 func LookupConfig(kvs config.KVS) (cfg Config, err error) {
 	// remove this since we have removed this already.
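With Config.GetListQuorum removed, the api subsystem hands the raw list_quorum string straight to the server (t.listQuorum = cfg.ListQuorum above), and interpretation now lives beside the listing code. Assuming the usual api-subsystem key (the key name itself is not shown in this diff), an operator would pick a level with something like mc admin config set myminio/ api list_quorum=reduced.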