fix: handle a crash when AskDisks is set to -1 (#10777)
commit 4bf90ca67f
parent e0655e24f2
@@ -403,7 +403,7 @@ func (d *dataUpdateTracker) deserialize(src io.Reader, newerThan time.Time) error {
 		return err
 	}
 	switch tmp[0] {
-	case 1, 2:
+	case 1, 2, 3:
 		logger.Info(color.Green("dataUpdateTracker: ") + "deprecated data version, updating.")
 		return nil
 	case dataUpdateTrackerVersion:
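Accepting 3 in the deprecated-versions arm implies the tracker's current on-disk format was bumped past 3 (the dataUpdateTrackerVersion constant itself is outside this hunk, so that is an inference). A minimal sketch of the same version-gating pattern, with a hypothetical currentVersion standing in for that constant:

package main

import "fmt"

// currentVersion is a hypothetical stand-in for dataUpdateTrackerVersion.
const currentVersion = 4

// checkVersion mirrors the switch in deserialize: old-but-known versions
// are deprecated (the caller discards state and rebuilds), the current
// version is decoded, and anything else is an error.
func checkVersion(v byte) (deprecated bool, err error) {
	switch v {
	case 1, 2, 3:
		return true, nil // deprecated format: discard and rebuild
	case currentVersion:
		return false, nil // proceed to decode the payload
	default:
		return false, fmt.Errorf("unknown data version %d", v)
	}
}

func main() {
	for _, v := range []byte{2, 3, 4, 9} {
		deprecated, err := checkVersion(v)
		fmt.Println(v, deprecated, err)
	}
}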
@@ -124,6 +124,9 @@ func (z *erasureServerSets) listPath(ctx context.Context, o listPathOptions) (entries metaCacheEntriesSorted, err error) {
 	case "disk":
 		// Ask single disk.
 		o.AskDisks = 1
+	default:
+		// By default asks at max 3 disks.
+		o.AskDisks = 3
 	}
 }
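With the new default arm, every branch of the (partially shown) switch leaves o.AskDisks positive, so an unrecognized setting can no longer leave the field unset. A hedged sketch of that normalization idea, with a hypothetical mode string as input since the switch expression is outside the hunk:

package main

import "fmt"

// normalizeAskDisks is a hypothetical helper mirroring the hunk's intent:
// map a listing mode to a positive disk count, falling back to 3 for
// anything unrecognized so callers never observe an unset value.
func normalizeAskDisks(mode string) int {
	switch mode {
	case "disk":
		return 1 // ask a single disk
	default:
		return 3 // by default ask at most 3 disks
	}
}

func main() {
	fmt.Println(normalizeAskDisks("disk"))  // 1
	fmt.Println(normalizeAskDisks("other")) // 3 (fallback)
}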
@@ -542,17 +542,9 @@ func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions) (entries metaCacheEntriesSorted, err error) {
 	// We need to ask disks.
 	disks := er.getOnlineDisks()
 
-	var askDisks = o.AskDisks
-	switch askDisks {
-	// 50% or at least 3.
-	case -1:
-		o.AskDisks = getReadQuorum(len(er.getDisks()))
-		if o.AskDisks < 3 {
-			o.AskDisks = 3
-		}
-	// Default is 3 disks.
-	case 0:
-		askDisks = 3
+	askDisks := o.AskDisks
+	if askDisks == -1 {
+		askDisks = getReadQuorum(er.SetDriveCount())
 	}
 
 	if len(disks) < askDisks {
@@ -565,10 +557,11 @@ func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions) (entries metaCacheEntriesSorted, err error) {
 		return
 	}
 
-	// Select askDisks random disks, 3 is ok.
+	// Select askDisks random disks.
 	if len(disks) > askDisks {
 		disks = disks[:askDisks]
 	}
+
 	var readers = make([]*metacacheReader, askDisks)
 	for i := range disks {
 		r, w := io.Pipe()
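To see why -1 crashed before this change: the old case -1 arm assigned the quorum to o.AskDisks but left the local askDisks at -1, so the truncation step re-sliced with a negative bound (and make([]*metacacheReader, askDisks) would likewise reject a negative length). A standalone reproduction of that failure mode, with plain strings standing in for the disk type:

package main

import "fmt"

func main() {
	disks := make([]string, 8) // stand-in for er.getOnlineDisks()
	askDisks := -1             // sentinel left unnormalized by the old code

	defer func() {
		// Recover so the sketch prints the runtime error instead of dying:
		// "slice bounds out of range [:-1]".
		if r := recover(); r != nil {
			fmt.Println("panic:", r)
		}
	}()

	// Mirrors the old truncation step: len(disks) > -1 is always true,
	// and re-slicing with a negative high bound panics at runtime.
	if len(disks) > askDisks {
		disks = disks[:askDisks]
	}
}

The fix normalizes askDisks once, up front, to the read quorum, so both the truncation and the reader allocation see a sane positive count.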
@@ -852,6 +852,10 @@ func (client *peerRESTClient) ConsoleLog(logCh chan interface{}, doneCh <-chan struct{}) {
 // but 'all' will contain nil entry for local client.
 // The 'all' slice will be in the same order across the cluster.
 func newPeerRestClients(endpoints EndpointServerSets) (remote, all []*peerRESTClient) {
+	if !globalIsDistErasure {
+		// Only useful in distributed setups
+		return nil, nil
+	}
 	hosts := endpoints.hostsSorted()
 	remote = make([]*peerRESTClient, 0, len(hosts))
 	all = make([]*peerRESTClient, len(hosts))
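The early return hands back nil for both slices on non-distributed setups. That is safe for typical call sites because ranging over a nil slice in Go executes zero iterations; a small illustration (the loop body is hypothetical, not a minio call site):

package main

import "fmt"

func main() {
	// nil slice, as newPeerRestClients now returns when the
	// deployment is not distributed erasure.
	var remote []*struct{}

	// Ranging over a nil slice executes zero iterations, so peer
	// broadcast loops degrade to no-ops on single-node deployments.
	for range remote {
		fmt.Println("never reached")
	}
	fmt.Println("len of nil slice:", len(remote)) // prints 0
}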