Mirror of https://github.com/minio/minio.git
fix: handle a crash when AskDisks is set to -1 (#10777)
commit 4bf90ca67f
parent e0655e24f2
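As the hunks below show, listPath keeps the requested fan-out in a local askDisks variable. Before this fix, the -1 sentinel ("use the read quorum") was resolved into o.AskDisks while the local askDisks stayed at -1, so the later disks = disks[:askDisks] sliced with a negative bound and panicked. A minimal standalone sketch of that failure shape and of the guard the commit applies; readQuorum and the surrounding names are placeholders, not MinIO's actual helpers:

package main

import "fmt"

// Sketch only: readQuorum stands in for MinIO's getReadQuorum helper.
func readQuorum(driveCount int) int {
	return driveCount / 2
}

func main() {
	disks := []string{"d1", "d2", "d3", "d4", "d5", "d6"}
	askDisks := -1 // -1 means "ask a read quorum of disks"

	// Buggy shape: resolving -1 into a different variable (o.AskDisks) leaves
	// askDisks at -1, and disks[:askDisks] then panics:
	// slice bounds out of range [:-1].

	// Fixed shape: resolve the sentinel on the variable that is actually used
	// for slicing before any slice expression sees it.
	if askDisks == -1 {
		askDisks = readQuorum(len(disks))
	}
	if len(disks) > askDisks {
		disks = disks[:askDisks]
	}
	fmt.Println(askDisks, disks) // 3 [d1 d2 d3]
}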
@@ -403,7 +403,7 @@ func (d *dataUpdateTracker) deserialize(src io.Reader, newerThan time.Time) erro
 		return err
 	}
 	switch tmp[0] {
-	case 1, 2:
+	case 1, 2, 3:
 		logger.Info(color.Green("dataUpdateTracker: ") + "deprecated data version, updating.")
 		return nil
 	case dataUpdateTrackerVersion:
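The hunk above widens the deserializer's deprecated-version branch so data written as version 3 is also discarded and rebuilt rather than being rejected as an unknown version. A hedged sketch of that version-gated decode pattern; the package, constant, and names below are illustrative, not MinIO's real dataUpdateTracker layout:

package tracker

import (
	"fmt"
	"io"
)

const currentTrackerVersion = 4 // illustrative; the real constant lives in MinIO

// decodeTracker sketches the version-gated decode used by deserialize:
// deprecated versions are silently dropped so the caller rebuilds state,
// only truly unknown versions are treated as an error.
func decodeTracker(src io.Reader) error {
	var ver [1]byte
	if _, err := io.ReadFull(src, ver[:]); err != nil {
		return err
	}
	switch ver[0] {
	case 1, 2, 3:
		// Deprecated on-disk formats: ignore and let the caller start fresh.
		return nil
	case currentTrackerVersion:
		// Decode the current format here.
		return nil
	}
	return fmt.Errorf("decodeTracker: unknown data version %d", ver[0])
}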
@@ -124,6 +124,9 @@ func (z *erasureServerSets) listPath(ctx context.Context, o listPathOptions) (en
 	case "disk":
 		// Ask single disk.
 		o.AskDisks = 1
+	default:
+		// By default asks at max 3 disks.
+		o.AskDisks = 3
 	}
 }
 
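The hunk above adds a default branch so any unrecognized listing mode falls back to asking at most 3 disks instead of passing the incoming value through unchanged. A hedged sketch of that normalization, with the mode values simplified and not tied to MinIO's real option handling:

package listing

// normalizeAskDisks sketches the fan-out normalization in listPath:
// every mode resolves to a concrete, safe disk count.
func normalizeAskDisks(mode string) int {
	switch mode {
	case "disk":
		// Ask a single disk.
		return 1
	default:
		// By default ask at most 3 disks.
		return 3
	}
}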
@@ -542,17 +542,9 @@ func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions) (entr
 	// We need to ask disks.
 	disks := er.getOnlineDisks()
 
-	var askDisks = o.AskDisks
-	switch askDisks {
-	// 50% or at least 3.
-	case -1:
-		o.AskDisks = getReadQuorum(len(er.getDisks()))
-		if o.AskDisks < 3 {
-			o.AskDisks = 3
-		}
-	// Default is 3 disks.
-	case 0:
-		askDisks = 3
+	askDisks := o.AskDisks
+	if askDisks == -1 {
+		askDisks = getReadQuorum(er.SetDriveCount())
 	}
 
 	if len(disks) < askDisks {
@@ -565,10 +557,11 @@ func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions) (entr
 		return
 	}
 
-	// Select askDisks random disks, 3 is ok.
+	// Select askDisks random disks.
 	if len(disks) > askDisks {
 		disks = disks[:askDisks]
 	}
+
 	var readers = make([]*metacacheReader, askDisks)
 	for i := range disks {
 		r, w := io.Pipe()
@@ -852,6 +852,10 @@ func (client *peerRESTClient) ConsoleLog(logCh chan interface{}, doneCh <-chan s
 // but 'all' will contain nil entry for local client.
 // The 'all' slice will be in the same order across the cluster.
 func newPeerRestClients(endpoints EndpointServerSets) (remote, all []*peerRESTClient) {
+	if !globalIsDistErasure {
+		// Only useful in distributed setups
+		return nil, nil
+	}
 	hosts := endpoints.hostsSorted()
 	remote = make([]*peerRESTClient, 0, len(hosts))
 	all = make([]*peerRESTClient, len(hosts))
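The final hunk short-circuits newPeerRestClients on non-distributed deployments, so no peer REST clients are allocated when there are no peers to talk to. A hedged, self-contained sketch of the same early-return guard; peerClient and newPeerClient are placeholders for MinIO's peerRESTClient:

package peers

// Sketch only: peerClient and newPeerClient stand in for MinIO's peerRESTClient.
type peerClient struct{ host string }

func newPeerClient(host string) *peerClient { return &peerClient{host: host} }

// newPeerClients mirrors the guard added above: in a non-distributed setup
// there are no peers, so return early instead of allocating client slices.
func newPeerClients(hosts []string, localHost string, distributed bool) (remote, all []*peerClient) {
	if !distributed {
		// Only useful in distributed setups.
		return nil, nil
	}
	remote = make([]*peerClient, 0, len(hosts))
	all = make([]*peerClient, len(hosts))
	for i, host := range hosts {
		if host == localHost {
			// The local node keeps a nil entry so 'all' stays index-aligned
			// across the cluster.
			continue
		}
		c := newPeerClient(host)
		all[i] = c
		remote = append(remote, c)
	}
	return remote, all
}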