properly reload a fresh drive when found in a failed state during startup (#20145)
When a single node, multiple drives deployment starts with one of its drives in a failed state, a freshly replaced disk is not properly healed unless the user restarts the node. Fix this by always adding the new fresh disk to globalLocalDrivesMap. Also remove globalLocalDrives to simplify the code; a map keyed by endpoint is enough to track the local node's drives, since the order of a node's local drives is not defined.
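For illustration, a minimal Go sketch of the map-based registration this patch switches to. The names globalLocalDrivesMap, globalLocalDrivesMu, Endpoint(), and GetDiskLoc() come from the diff below; StorageAPI, localDisk, and registerLocalDrive are simplified stand-ins invented for this sketch, not MinIO's real types.

package main

import (
	"fmt"
	"sync"
)

// StorageAPI is a stand-in for MinIO's storage interface; only the two
// methods touched by this patch are modelled, with simplified signatures
// (the real Endpoint() returns an Endpoint value whose String() is used).
type StorageAPI interface {
	Endpoint() string
	GetDiskLoc() (pool, set, disk int)
}

// localDisk is a hypothetical in-memory drive used only for this sketch.
type localDisk struct {
	endpoint         string
	pool, set, index int
}

func (d *localDisk) Endpoint() string            { return d.endpoint }
func (d *localDisk) GetDiskLoc() (int, int, int) { return d.pool, d.set, d.index }

var (
	globalLocalDrivesMu  sync.RWMutex
	globalLocalDrivesMap = make(map[string]StorageAPI) // keyed by endpoint, as in the patch
)

// registerLocalDrive mirrors the fixed behaviour: the drive is always stored
// under its endpoint, so a fresh replacement disk that was absent (failed) at
// startup is still tracked the moment it reconnects, with no node restart.
func registerLocalDrive(disk StorageAPI) {
	globalLocalDrivesMu.Lock()
	globalLocalDrivesMap[disk.Endpoint()] = disk
	globalLocalDrivesMu.Unlock()
}

func main() {
	fresh := &localDisk{endpoint: "/mnt/drive3", pool: 0, set: 0, index: 2}
	registerLocalDrive(fresh)
	fmt.Println(len(globalLocalDrivesMap)) // 1: the fresh drive is now tracked
}

The removed code below instead searched the globalLocalDrives slice for an entry at the same set/disk location; a fresh disk that never made it into that slice at startup could not be matched, so it stayed untracked until restart.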
@@ -262,13 +262,7 @@ func (s *erasureSets) connectDisks(log bool) {
 			if globalIsDistErasure {
 				globalLocalSetDrives[s.poolIndex][setIndex][diskIndex] = disk
 			}
-			for i, ldisk := range globalLocalDrives {
-				_, k, l := ldisk.GetDiskLoc()
-				if k == setIndex && l == diskIndex {
-					globalLocalDrives[i] = disk
-					break
-				}
-			}
+			globalLocalDrivesMap[disk.Endpoint().String()] = disk
 			globalLocalDrivesMu.Unlock()
 		}
 		s.erasureDisksMu.Unlock()
@@ -1135,13 +1129,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
 			if globalIsDistErasure {
 				globalLocalSetDrives[s.poolIndex][m][n] = disk
 			}
-			for i, ldisk := range globalLocalDrives {
-				_, k, l := ldisk.GetDiskLoc()
-				if k == m && l == n {
-					globalLocalDrives[i] = disk
-					break
-				}
-			}
+			globalLocalDrivesMap[disk.Endpoint().String()] = disk
 			globalLocalDrivesMu.Unlock()
 		}
 	}
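With the ordered globalLocalDrives slice gone, callers that need a list of the node's local drives can derive one from the map; since the order of a node's local drives is not defined, Go's unspecified map iteration order is acceptable. A hedged sketch reusing the stand-in types from the example above (collectLocalDrives is hypothetical, not a MinIO function):

// collectLocalDrives rebuilds a slice from globalLocalDrivesMap under a read
// lock. Iteration order is unspecified, which is fine here because the order
// of local drives on a node carries no meaning.
func collectLocalDrives() []StorageAPI {
	globalLocalDrivesMu.RLock()
	defer globalLocalDrivesMu.RUnlock()

	drives := make([]StorageAPI, 0, len(globalLocalDrivesMap))
	for _, disk := range globalLocalDrivesMap {
		drives = append(drives, disk)
	}
	return drives
}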