properly reload a fresh drive when found in a failed state during startup (#20145)

When a single-node, multiple-drive deployment is started while a drive
is in a failed state, a fresh replacement disk will not be properly
healed unless the user restarts the node.

Fix this by always adding the fresh disk to globalLocalDrivesMap. Also
remove globalLocalDrives to simplify the code: a map is enough to store
a node's local drives, since the order of a node's local drives is not
defined anyway.
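
For context, here is a minimal sketch of the map-based bookkeeping this commit switches to. The simplified StorageAPI interface, the localdrives package name, and the registerLocalDrive helper are illustrative assumptions, not MinIO's actual definitions; only globalLocalDrivesMap, globalLocalDrivesMu, and keying by the drive's endpoint string come from this commit:

package localdrives

import "sync"

// StorageAPI stands in for MinIO's storage interface; only what this
// sketch needs is shown, and Endpoint() is simplified to return a string
// (the diff below calls disk.Endpoint().String()).
type StorageAPI interface {
	Endpoint() string
}

var (
	globalLocalDrivesMu sync.RWMutex
	// Local drives keyed by endpoint string. Re-registering a fresh
	// replacement disk simply overwrites the stale entry for the same
	// endpoint, so no node restart is needed.
	globalLocalDrivesMap = make(map[string]StorageAPI)
)

// registerLocalDrive is a hypothetical helper that mirrors what the diff
// does inline: always (re)insert the connected disk under its endpoint.
func registerLocalDrive(disk StorageAPI) {
	globalLocalDrivesMu.Lock()
	defer globalLocalDrivesMu.Unlock()
	globalLocalDrivesMap[disk.Endpoint()] = disk
}

Unlike the removed slice scan, which only replaced an entry already matching the disk's set/disk location, the map insert always registers the disk, covering the startup case described above.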
Anis Eleuch
2024-07-25 00:30:33 +01:00
committed by GitHub
parent 33c101544d
commit b7f319b62a
9 changed files with 23 additions and 34 deletions


@@ -262,13 +262,7 @@ func (s *erasureSets) connectDisks(log bool) {
 				if globalIsDistErasure {
 					globalLocalSetDrives[s.poolIndex][setIndex][diskIndex] = disk
 				}
-				for i, ldisk := range globalLocalDrives {
-					_, k, l := ldisk.GetDiskLoc()
-					if k == setIndex && l == diskIndex {
-						globalLocalDrives[i] = disk
-						break
-					}
-				}
+				globalLocalDrivesMap[disk.Endpoint().String()] = disk
 				globalLocalDrivesMu.Unlock()
 			}
 			s.erasureDisksMu.Unlock()
@@ -1135,13 +1129,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
 				if globalIsDistErasure {
 					globalLocalSetDrives[s.poolIndex][m][n] = disk
 				}
-				for i, ldisk := range globalLocalDrives {
-					_, k, l := ldisk.GetDiskLoc()
-					if k == m && l == n {
-						globalLocalDrives[i] = disk
-						break
-					}
-				}
+				globalLocalDrivesMap[disk.Endpoint().String()] = disk
 				globalLocalDrivesMu.Unlock()
 			}
 		}
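
Since the commit message notes that the order of a node's local drives is not defined, callers that previously ranged over the globalLocalDrives slice can range over a snapshot of the map instead. A sketch building on the assumed types above (the localDrives helper name is illustrative, not part of this diff):

// localDrives returns a point-in-time copy of the registered local drives.
// Map iteration order is unspecified, which is acceptable here because
// drive order carries no meaning.
func localDrives() []StorageAPI {
	globalLocalDrivesMu.RLock()
	defer globalLocalDrivesMu.RUnlock()
	drives := make([]StorageAPI, 0, len(globalLocalDrivesMap))
	for _, disk := range globalLocalDrivesMap {
		drives = append(drives, disk)
	}
	return drives
}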