mirror of https://github.com/minio/minio.git
properly reload a fresh drive when found in a failed state during startup (#20145)
When a single node, multiple drives deployment starts while one of its drives is in a failed state, a fresh replacement disk is not properly healed unless the user restarts the node. Fix this by always adding the new fresh disk to globalLocalDrivesMap. Also remove globalLocalDrives to simplify the code; a map can still be used to store a node's local drives, since the order of a node's local drives is not defined.
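The core of the change is storing local drives in a map rather than a slice, so registering a replacement disk overwrites the stale entry for that drive instead of leaving it behind. A minimal sketch of that registration pattern, assuming the map is keyed by the drive's endpoint string (globalLocalDrivesMap and globalLocalDrivesMu appear in the diff below; the setDrive helper and the StorageAPI stand-in are assumptions for illustration, not minio's actual API):

package cmd

import "sync"

// StorageAPI stands in for minio's storage interface in this sketch.
type StorageAPI interface {
	String() string
}

var (
	globalLocalDrivesMu  sync.RWMutex
	globalLocalDrivesMap = make(map[string]StorageAPI)
)

// setDrive (hypothetical helper) registers a local drive keyed by its
// endpoint. Re-registering a fresh replacement disk for the same endpoint
// overwrites the stale failed entry in place, which an append-only
// globalLocalDrives slice could not do without a restart.
func setDrive(endpoint string, drive StorageAPI) {
	globalLocalDrivesMu.Lock()
	defer globalLocalDrivesMu.Unlock()
	globalLocalDrivesMap[endpoint] = drive
}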
@@ -3553,7 +3553,7 @@ func (p *ReplicationPool) persistToDrive(ctx context.Context, v MRFReplicateEntr
 	}

 	globalLocalDrivesMu.RLock()
-	localDrives := cloneDrives(globalLocalDrives)
+	localDrives := cloneDrives(globalLocalDrivesMap)
 	globalLocalDrivesMu.RUnlock()

 	for _, localDrive := range localDrives {
@@ -3620,7 +3620,7 @@ func (p *ReplicationPool) loadMRF() (mrfRec MRFReplicateEntries, err error) {
 	}

 	globalLocalDrivesMu.RLock()
-	localDrives := cloneDrives(globalLocalDrives)
+	localDrives := cloneDrives(globalLocalDrivesMap)
 	globalLocalDrivesMu.RUnlock()

 	for _, localDrive := range localDrives {
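Both hunks follow the same pattern: take the read lock, snapshot the drive set with cloneDrives, release the lock, then iterate the snapshot. With the map-backed store, cloneDrives presumably flattens the map into a slice. A minimal sketch under that assumption, reusing the StorageAPI stand-in from the earlier sketch (Go map iteration order is unspecified, which is acceptable since, per the commit message, the order of a node's local drives is not defined):

// cloneDrives (sketch) snapshots the map into a slice so callers can
// iterate without holding globalLocalDrivesMu.
func cloneDrives(drives map[string]StorageAPI) []StorageAPI {
	newDrives := make([]StorageAPI, 0, len(drives))
	for _, drive := range drives {
		newDrives = append(newDrives, drive)
	}
	return newDrives
}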