Mirror of https://github.com/minio/minio.git
properly reload a fresh drive when found in a failed state during startup (#20145)
When a single-node, multiple-drive deployment starts with a drive in a failed state, a freshly replaced disk is not properly healed unless the user restarts the node. Fix this by always adding the fresh disk to globalLocalDrivesMap. Also remove globalLocalDrives to simplify the code; a map can store a node's local drives just as well, since the order of a node's local drives is not defined.
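For readers skimming the diff below, the pattern being adopted is a package-level map guarded by an RWMutex, keyed so that a replacement drive overwrites the stale entry for the same mount. The following is a minimal, self-contained sketch of that idea; the trimmed-down StorageAPI interface, the xlStorage stand-in, and the registerLocalDrive helper are illustrative assumptions, not code from this commit — only globalLocalDrivesMap, globalLocalDrivesMu, StorageAPI, and cloneDrives appear in the diff itself.

package main

import (
	"fmt"
	"sync"
)

// StorageAPI is trimmed to the single method this sketch needs; the real
// MinIO interface is far larger.
type StorageAPI interface {
	String() string // endpoint path identifying the drive
}

// xlStorage is a stand-in implementation for illustration only.
type xlStorage struct{ path string }

func (s *xlStorage) String() string { return s.path }

var (
	globalLocalDrivesMu  sync.RWMutex
	globalLocalDrivesMap = make(map[string]StorageAPI)
)

// registerLocalDrive is a hypothetical helper: keying the map by endpoint
// path means a fresh replacement drive simply overwrites the stale failed
// entry, which an append-only slice could not do without a node restart.
func registerLocalDrive(drive StorageAPI) {
	globalLocalDrivesMu.Lock()
	defer globalLocalDrivesMu.Unlock()
	globalLocalDrivesMap[drive.String()] = drive
}

func main() {
	registerLocalDrive(&xlStorage{path: "/mnt/drive1"}) // drive found failed at startup
	registerLocalDrive(&xlStorage{path: "/mnt/drive1"}) // fresh replacement on the same mount
	fmt.Println(len(globalLocalDrivesMap))              // 1 -- replaced in place, no restart
}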
@@ -34,7 +34,7 @@ const (
 
 func healBucketLocal(ctx context.Context, bucket string, opts madmin.HealOpts) (res madmin.HealResultItem, err error) {
 	globalLocalDrivesMu.RLock()
-	localDrives := cloneDrives(globalLocalDrives)
+	localDrives := cloneDrives(globalLocalDrivesMap)
 	globalLocalDrivesMu.RUnlock()
 
 	// Initialize sync waitgroup.
@@ -158,7 +158,7 @@ func healBucketLocal(ctx context.Context, bucket string, opts madmin.HealOpts) (
 
 func listBucketsLocal(ctx context.Context, opts BucketOptions) (buckets []BucketInfo, err error) {
 	globalLocalDrivesMu.RLock()
-	localDrives := cloneDrives(globalLocalDrives)
+	localDrives := cloneDrives(globalLocalDrivesMap)
 	globalLocalDrivesMu.RUnlock()
 
 	quorum := (len(localDrives) / 2)
@@ -204,15 +204,17 @@ func listBucketsLocal(ctx context.Context, opts BucketOptions) (buckets []Bucket
 	return buckets, nil
 }
 
-func cloneDrives(drives []StorageAPI) []StorageAPI {
-	newDrives := make([]StorageAPI, len(drives))
-	copy(newDrives, drives)
-	return newDrives
+func cloneDrives(drives map[string]StorageAPI) []StorageAPI {
+	copyDrives := make([]StorageAPI, 0, len(drives))
+	for _, drive := range drives {
+		copyDrives = append(copyDrives, drive)
+	}
+	return copyDrives
 }
 
 func getBucketInfoLocal(ctx context.Context, bucket string, opts BucketOptions) (BucketInfo, error) {
 	globalLocalDrivesMu.RLock()
-	localDrives := cloneDrives(globalLocalDrives)
+	localDrives := cloneDrives(globalLocalDrivesMap)
 	globalLocalDrivesMu.RUnlock()
 
 	g := errgroup.WithNErrs(len(localDrives)).WithConcurrency(32)
@@ -261,7 +263,7 @@ func getBucketInfoLocal(ctx context.Context, bucket string, opts BucketOptions)
 
 func deleteBucketLocal(ctx context.Context, bucket string, opts DeleteBucketOptions) error {
 	globalLocalDrivesMu.RLock()
-	localDrives := cloneDrives(globalLocalDrives)
+	localDrives := cloneDrives(globalLocalDrivesMap)
 	globalLocalDrivesMu.RUnlock()
 
 	g := errgroup.WithNErrs(len(localDrives)).WithConcurrency(32)
@@ -299,7 +301,7 @@ func deleteBucketLocal(ctx context.Context, bucket string, opts DeleteBucketOpti
 
 func makeBucketLocal(ctx context.Context, bucket string, opts MakeBucketOptions) error {
 	globalLocalDrivesMu.RLock()
-	localDrives := cloneDrives(globalLocalDrives)
+	localDrives := cloneDrives(globalLocalDrivesMap)
 	globalLocalDrivesMu.RUnlock()
 
 	g := errgroup.WithNErrs(len(localDrives)).WithConcurrency(32)
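A side note on the "order of local drives is not defined" remark in the commit message: Go randomizes map iteration order, so the slice returned by the new map-based cloneDrives can come back in a different order on every call, and callers must not rely on drive ordering. A quick standalone illustration (not from this commit):

package main

import "fmt"

func main() {
	drives := map[string]string{"/mnt/d1": "", "/mnt/d2": "", "/mnt/d3": ""}
	for run := 0; run < 2; run++ {
		for path := range drives {
			fmt.Print(path, " ") // iteration order may differ between runs
		}
		fmt.Println()
	}
}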