Mirror of https://github.com/minio/minio.git
heal: Hold lock when reading xl.meta from disks (#12362)
Previously, the lock was acquired inside healObject() only after xl.meta had already been read from the disks once. This commit holds the lock from the beginning of HealObject(), so the metadata is read under the lock.

Co-authored-by: Anis Elleuch <anis@min.io>
parent 2baabd455b
commit abd32065aa
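Before the diff, a minimal, self-contained sketch of the ordering this commit enforces: take the per-object lock first (unless the caller already holds it and passed NoLock), and only then read xl.meta, so no concurrent writer can change the metadata between the read and the heal. The nsLock type and the readMetadata/heal helpers below are simplified illustrative stand-ins, not MinIO's actual locker API.

package main

import (
	"context"
	"fmt"
	"sync"
)

// nsLock is a simplified stand-in for MinIO's per-object namespace lock
// (er.NewNSLock in the real code).
type nsLock struct{ mu sync.Mutex }

// lock acquires the lock and returns the function that releases it.
func (l *nsLock) lock() func() {
	l.mu.Lock()
	return l.mu.Unlock
}

// healObject sketches the fixed ordering: lock first, then read xl.meta,
// so the metadata cannot change under the healer.
func healObject(ctx context.Context, lk *nsLock, noLock bool) error {
	if !noLock {
		unlock := lk.lock()
		defer unlock()
	}

	meta := readMetadata(ctx) // safe: the object namespace is locked
	return heal(ctx, meta)
}

// readMetadata and heal are placeholders for readAllFileInfo and the
// actual healing logic.
func readMetadata(ctx context.Context) string { return "xl.meta contents" }

func heal(ctx context.Context, meta string) error {
	fmt.Println("healing with", meta)
	return nil
}

func main() {
	var lk nsLock
	if err := healObject(context.Background(), &lk, false); err != nil {
		fmt.Println("heal failed:", err)
	}
}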
@@ -254,17 +254,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
 		DataBlocks: len(storageDisks) - er.defaultParityCount,
 	}
 
-	if !opts.NoLock {
-		lk := er.NewNSLock(bucket, object)
-		lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
-		if err != nil {
-			return result, err
-		}
-		ctx = lkctx.Context()
-		defer lk.Unlock(lkctx.Cancel)
-	}
-
-	// Re-read when we have lock...
+	// Re-read with data enabled
 	partsMetadata, errs := readAllFileInfo(ctx, storageDisks, bucket, object, versionID, true)
 
 	// List of disks having latest version of the object er.meta
@@ -644,6 +634,17 @@ func defaultHealResult(lfi FileInfo, storageDisks []StorageAPI, storageEndpoints
 	}
 	if lfi.IsValid() {
 		result.ObjectSize = lfi.Size
+		result.ParityBlocks = lfi.Erasure.ParityBlocks
+		result.DataBlocks = lfi.Erasure.DataBlocks
+	} else {
+		// Default to most common configuration for erasure blocks.
+		result.ParityBlocks = defaultParityCount
+		result.DataBlocks = len(storageDisks) - defaultParityCount
+	}
+
+	if errs == nil {
+		// No disks related errors are provided, quit
+		return result
 	}
 
 	for index, disk := range storageDisks {
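A note on the errs == nil guard added above: the new lock-acquisition failure path in HealObject (final hunk below) calls defaultHealResult with a zero FileInfo and a nil errs slice, and the early return keeps the per-disk accounting from running against errors that were never collected. A small sketch of the guard's effect, using deliberately simplified, hypothetical types rather than MinIO's:

package main

import "fmt"

// defaultResult mimics the shape of defaultHealResult's per-disk loop
// with simplified types; it is illustrative, not MinIO's code.
func defaultResult(disks []string, errs []error) (online int) {
	if errs == nil {
		// No per-disk errors were provided (e.g. the caller failed
		// before probing any disk), so skip per-disk accounting.
		return 0
	}
	for i := range disks {
		if errs[i] == nil { // would panic here if errs were nil
			online++
		}
	}
	return online
}

func main() {
	disks := []string{"d1", "d2"}
	fmt.Println(defaultResult(disks, nil))               // 0, no panic
	fmt.Println(defaultResult(disks, []error{nil, nil})) // 2
}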
@@ -677,15 +678,6 @@ func defaultHealResult(lfi FileInfo, storageDisks []StorageAPI, storageEndpoints
 		})
 	}
 
-	if !lfi.IsValid() {
-		// Default to most common configuration for erasure blocks.
-		result.ParityBlocks = defaultParityCount
-		result.DataBlocks = len(storageDisks) - defaultParityCount
-	} else {
-		result.ParityBlocks = lfi.Erasure.ParityBlocks
-		result.DataBlocks = lfi.Erasure.DataBlocks
-	}
-
 	return result
 }
 
@@ -869,23 +861,34 @@ func (er erasureObjects) HealObject(ctx context.Context, bucket, object, version
 	} else {
 		newReqInfo = logger.NewReqInfo("", "", globalDeploymentID, "", "Heal", bucket, object)
 	}
 
+	// Inherit context from the global context to avoid cancellation
 	healCtx := logger.SetReqInfo(GlobalContext, newReqInfo)
 
+	storageDisks := er.getDisks()
+	storageEndpoints := er.getEndpoints()
+
+	if !opts.NoLock {
+		lk := er.NewNSLock(bucket, object)
+		lkctx, err := lk.GetLock(healCtx, globalOperationTimeout)
+		if err != nil {
+			return defaultHealResult(FileInfo{}, storageDisks, storageEndpoints, nil, bucket, object, versionID, er.defaultParityCount), err
+		}
+		healCtx = lkctx.Context()
+		defer lk.Unlock(lkctx.Cancel)
+	}
+
 	// Healing directories handle it separately.
 	if HasSuffix(object, SlashSeparator) {
 		return er.healObjectDir(healCtx, bucket, object, opts.DryRun, opts.Remove)
 	}
 
-	storageDisks := er.getDisks()
-	storageEndpoints := er.getEndpoints()
-
-	// Read metadata files from all the disks
-
 	// When versionID is empty, we read directly from the `null` versionID for healing.
 	if versionID == "" {
 		versionID = nullVersionID
 	}
 
+	// Read metadata files from all the disks
 	partsMetadata, errs := readAllFileInfo(healCtx, storageDisks, bucket, object, versionID, false)
+
 	if isAllNotFound(errs) {
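Finally, the added "// Inherit context from the global context to avoid cancellation" comment marks a deliberate design choice: the heal context is derived from the process-wide GlobalContext rather than the incoming request context, so a client disconnect cannot abort an in-flight heal, while the lock's context still bounds the work to the lock's lifetime. A minimal runnable sketch of that wiring; globalCtx and healObject below are illustrative stand-ins, not MinIO's code:

package main

import (
	"context"
	"fmt"
)

// globalCtx stands in for MinIO's GlobalContext: it lives for the whole
// process, independent of any single client request.
var globalCtx = context.Background()

// healObject derives its working context from the process-wide context,
// not from the request context, so a cancelled request cannot cancel an
// in-flight heal. The cancel func plays the role of lkctx.Cancel here.
func healObject(reqCtx context.Context) {
	healCtx, cancel := context.WithCancel(globalCtx)
	defer cancel()

	// Even though the request context is already cancelled, the heal
	// context is still live.
	fmt.Println("request cancelled:", reqCtx.Err() != nil) // true
	fmt.Println("heal cancelled:", healCtx.Err() != nil)   // false
}

func main() {
	reqCtx, cancel := context.WithCancel(context.Background())
	cancel() // simulate the client going away before the heal finishes
	healObject(reqCtx)
}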