remove overzealous check during HEAD() (#19940)

Due to a historic bug in CopyObject() where
an inlined object loses its metadata, the
check causes an incorrect fallback to
verifying the data-dir.

The CopyObject() bug was fixed in ffa91f9794; however,
objects affected by this problem still exist historically,
so the aforementioned check is overreaching.

Bonus: simplify fileInfoRaw() to read xl.json as well,
also recreate buckets properly.
This commit is contained in:
Harshavardhana
2024-06-17 07:29:18 -07:00
committed by GitHub
parent c91d1ec2e3
commit 7bd1d899bc
11 changed files with 99 additions and 95 deletions

View File

@@ -163,7 +163,10 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
}
for _, bucket := range healBuckets {
_, err := objAPI.HealBucket(ctx, bucket, madmin.HealOpts{ScanMode: scanMode})
_, err := objAPI.HealBucket(ctx, bucket, madmin.HealOpts{
Recreate: true,
ScanMode: scanMode,
})
if err != nil {
// Log bucket healing error if any, we shall retry again.
healingLogIf(ctx, err)
@@ -262,6 +265,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
// Heal current bucket again in case if it is failed
// in the beginning of erasure set healing
if _, err := objAPI.HealBucket(ctx, bucket, madmin.HealOpts{
Recreate: true,
ScanMode: scanMode,
}); err != nil {
// Set this such that when we return this function
@@ -513,7 +517,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
// we let the caller retry this disk again for the
// buckets it failed to list.
retErr = err
healingLogIf(ctx, err)
healingLogIf(ctx, fmt.Errorf("listing failed with: %v on bucket: %v", err, bucket))
continue
}