HealObject should succeed when only N/2 disks have data (#3952)
minio/minio (https://github.com/minio/minio.git), commit 417ec0df56, parent fbfb4fc5a0
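In outline: disksWithAllParts now reads each part's expected checksum by part name via GetCheckSumInfo instead of indexing the checksum slice by loop position, and takes the hashing algorithm from the erasure metadata instead of hard-coding blake2b; reduceErrs now prefers nil over an error value with the same number of occurrences, so an object whose data survives on only N/2 disks still reaches quorum; TestHealObjectXL is hardened to upload a multipart object in decreasing part-number order, and TestReduceErrs covers the new tie-breaking rule.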
@@ -225,10 +225,10 @@ func disksWithAllParts(onlineDisks []StorageAPI, partsMetadata []xlMetaV1, errs
 		// disk has a valid xl.json but may not have all the
 		// parts. This is considered an outdated disk, since
 		// it needs healing too.
-		for pIndex, part := range partsMetadata[index].Parts {
+		for _, part := range partsMetadata[index].Parts {
 			// compute blake2b sum of part.
 			partPath := filepath.Join(object, part.Name)
-			hash := newHash(blake2bAlgo)
+			hash := newHash(partsMetadata[index].Erasure.Algorithm)
 			blakeBytes, hErr := hashSum(onlineDisk, bucket, partPath, hash)
 			if hErr == errFileNotFound {
 				errs[index] = errFileNotFound
@@ -239,7 +239,7 @@ func disksWithAllParts(onlineDisks []StorageAPI, partsMetadata []xlMetaV1, errs
 				return nil, nil, traceError(hErr)
 			}
 
-			partChecksum := partsMetadata[index].Erasure.Checksum[pIndex].Hash
+			partChecksum := partsMetadata[index].Erasure.GetCheckSumInfo(part.Name).Hash
 			blakeSum := hex.EncodeToString(blakeBytes)
 			// if blake2b sum doesn't match for a part
 			// then this disk is outdated and needs
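Why the by-name lookup matters: Checksum[pIndex] assumes the checksum slice is stored in the same order as the parts slice, which need not hold once parts can be uploaded out of order. Below is a minimal sketch of the lookup behavior that the GetCheckSumInfo(part.Name) call relies on, using hypothetical simplified stand-ins for the internal erasure metadata types (the type and field names here are illustrative, not MinIO's exact definitions):

```go
package main

import "fmt"

// checkSumInfo stands in for a per-part checksum record; the field set is
// an assumption for illustration.
type checkSumInfo struct {
	Name      string // part file name, e.g. "part.1"
	Algorithm string // e.g. "blake2b"
	Hash      string // hex-encoded sum
}

// erasureInfo stands in for the erasure metadata carried in xl.json.
type erasureInfo struct {
	Algorithm string
	Checksum  []checkSumInfo
}

// getCheckSumInfo returns the entry whose Name matches partName, wherever
// it sits in the slice; a zero value is returned for unknown parts.
func (e erasureInfo) getCheckSumInfo(partName string) checkSumInfo {
	for _, sum := range e.Checksum {
		if sum.Name == partName {
			return sum
		}
	}
	return checkSumInfo{}
}

func main() {
	// Checksums recorded in upload order (part 2 first), not part order.
	e := erasureInfo{
		Algorithm: "blake2b",
		Checksum: []checkSumInfo{
			{Name: "part.2", Algorithm: "blake2b", Hash: "bb"},
			{Name: "part.1", Algorithm: "blake2b", Hash: "aa"},
		},
	}
	// Indexing by position would pair part.1 with "bb"; the name-based
	// lookup pairs it with "aa" as intended.
	fmt.Println(e.getCheckSumInfo("part.1").Hash) // aa
}
```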
@@ -518,15 +518,36 @@ func TestHealObjectXL(t *testing.T) {
 
 	bucket := "bucket"
 	object := "object"
-	data := []byte("hello")
+	data := bytes.Repeat([]byte("a"), 5*1024*1024)
 
 	err = obj.MakeBucket(bucket)
 	if err != nil {
 		t.Fatalf("Failed to make a bucket - %v", err)
 	}
 
-	_, err = obj.PutObject(bucket, object, int64(len(data)), bytes.NewReader(data), nil, "")
+	// Create an object with multiple parts uploaded in decreasing
+	// part number.
+	uploadID, err := obj.NewMultipartUpload(bucket, object, nil)
 	if err != nil {
-		t.Fatalf("Failed to put an object - %v", err)
+		t.Fatalf("Failed to create a multipart upload - %v", err)
 	}
+
+	var uploadedParts []completePart
+	for _, partID := range []int{2, 1} {
+		pInfo, err := obj.PutObjectPart(bucket, object, uploadID, partID,
+			int64(len(data)), bytes.NewReader(data), "", "")
+		if err != nil {
+			t.Fatalf("Failed to upload a part - %v", err)
+		}
+		uploadedParts = append(uploadedParts, completePart{
+			PartNumber: pInfo.PartNumber,
+			ETag:       pInfo.ETag,
+		})
+	}
+
+	_, err = obj.CompleteMultipartUpload(bucket, object, uploadID, uploadedParts)
+	if err != nil {
+		t.Fatalf("Failed to complete multipart upload - %v", err)
+	}
 
 	// Remove the object backend files from the first disk.
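This hardened test matters because healing recomputes each part's sum with the algorithm recorded in the erasure metadata (blake2b here) and compares it against the stored checksum; uploading part 2 before part 1 is exactly the ordering the old index-based lookup got wrong. For reference, a self-contained sketch of that verify step, using golang.org/x/crypto/blake2b as a stand-in for MinIO's internal blake2b helper (verifyPart and its signature are assumptions for illustration):

```go
package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"io"

	"golang.org/x/crypto/blake2b"
)

// verifyPart recomputes an unkeyed blake2b-512 sum over the part's bytes
// and reports whether it matches the hex-encoded recorded sum.
func verifyPart(part io.Reader, recordedHex string) (bool, error) {
	h, err := blake2b.New512(nil)
	if err != nil {
		return false, err
	}
	if _, err := io.Copy(h, part); err != nil {
		return false, err
	}
	return hex.EncodeToString(h.Sum(nil)) == recordedHex, nil
}

func main() {
	data := bytes.Repeat([]byte("a"), 5*1024*1024) // same payload as the test

	// Record a sum the way a write path would.
	h, _ := blake2b.New512(nil)
	h.Write(data)
	recorded := hex.EncodeToString(h.Sum(nil))

	ok, err := verifyPart(bytes.NewReader(data), recorded)
	fmt.Println(ok, err) // true <nil>
}
```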
@@ -41,11 +41,18 @@ func reduceErrs(errs []error, ignoredErrs []error) (maxCount int, maxErr error)
 		}
 		errorCounts[err]++
 	}
 
 	max := 0
 	for err, count := range errorCounts {
-		if max < count {
+		switch {
+		case max < count:
 			max = count
 			maxErr = err
+
+		// Prefer `nil` over other error values with the same
+		// number of occurrences.
+		case max == count && err == nil:
+			maxErr = err
 		}
 	}
 	return max, maxErr
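The practical effect of the switch: with the old `if max < count`, a tie between nil and an error was resolved by whichever key Go's randomized map iteration visited first, so an object readable on exactly N/2 disks could nondeterministically fail quorum. A runnable sketch of the new tie-breaking, assuming a simplified reduceErrs with the ignored-errors filtering elided:

```go
package main

import (
	"errors"
	"fmt"
)

// reduceErrs tallies error values and returns the most common one; on a
// tie in counts, nil wins, mirroring the change above.
func reduceErrs(errs []error) (maxCount int, maxErr error) {
	errorCounts := make(map[error]int)
	for _, err := range errs {
		errorCounts[err]++
	}
	max := 0
	for err, count := range errorCounts {
		switch {
		case max < count:
			max = count
			maxErr = err

		// Prefer nil over other error values with the same
		// number of occurrences.
		case max == count && err == nil:
			maxErr = err
		}
	}
	return max, maxErr
}

func main() {
	errFileNotFound := errors.New("file not found")
	// 5 disks report a missing file, 5 succeed: the tie resolves to nil,
	// matching the new TestReduceErrs case below.
	errs := []error{
		errFileNotFound, errFileNotFound, errFileNotFound,
		errFileNotFound, errFileNotFound, nil, nil, nil, nil, nil,
	}
	count, err := reduceErrs(errs)
	fmt.Println(count, err) // 5 <nil>
}
```

Either iteration order yields nil here: if nil is counted first it is kept, because the error value only ties; if the error is counted first, the new `max == count && err == nil` case overrides it.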
@@ -82,6 +82,9 @@ func TestReduceErrs(t *testing.T) {
 			errDiskNotFound,
 		}, []error{errDiskNotFound}, errVolumeNotFound},
 		{[]error{}, []error{}, errXLReadQuorum},
+		{[]error{errFileNotFound, errFileNotFound, errFileNotFound,
+			errFileNotFound, errFileNotFound, nil, nil, nil, nil, nil},
+			nil, nil},
 	}
 	// Validates list of all the testcases for returning valid errors.
 	for i, testCase := range testCases {