Mirror of https://github.com/minio/minio.git (synced 2025-11-09 13:39:46 -05:00)
This change provides new implementations of the XL backend operations:

- create file
- read file
- heal file

Further, this change adds table-based tests for all three operations. It also affects the bitrot algorithm integration: algorithms are now integrated in an idiomatic way (like crypto.Hash).

Fixes #4696
Fixes #4649
Fixes #4359
committed by Dee Koder
parent 617f2394fb
commit 85fcee1919
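The commit message says the bitrot algorithms are now integrated "in an idiomatic way (like crypto.Hash)". The diff below only shows the call sites (checkSumInfo.Algorithm.New() and a raw []byte Hash field), so the following is a minimal sketch of what such an algorithm type could look like; the names BitrotAlgorithm, SHA256 and BLAKE2b512 are illustrative assumptions, not minio's actual declarations.

package bitrot

import (
	"crypto/sha256"
	"hash"

	"golang.org/x/crypto/blake2b"
)

// BitrotAlgorithm identifies a checksum algorithm (hypothetical name for this sketch).
type BitrotAlgorithm uint

// Illustrative algorithm constants, mirroring the crypto.Hash enumeration style.
const (
	SHA256 BitrotAlgorithm = iota + 1
	BLAKE2b512
)

// New returns a fresh hash.Hash for the algorithm, analogous to crypto.Hash.New.
func (a BitrotAlgorithm) New() hash.Hash {
	switch a {
	case SHA256:
		return sha256.New()
	case BLAKE2b512:
		h, _ := blake2b.New512(nil) // New512 only errors for an over-long key; a nil key is fine
		return h
	default:
		panic("unknown bitrot algorithm")
	}
}

A caller would select the algorithm recorded in a part's checksum info and obtain a fresh hash.Hash from it, exactly as crypto.Hash.New() is used in the standard library.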
@@ -17,11 +17,23 @@
 package cmd
 
 import (
-	"encoding/hex"
+	"crypto/subtle"
 	"path/filepath"
 	"sync"
 	"time"
+
+	"io"
 )
 
+// healBufferPool is a pool of reusable buffers used to verify a stream
+// while healing.
+var healBufferPool = sync.Pool{
+	New: func() interface{} {
+		b := make([]byte, readSizeV1)
+		return &b
+	},
+}
+
 // commonTime returns a maximally occurring time from a list of time.
 func commonTime(modTimes []time.Time) (modTime time.Time, count int) {
 	var maxima int // Counter for remembering max occurrence of elements.
@@ -252,36 +264,32 @@ func xlHealStat(xl xlObjects, partsMetadata []xlMetaV1, errs []error) HealObject
 // calculating blake2b checksum.
 func disksWithAllParts(onlineDisks []StorageAPI, partsMetadata []xlMetaV1, errs []error, bucket, object string) ([]StorageAPI, []error, error) {
 	availableDisks := make([]StorageAPI, len(onlineDisks))
+	buffer := healBufferPool.Get().(*[]byte)
+	defer healBufferPool.Put(buffer)
 
 	for diskIndex, onlineDisk := range onlineDisks {
-		if onlineDisk == nil {
+		if onlineDisk == OfflineDisk {
 			continue
 		}
 		// disk has a valid xl.json but may not have all the
 		// parts. This is considered an outdated disk, since
 		// it needs healing too.
 		for _, part := range partsMetadata[diskIndex].Parts {
 			// compute blake2b sum of part.
 			partPath := filepath.Join(object, part.Name)
-			checkSumInfo := partsMetadata[diskIndex].Erasure.GetCheckSumInfo(part.Name)
-			hash := newHash(checkSumInfo.Algorithm)
-			blakeBytes, hErr := hashSum(onlineDisk, bucket, partPath, hash)
+			checkSumInfo := partsMetadata[diskIndex].Erasure.GetChecksumInfo(part.Name)
+			hash := checkSumInfo.Algorithm.New()
+			_, hErr := io.CopyBuffer(hash, StorageReader(onlineDisk, bucket, partPath, 0), *buffer)
 			if hErr == errFileNotFound {
 				errs[diskIndex] = errFileNotFound
-				availableDisks[diskIndex] = nil
+				availableDisks[diskIndex] = OfflineDisk
 				break
 			}
 
 			if hErr != nil && hErr != errFileNotFound {
 				return nil, nil, traceError(hErr)
 			}
 
-			blakeSum := hex.EncodeToString(blakeBytes)
 			// if blake2b sum doesn't match for a part
 			// then this disk is outdated and needs
 			// healing.
-			if blakeSum != checkSumInfo.Hash {
+			if subtle.ConstantTimeCompare(hash.Sum(nil), checkSumInfo.Hash) != 1 {
 				errs[diskIndex] = errFileNotFound
-				availableDisks[diskIndex] = nil
+				availableDisks[diskIndex] = OfflineDisk
 				break
 			}
 			availableDisks[diskIndex] = onlineDisk
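Read as a whole, the new healing path verifies each part by streaming it through a freshly created hash with io.CopyBuffer (reusing a buffer from healBufferPool) and comparing the digest against the stored checksum in constant time. Below is a self-contained sketch of that pattern under assumed names: verifyPart and its io.Reader/expected parameters stand in for minio's StorageReader and checksum metadata and are not the project's actual API.

package bitrot

import (
	"crypto/subtle"
	"hash"
	"io"
	"sync"
)

// verifyBufferPool mirrors healBufferPool: reusable buffers for io.CopyBuffer.
var verifyBufferPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 1<<20) // sketch buffer size; the real code sizes this with readSizeV1
		return &b
	},
}

// verifyPart reports whether the data read from r hashes to the expected digest.
func verifyPart(r io.Reader, newHash func() hash.Hash, expected []byte) (bool, error) {
	buf := verifyBufferPool.Get().(*[]byte)
	defer verifyBufferPool.Put(buf)

	h := newHash()
	if _, err := io.CopyBuffer(h, r, *buf); err != nil {
		return false, err
	}
	// Constant-time comparison, as in the new disksWithAllParts code path.
	return subtle.ConstantTimeCompare(h.Sum(nil), expected) == 1, nil
}

In the diff above, the equivalent of newHash is checkSumInfo.Algorithm.New and the expected digest is checkSumInfo.Hash; a mismatch or a missing part file marks the disk as OfflineDisk so it is picked up for healing.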