erasure: simplify XL backend operations (#4649) (#4758)

This change provides new implementations of the XL backend operations:
 - create file
 - read   file
 - heal   file
Further, this change adds table-based tests for all three operations.
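
A minimal sketch of what a table-based (table-driven) test can look like in Go; the case table, the runCreateFileCase helper, and all names below are illustrative stand-ins, not the cases added by this commit:

    package cmd

    import (
        "errors"
        "testing"
    )

    // runCreateFileCase is a hypothetical stand-in for the erasure create-file
    // operation; the real tests exercise the new XL backend implementation.
    func runCreateFileCase(dataBlocks, parityBlocks int, data []byte) error {
        if dataBlocks <= 0 || parityBlocks <= 0 {
            return errors.New("invalid erasure configuration")
        }
        return nil
    }

    func TestCreateFileTable(t *testing.T) {
        tests := []struct {
            dataBlocks, parityBlocks int
            data                     []byte
            shouldFail               bool
        }{
            {dataBlocks: 4, parityBlocks: 4, data: []byte("object data"), shouldFail: false},
            {dataBlocks: 0, parityBlocks: 4, data: nil, shouldFail: true},
        }
        for i, test := range tests {
            err := runCreateFileCase(test.dataBlocks, test.parityBlocks, test.data)
            if err != nil && !test.shouldFail {
                t.Errorf("Test %d: unexpected error: %v", i, err)
            }
            if err == nil && test.shouldFail {
                t.Errorf("Test %d: expected an error, got none", i)
            }
        }
    }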

This also affects the bitrot algorithm integration: algorithms are now
integrated in an idiomatic way (like crypto.Hash).
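
Concretely, "like crypto.Hash" means a small value type that names an algorithm and can hand out a fresh hash.Hash. The sketch below shows that pattern only; apart from BitrotAlgorithm and DefaultBitrotAlgorithm, which appear in the diff, the constant set and method names are assumptions, and SHA-256 stands in just to keep the example self-contained:

    package cmd

    import (
        "crypto/sha256"
        "hash"
    )

    // BitrotAlgorithm identifies a bitrot-detection checksum algorithm,
    // mirroring how crypto.Hash identifies a hash function.
    type BitrotAlgorithm uint

    const (
        // SHA256 is used here only to keep the sketch self-contained; the
        // backend's actual algorithm set and default may differ.
        SHA256 BitrotAlgorithm = 1 + iota
    )

    // DefaultBitrotAlgorithm is the algorithm used when the caller does not
    // pick one explicitly.
    const DefaultBitrotAlgorithm = SHA256

    // New returns a fresh hash.Hash computing the selected checksum,
    // analogous to crypto.Hash.New.
    func (a BitrotAlgorithm) New() hash.Hash {
        switch a {
        case SHA256:
            return sha256.New()
        }
        panic("unsupported bitrot algorithm")
    }

    // Available reports whether the algorithm is supported by this binary,
    // analogous to crypto.Hash.Available.
    func (a BitrotAlgorithm) Available() bool {
        return a == SHA256
    }

A caller would then write h := DefaultBitrotAlgorithm.New(), feed it the data with h.Write, and compare h.Sum(nil) against the stored ChecksumInfo.Hash, which is also why the test diff below can induce a mismatch simply by flipping a byte of that Hash.
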
Fixes #4696
Fixes #4649
Fixes #4359
Author:    Andreas Auernhammer
Date:      2017-08-14 18:08:42 -07:00
Committed: Dee Koder
Parent:    617f2394fb
Commit:    85fcee1919
33 changed files with 1238 additions and 1863 deletions


@@ -87,12 +87,12 @@ func TestCommonTime(t *testing.T) {
// partsMetaFromModTimes - returns slice of modTimes given metadata of
// an object part.
-func partsMetaFromModTimes(modTimes []time.Time, checksums []checkSumInfo) []xlMetaV1 {
+func partsMetaFromModTimes(modTimes []time.Time, algorithm BitrotAlgorithm, checksums []ChecksumInfo) []xlMetaV1 {
var partsMetadata []xlMetaV1
for _, modTime := range modTimes {
partsMetadata = append(partsMetadata, xlMetaV1{
-Erasure: erasureInfo{
-Checksum: checksums,
+Erasure: ErasureInfo{
+Checksums: checksums,
},
Stat: statInfo{
ModTime: modTime,
@@ -270,7 +270,7 @@ func TestListOnlineDisks(t *testing.T) {
}
-partsMetadata := partsMetaFromModTimes(test.modTimes, xlMeta.Erasure.Checksum)
+partsMetadata := partsMetaFromModTimes(test.modTimes, DefaultBitrotAlgorithm, xlMeta.Erasure.Checksums)
onlineDisks, modTime := listOnlineDisks(xlDisks, partsMetadata, test.errs)
availableDisks, newErrs, _ := disksWithAllParts(onlineDisks, partsMetadata, test.errs, bucket, object)
@@ -357,8 +357,7 @@ func TestDisksWithAllParts(t *testing.T) {
t.Fatalf("Failed to make a bucket %v", err)
}
-_, err = obj.PutObject(bucket, object, int64(len(data)),
-bytes.NewReader(data), nil, "")
+_, err = obj.PutObject(bucket, object, int64(len(data)), bytes.NewReader(data), nil, "")
if err != nil {
t.Fatalf("Failed to putObject %v", err)
}
@@ -368,8 +367,6 @@ func TestDisksWithAllParts(t *testing.T) {
t.Fatalf("Failed to read xl meta data %v", err)
}
-// Replace the default blake2b erasure algorithm to HashSha256 and test that
-// disks are excluded
diskFailures := make(map[int]string)
// key = disk index, value = part name with hash mismatch
diskFailures[0] = "part.3"
@@ -377,16 +374,15 @@ func TestDisksWithAllParts(t *testing.T) {
diskFailures[15] = "part.2"
for diskIndex, partName := range diskFailures {
-for index, info := range partsMetadata[diskIndex].Erasure.Checksum {
+for index, info := range partsMetadata[diskIndex].Erasure.Checksums {
if info.Name == partName {
-partsMetadata[diskIndex].Erasure.Checksum[index].Algorithm = HashSha256
+partsMetadata[diskIndex].Erasure.Checksums[index].Hash[0]++
}
}
}
errs = make([]error, len(xlDisks))
-filteredDisks, errs, err :=
-disksWithAllParts(xlDisks, partsMetadata, errs, bucket, object)
+filteredDisks, errs, err := disksWithAllParts(xlDisks, partsMetadata, errs, bucket, object)
if err != nil {
t.Errorf("Unexpected error: %s", err)
}
@@ -396,7 +392,6 @@ func TestDisksWithAllParts(t *testing.T) {
}
for diskIndex, disk := range filteredDisks {
if _, ok := diskFailures[diskIndex]; ok {
if disk != nil {
t.Errorf("Disk not filtered as expected, disk: %d", diskIndex)
@@ -422,8 +417,7 @@ func TestDisksWithAllParts(t *testing.T) {
t.Fatalf("Failed to read xl meta data %v", err)
}
-filteredDisks, errs, err =
-disksWithAllParts(xlDisks, partsMetadata, errs, bucket, object)
+filteredDisks, errs, err = disksWithAllParts(xlDisks, partsMetadata, errs, bucket, object)
if err != nil {
t.Errorf("Unexpected error: %s", err)
}