2021-04-18 15:41:13 -04:00
|
|
|
// Copyright (c) 2015-2021 MinIO, Inc.
|
|
|
|
//
|
|
|
|
// This file is part of MinIO Object Storage stack
|
|
|
|
//
|
|
|
|
// This program is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Affero General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// This program is distributed in the hope that it will be useful
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Affero General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Affero General Public License
|
|
|
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
2016-07-12 18:20:31 -04:00
|
|
|
|
2016-08-18 19:23:42 -04:00
|
|
|
package cmd
|
2016-07-12 18:20:31 -04:00
|
|
|
|
|
|
|
import (
|
2017-03-04 17:53:28 -05:00
|
|
|
"bytes"
|
2020-04-14 20:52:38 -04:00
|
|
|
"context"
|
2021-11-22 12:36:29 -05:00
|
|
|
"errors"
|
2020-03-02 19:29:30 -05:00
|
|
|
"fmt"
|
2019-01-17 07:58:18 -05:00
|
|
|
"os"
|
2017-03-04 17:53:28 -05:00
|
|
|
"path/filepath"
|
2016-07-12 18:20:31 -04:00
|
|
|
"testing"
|
|
|
|
"time"
|
2019-03-14 16:08:51 -04:00
|
|
|
|
2022-12-06 16:46:50 -05:00
|
|
|
"github.com/minio/madmin-go/v2"
|
2016-07-12 18:20:31 -04:00
|
|
|
)
|
|
|
|
|
2022-10-12 19:42:45 -04:00
|
|
|
// Returns the latest updated FileInfo files and error in case of failure.
|
|
|
|
func getLatestFileInfo(ctx context.Context, partsMetadata []FileInfo, defaultParityCount int, errs []error) (FileInfo, error) {
|
|
|
|
// There should be atleast half correct entries, if not return failure
|
|
|
|
expectedRQuorum := len(partsMetadata) / 2
|
|
|
|
if defaultParityCount == 0 {
|
|
|
|
// if parity count is '0', we expected all entries to be present.
|
|
|
|
expectedRQuorum = len(partsMetadata)
|
|
|
|
}
|
|
|
|
|
|
|
|
reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, expectedRQuorum)
|
|
|
|
if reducedErr != nil {
|
|
|
|
return FileInfo{}, reducedErr
|
|
|
|
}
|
|
|
|
|
|
|
|
// List all the file commit ids from parts metadata.
|
|
|
|
modTimes := listObjectModtimes(partsMetadata, errs)
|
|
|
|
|
|
|
|
// Count all latest updated FileInfo values
|
|
|
|
var count int
|
|
|
|
var latestFileInfo FileInfo
|
|
|
|
|
|
|
|
// Reduce list of UUIDs to a single common value - i.e. the last updated Time
|
|
|
|
modTime := commonTime(modTimes)
|
|
|
|
|
|
|
|
if modTime.IsZero() || modTime.Equal(timeSentinel) {
|
|
|
|
return FileInfo{}, errErasureReadQuorum
|
|
|
|
}
|
|
|
|
|
|
|
|
// Interate through all the modTimes and count the FileInfo(s) with latest time.
|
|
|
|
for index, t := range modTimes {
|
|
|
|
if partsMetadata[index].IsValid() && t.Equal(modTime) {
|
|
|
|
latestFileInfo = partsMetadata[index]
|
|
|
|
count++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if !latestFileInfo.IsValid() {
|
|
|
|
return FileInfo{}, errErasureReadQuorum
|
|
|
|
}
|
|
|
|
|
|
|
|
if count < latestFileInfo.Erasure.DataBlocks {
|
|
|
|
return FileInfo{}, errErasureReadQuorum
|
|
|
|
}
|
|
|
|
|
|
|
|
return latestFileInfo, nil
|
|
|
|
}
|
|
|
|
|
2016-07-12 18:20:31 -04:00
|
|
|
// validates functionality provided to find most common
|
|
|
|
// time occurrence from a list of time.
|
|
|
|
func TestCommonTime(t *testing.T) {
|
|
|
|
// List of test cases for common modTime.
|
|
|
|
testCases := []struct {
|
2021-11-21 13:41:30 -05:00
|
|
|
times []time.Time
|
|
|
|
time time.Time
|
2016-07-12 18:20:31 -04:00
|
|
|
}{
|
|
|
|
{
|
|
|
|
// 1. Tests common times when slice has varying time elements.
|
|
|
|
[]time.Time{
|
|
|
|
time.Unix(0, 1).UTC(),
|
|
|
|
time.Unix(0, 2).UTC(),
|
|
|
|
time.Unix(0, 3).UTC(),
|
|
|
|
time.Unix(0, 3).UTC(),
|
|
|
|
time.Unix(0, 2).UTC(),
|
|
|
|
time.Unix(0, 3).UTC(),
|
|
|
|
time.Unix(0, 1).UTC(),
|
2021-11-20 14:26:30 -05:00
|
|
|
},
|
|
|
|
time.Unix(0, 3).UTC(),
|
2016-07-12 18:20:31 -04:00
|
|
|
},
|
|
|
|
{
|
|
|
|
// 2. Tests common time obtained when all elements are equal.
|
|
|
|
[]time.Time{
|
|
|
|
time.Unix(0, 3).UTC(),
|
|
|
|
time.Unix(0, 3).UTC(),
|
|
|
|
time.Unix(0, 3).UTC(),
|
|
|
|
time.Unix(0, 3).UTC(),
|
|
|
|
time.Unix(0, 3).UTC(),
|
|
|
|
time.Unix(0, 3).UTC(),
|
|
|
|
time.Unix(0, 3).UTC(),
|
2021-11-20 14:26:30 -05:00
|
|
|
},
|
|
|
|
time.Unix(0, 3).UTC(),
|
2016-07-12 18:20:31 -04:00
|
|
|
},
|
|
|
|
{
|
|
|
|
// 3. Tests common time obtained when elements have a mixture
|
|
|
|
// of sentinel values.
|
|
|
|
[]time.Time{
|
|
|
|
time.Unix(0, 3).UTC(),
|
|
|
|
time.Unix(0, 3).UTC(),
|
|
|
|
time.Unix(0, 2).UTC(),
|
|
|
|
time.Unix(0, 1).UTC(),
|
|
|
|
time.Unix(0, 3).UTC(),
|
|
|
|
time.Unix(0, 4).UTC(),
|
|
|
|
time.Unix(0, 3).UTC(),
|
|
|
|
timeSentinel,
|
|
|
|
timeSentinel,
|
|
|
|
timeSentinel,
|
2021-11-20 14:26:30 -05:00
|
|
|
},
|
|
|
|
time.Unix(0, 3).UTC(),
|
2016-07-12 18:20:31 -04:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tests all the testcases, and validates them against expected
|
|
|
|
// common modtime. Tests fail if modtime does not match.
|
|
|
|
for i, testCase := range testCases {
|
|
|
|
// Obtain a common mod time from modTimes slice.
|
2021-11-21 13:41:30 -05:00
|
|
|
ctime := commonTime(testCase.times)
|
2020-08-24 15:11:20 -04:00
|
|
|
if !testCase.time.Equal(ctime) {
|
2021-11-20 14:26:30 -05:00
|
|
|
t.Errorf("Test case %d, expect to pass but failed. Wanted modTime: %s, got modTime: %s\n", i+1, testCase.time, ctime)
|
2016-07-12 18:20:31 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-03-04 17:53:28 -05:00
|
|
|
|
|
|
|
// TestListOnlineDisks - checks if listOnlineDisks and outDatedDisks
// are consistent with each other.
func TestListOnlineDisks(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Spin up a 16-disk erasure backend to run the scenarios against.
	obj, disks, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatalf("Prepare Erasure backend failed - %v", err)
	}
	defer obj.Shutdown(context.Background())
	defer removeRoots(disks)

	// tamperKind describes how (if at all) a scenario damages the
	// on-disk data after the object is written.
	type tamperKind int
	const (
		noTamper    tamperKind = iota
		deletePart  tamperKind = iota
		corruptPart tamperKind = iota
	)
	threeNanoSecs := time.Unix(0, 3).UTC()
	fourNanoSecs := time.Unix(0, 4).UTC()
	// 7 disks at t=3ns, the remaining 9 carry the sentinel (no usable time).
	modTimesThreeNone := []time.Time{
		threeNanoSecs, threeNanoSecs, threeNanoSecs, threeNanoSecs,
		threeNanoSecs, threeNanoSecs, threeNanoSecs,
		timeSentinel, timeSentinel, timeSentinel, timeSentinel,
		timeSentinel, timeSentinel, timeSentinel, timeSentinel,
		timeSentinel,
	}
	// 8 disks at t=3ns and 8 at t=4ns; the later time should win.
	modTimesThreeFour := []time.Time{
		threeNanoSecs, threeNanoSecs, threeNanoSecs, threeNanoSecs,
		threeNanoSecs, threeNanoSecs, threeNanoSecs, threeNanoSecs,
		fourNanoSecs, fourNanoSecs, fourNanoSecs, fourNanoSecs,
		fourNanoSecs, fourNanoSecs, fourNanoSecs, fourNanoSecs,
	}
	testCases := []struct {
		modTimes       []time.Time
		expectedTime   time.Time
		errs           []error
		_tamperBackend tamperKind
	}{
		{
			modTimes:     modTimesThreeFour,
			expectedTime: fourNanoSecs,
			errs: []error{
				nil, nil, nil, nil, nil, nil, nil, nil, nil,
				nil, nil, nil, nil, nil, nil, nil,
			},
			_tamperBackend: noTamper,
		},
		{
			modTimes:     modTimesThreeNone,
			expectedTime: threeNanoSecs,
			errs: []error{
				// Disks that have a valid xl.meta.
				nil, nil, nil, nil, nil, nil, nil,
				// Majority of disks don't have xl.meta.
				errFileNotFound, errFileNotFound,
				errFileNotFound, errFileNotFound,
				errFileNotFound, errDiskAccessDenied,
				errDiskNotFound, errFileNotFound,
				errFileNotFound,
			},
			_tamperBackend: deletePart,
		},
		{
			modTimes:     modTimesThreeNone,
			expectedTime: threeNanoSecs,
			errs: []error{
				// Disks that have a valid xl.meta.
				nil, nil, nil, nil, nil, nil, nil,
				// Majority of disks don't have xl.meta.
				errFileNotFound, errFileNotFound,
				errFileNotFound, errFileNotFound,
				errFileNotFound, errDiskAccessDenied,
				errDiskNotFound, errFileNotFound,
				errFileNotFound,
			},
			_tamperBackend: corruptPart,
		},
	}

	bucket := "bucket"
	err = obj.MakeBucket(ctx, "bucket", MakeBucketOptions{})
	if err != nil {
		t.Fatalf("Failed to make a bucket %v", err)
	}

	object := "object"
	// Large enough to stay out of the inline-data path (multiple of
	// smallFileThreshold), so parts exist on disk as separate files.
	data := bytes.Repeat([]byte("a"), smallFileThreshold*16)
	z := obj.(*erasureServerPools)
	erasureDisks := z.serverPools[0].sets[0].getDisks()
	for i, test := range testCases {
		test := test
		t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
			// Write the object fresh for every scenario.
			_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
			if err != nil {
				t.Fatalf("Failed to putObject %v", err)
			}

			partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", false)
			fi, err := getLatestFileInfo(ctx, partsMetadata, z.serverPools[0].sets[0].defaultParityCount, errs)
			if err != nil {
				t.Fatalf("Failed to getLatestFileInfo %v", err)
			}

			// Overwrite the on-wire mod times with the scenario's values.
			for j := range partsMetadata {
				if errs[j] != nil {
					t.Fatalf("expected error to be nil: %s", errs[j])
				}
				partsMetadata[j].ModTime = test.modTimes[j]
			}

			// tamperedIndex remembers which disk was damaged (-1 = none).
			tamperedIndex := -1
			switch test._tamperBackend {
			case deletePart:
				for index, err := range test.errs {
					if err != nil {
						continue
					}
					// Remove a part from a disk
					// which has a valid xl.meta,
					// and check if that disk
					// appears in outDatedDisks.
					tamperedIndex = index
					dErr := erasureDisks[index].Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
						Recursive: false,
						Force:     false,
					})
					if dErr != nil {
						t.Fatalf("Failed to delete %s - %v", filepath.Join(object, "part.1"), dErr)
					}
					break
				}
			case corruptPart:
				for index, err := range test.errs {
					if err != nil {
						continue
					}
					// Corrupt a part from a disk
					// which has a valid xl.meta,
					// and check if that disk
					// appears in outDatedDisks.
					tamperedIndex = index
					filePath := pathJoin(erasureDisks[index].String(), bucket, object, fi.DataDir, "part.1")
					f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0)
					if err != nil {
						t.Fatalf("Failed to open %s: %s\n", filePath, err)
					}
					f.WriteString("oops") // Will cause bitrot error
					f.Close()
					break
				}
			}

			// The common mod time reported must match the scenario's
			// expectation regardless of any tampering above.
			onlineDisks, modTime := listOnlineDisks(erasureDisks, partsMetadata, test.errs)
			if !modTime.Equal(test.expectedTime) {
				t.Fatalf("Expected modTime to be equal to %v but was found to be %v",
					test.expectedTime, modTime)
			}
			availableDisks, newErrs, _ := disksWithAllParts(ctx, onlineDisks, partsMetadata,
				test.errs, fi, bucket, object, madmin.HealDeepScan)
			test.errs = newErrs

			// A tampered disk must have been filtered out of the
			// available-disks result.
			if test._tamperBackend != noTamper {
				if tamperedIndex != -1 && availableDisks[tamperedIndex] != nil {
					t.Fatalf("Drive (%v) with part.1 missing is not a drive with available data",
						erasureDisks[tamperedIndex])
				}
			}
		})
	}
}
|
|
|
|
|
|
|
|
// TestListOnlineDisksSmallObjects - checks if listOnlineDisks and outDatedDisks
// are consistent with each other.
func TestListOnlineDisksSmallObjects(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Spin up a 16-disk erasure backend to run the scenarios against.
	obj, disks, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatalf("Prepare Erasure backend failed - %v", err)
	}
	defer obj.Shutdown(context.Background())
	defer removeRoots(disks)

	// tamperKind describes how (if at all) a scenario damages the
	// on-disk data after the object is written.
	type tamperKind int
	const (
		noTamper    tamperKind = iota
		deletePart  tamperKind = iota
		corruptPart tamperKind = iota
	)
	// NOTE: intentionally shadows the package-level timeSentinel for
	// this test's mod-time tables (second precision, not nanoseconds).
	timeSentinel := time.Unix(1, 0).UTC()
	threeNanoSecs := time.Unix(3, 0).UTC()
	fourNanoSecs := time.Unix(4, 0).UTC()
	// 7 disks at t=3s, the remaining 9 carry the sentinel value.
	modTimesThreeNone := []time.Time{
		threeNanoSecs, threeNanoSecs, threeNanoSecs, threeNanoSecs,
		threeNanoSecs, threeNanoSecs, threeNanoSecs,
		timeSentinel, timeSentinel, timeSentinel, timeSentinel,
		timeSentinel, timeSentinel, timeSentinel, timeSentinel,
		timeSentinel,
	}
	// 8 disks at t=3s and 8 at t=4s; the later time should win.
	modTimesThreeFour := []time.Time{
		threeNanoSecs, threeNanoSecs, threeNanoSecs, threeNanoSecs,
		threeNanoSecs, threeNanoSecs, threeNanoSecs, threeNanoSecs,
		fourNanoSecs, fourNanoSecs, fourNanoSecs, fourNanoSecs,
		fourNanoSecs, fourNanoSecs, fourNanoSecs, fourNanoSecs,
	}
	testCases := []struct {
		modTimes       []time.Time
		expectedTime   time.Time
		errs           []error
		_tamperBackend tamperKind
	}{
		{
			modTimes:     modTimesThreeFour,
			expectedTime: fourNanoSecs,
			errs: []error{
				nil, nil, nil, nil, nil, nil, nil, nil, nil,
				nil, nil, nil, nil, nil, nil, nil,
			},
			_tamperBackend: noTamper,
		},
		{
			modTimes:     modTimesThreeNone,
			expectedTime: threeNanoSecs,
			errs: []error{
				// Disks that have a valid xl.meta.
				nil, nil, nil, nil, nil, nil, nil,
				// Majority of disks don't have xl.meta.
				errFileNotFound, errFileNotFound,
				errFileNotFound, errFileNotFound,
				errFileNotFound, errDiskAccessDenied,
				errDiskNotFound, errFileNotFound,
				errFileNotFound,
			},
			_tamperBackend: deletePart,
		},
		{
			modTimes:     modTimesThreeNone,
			expectedTime: threeNanoSecs,
			errs: []error{
				// Disks that have a valid xl.meta.
				nil, nil, nil, nil, nil, nil, nil,
				// Majority of disks don't have xl.meta.
				errFileNotFound, errFileNotFound,
				errFileNotFound, errFileNotFound,
				errFileNotFound, errDiskAccessDenied,
				errDiskNotFound, errFileNotFound,
				errFileNotFound,
			},
			_tamperBackend: corruptPart,
		},
	}

	bucket := "bucket"
	err = obj.MakeBucket(ctx, "bucket", MakeBucketOptions{})
	if err != nil {
		t.Fatalf("Failed to make a bucket %v", err)
	}

	object := "object"
	// Below smallFileThreshold, so data is inlined into xl.meta rather
	// than written as separate part files.
	data := bytes.Repeat([]byte("a"), smallFileThreshold/2)
	z := obj.(*erasureServerPools)
	erasureDisks := z.serverPools[0].sets[0].getDisks()
	for i, test := range testCases {
		test := test
		t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
			// Write the object fresh for every scenario.
			_, err := obj.PutObject(ctx, bucket, object,
				mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
			if err != nil {
				t.Fatalf("Failed to putObject %v", err)
			}

			partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", true)
			_, err = getLatestFileInfo(ctx, partsMetadata, z.serverPools[0].sets[0].defaultParityCount, errs)
			if err != nil {
				t.Fatalf("Failed to getLatestFileInfo %v", err)
			}

			// Overwrite the mod times with the scenario's values.
			for j := range partsMetadata {
				if errs[j] != nil {
					t.Fatalf("expected error to be nil: %s", errs[j])
				}
				partsMetadata[j].ModTime = test.modTimes[j]
			}

			// Persist the modified metadata back to every disk so the
			// backend actually reflects the scenario's mod times.
			if erasureDisks, err = writeUniqueFileInfo(ctx, erasureDisks, bucket, object, partsMetadata, diskCount(erasureDisks)); err != nil {
				t.Fatal(ctx, err)
			}

			// tamperedIndex remembers which disk was damaged (-1 = none).
			tamperedIndex := -1
			switch test._tamperBackend {
			case deletePart:
				for index, err := range test.errs {
					if err != nil {
						continue
					}
					// Remove a part from a disk
					// which has a valid xl.meta,
					// and check if that disk
					// appears in outDatedDisks.
					tamperedIndex = index
					dErr := erasureDisks[index].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
						Recursive: false,
						Force:     false,
					})
					if dErr != nil {
						t.Fatalf("Failed to delete %s - %v", pathJoin(object, xlStorageFormatFile), dErr)
					}
					break
				}
			case corruptPart:
				for index, err := range test.errs {
					if err != nil {
						continue
					}
					// Corrupt a part from a disk
					// which has a valid xl.meta,
					// and check if that disk
					// appears in outDatedDisks.
					tamperedIndex = index
					filePath := pathJoin(erasureDisks[index].String(), bucket, object, xlStorageFormatFile)
					f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0)
					if err != nil {
						t.Fatalf("Failed to open %s: %s\n", filePath, err)
					}
					f.WriteString("oops") // Will cause bitrot error
					f.Close()
					break
				}

			}
			// Re-read after tampering; quorum is expected to be lost.
			partsMetadata, errs = readAllFileInfo(ctx, erasureDisks, bucket, object, "", true)
			fi, err := getLatestFileInfo(ctx, partsMetadata, z.serverPools[0].sets[0].defaultParityCount, errs)
			if !errors.Is(err, errErasureReadQuorum) {
				t.Fatalf("Failed to getLatestFileInfo, expected %v, got %v", errErasureReadQuorum, err)
			}

			// The common mod time reported must still match expectation.
			onlineDisks, modTime := listOnlineDisks(erasureDisks, partsMetadata, test.errs)
			if !modTime.Equal(test.expectedTime) {
				t.Fatalf("Expected modTime to be equal to %v but was found to be %v",
					test.expectedTime, modTime)
			}

			availableDisks, newErrs, _ := disksWithAllParts(ctx, onlineDisks, partsMetadata,
				test.errs, fi, bucket, object, madmin.HealDeepScan)
			test.errs = newErrs

			// A tampered disk must have been filtered out of the
			// available-disks result.
			if test._tamperBackend != noTamper {
				if tamperedIndex != -1 && availableDisks[tamperedIndex] != nil {
					t.Fatalf("Drive (%v) with part.1 missing is not a drive with available data",
						erasureDisks[tamperedIndex])
				}
			}
		})
	}
}
|
2017-06-14 20:13:02 -04:00
|
|
|
|
|
|
|
// TestDisksWithAllParts exercises disksWithAllParts across four scenarios:
// untouched metadata, a skewed ModTime, a mismatched DataDir, and
// bitrot-corrupted part files.
func TestDisksWithAllParts(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Spin up a 16-disk erasure backend for the test.
	obj, disks, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatalf("Prepare Erasure backend failed - %v", err)
	}
	defer obj.Shutdown(context.Background())
	defer removeRoots(disks)

	bucket := "bucket"
	object := "object"
	// make data with more than one part
	partCount := 3
	data := bytes.Repeat([]byte("a"), 6*1024*1024*partCount)
	z := obj.(*erasureServerPools)
	s := z.serverPools[0].sets[0]
	erasureDisks := s.getDisks()
	err = obj.MakeBucket(ctx, "bucket", MakeBucketOptions{})
	if err != nil {
		t.Fatalf("Failed to make a bucket %v", err)
	}

	// Seed the backend with a multi-part object.
	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
	if err != nil {
		t.Fatalf("Failed to putObject %v", err)
	}

	// Sanity check: metadata must be readable at read quorum.
	_, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", false)
	readQuorum := len(erasureDisks) / 2
	if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
		t.Fatalf("Failed to read xl meta data %v", reducedErr)
	}

	// Test 1: Test that all disks are returned without any failures with
	// unmodified meta data
	partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", false)
	if err != nil {
		t.Fatalf("Failed to read xl meta data %v", err)
	}

	fi, err := getLatestFileInfo(ctx, partsMetadata, s.defaultParityCount, errs)
	if err != nil {
		t.Fatalf("Failed to get quorum consistent fileInfo %v", err)
	}

	erasureDisks, _ = listOnlineDisks(erasureDisks, partsMetadata, errs)

	filteredDisks, errs, _ := disksWithAllParts(ctx, erasureDisks, partsMetadata,
		errs, fi, bucket, object, madmin.HealDeepScan)

	if len(filteredDisks) != len(erasureDisks) {
		t.Errorf("Unexpected number of drives: %d", len(filteredDisks))
	}

	// With pristine metadata no disk should be filtered or report an error.
	for diskIndex, disk := range filteredDisks {
		if errs[diskIndex] != nil {
			t.Errorf("Unexpected error %s", errs[diskIndex])
		}

		if disk == nil {
			t.Errorf("Drive erroneously filtered, driveIndex: %d", diskIndex)
		}
	}

	// Test 2: Not synchronized modtime
	// Skew disk 0's ModTime; only that disk should be filtered out.
	partsMetadataBackup := partsMetadata[0]
	partsMetadata[0].ModTime = partsMetadata[0].ModTime.Add(-1 * time.Hour)

	errs = make([]error, len(erasureDisks))
	filteredDisks, _, _ = disksWithAllParts(ctx, erasureDisks, partsMetadata,
		errs, fi, bucket, object, madmin.HealDeepScan)

	if len(filteredDisks) != len(erasureDisks) {
		t.Errorf("Unexpected number of drives: %d", len(filteredDisks))
	}
	for diskIndex, disk := range filteredDisks {
		if diskIndex == 0 && disk != nil {
			t.Errorf("Drive not filtered as expected, drive: %d", diskIndex)
		}
		if diskIndex != 0 && disk == nil {
			t.Errorf("Drive erroneously filtered, driveIndex: %d", diskIndex)
		}
	}
	partsMetadata[0] = partsMetadataBackup // Revert before going to the next test

	// Test 3: Not synchronized DataDir
	// Point disk 1's DataDir elsewhere; only that disk should be filtered.
	partsMetadataBackup = partsMetadata[1]
	partsMetadata[1].DataDir = "foo-random"

	errs = make([]error, len(erasureDisks))
	filteredDisks, _, _ = disksWithAllParts(ctx, erasureDisks, partsMetadata,
		errs, fi, bucket, object, madmin.HealDeepScan)

	if len(filteredDisks) != len(erasureDisks) {
		t.Errorf("Unexpected number of drives: %d", len(filteredDisks))
	}
	for diskIndex, disk := range filteredDisks {
		if diskIndex == 1 && disk != nil {
			t.Errorf("Drive not filtered as expected, drive: %d", diskIndex)
		}
		if diskIndex != 1 && disk == nil {
			t.Errorf("Drive erroneously filtered, driveIndex: %d", diskIndex)
		}
	}
	partsMetadata[1] = partsMetadataBackup // Revert before going to the next test

	// Test 4: key = disk index, value = part name with hash mismatch
	diskFailures := make(map[int]string)
	diskFailures[0] = "part.1"
	diskFailures[3] = "part.1"
	diskFailures[15] = "part.1"

	// Corrupt the named part file on each selected disk so the deep
	// (bitrot) scan detects a hash mismatch there.
	for diskIndex, partName := range diskFailures {
		for i := range partsMetadata[diskIndex].Erasure.Checksums {
			if fmt.Sprintf("part.%d", i+1) == partName {
				filePath := pathJoin(erasureDisks[diskIndex].String(), bucket, object, partsMetadata[diskIndex].DataDir, partName)
				f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0)
				if err != nil {
					t.Fatalf("Failed to open %s: %s\n", filePath, err)
				}
				f.WriteString("oops") // Will cause bitrot error
				f.Close()
			}
		}
	}

	errs = make([]error, len(erasureDisks))
	filteredDisks, errs, _ = disksWithAllParts(ctx, erasureDisks, partsMetadata,
		errs, fi, bucket, object, madmin.HealDeepScan)

	if len(filteredDisks) != len(erasureDisks) {
		t.Errorf("Unexpected number of drives: %d", len(filteredDisks))
	}

	// Corrupted disks must be filtered and carry an error; all others
	// must remain available and error-free.
	for diskIndex, disk := range filteredDisks {
		if _, ok := diskFailures[diskIndex]; ok {
			if disk != nil {
				t.Errorf("Drive not filtered as expected, drive: %d", diskIndex)
			}
			if errs[diskIndex] == nil {
				t.Errorf("Expected error not received, driveIndex: %d", diskIndex)
			}
		} else {
			if disk == nil {
				t.Errorf("Drive erroneously filtered, driveIndex: %d", diskIndex)
			}
			if errs[diskIndex] != nil {
				t.Errorf("Unexpected error, %s, driveIndex: %d", errs[diskIndex], diskIndex)
			}

		}
	}
}
|
2023-04-14 19:23:28 -04:00
|
|
|
|
|
|
|
func TestCommonParities(t *testing.T) {
	// This test uses two FileInfo values that represent the same object but
	// have different parities. They occur in equal number of drives, but only
	// one has read quorum. commonParity should pick the parity corresponding to
	// the FileInfo which has read quorum.

	// fi1: parity 6/6 (data/parity) — the variant WITHOUT read quorum.
	fi1 := FileInfo{
		Volume:         "mybucket",
		Name:           "myobject",
		VersionID:      "",
		IsLatest:       true,
		Deleted:        false,
		ExpireRestored: false,
		DataDir:        "4a01d9dd-0c5e-4103-88f8-b307c57d212e",
		XLV1:           false,
		ModTime:        time.Date(2023, time.March, 15, 11, 18, 4, 989906961, time.UTC),
		Size:           329289, Mode: 0x0, WrittenByVersion: 0x63c77756,
		Metadata: map[string]string{
			"content-type": "application/octet-stream", "etag": "f205307ef9f50594c4b86d9c246bee86", "x-minio-internal-erasure-upgraded": "5->6", "x-minio-internal-inline-data": "true",
		},
		Parts: []ObjectPartInfo{
			{
				ETag:       "",
				Number:     1,
				Size:       329289,
				ActualSize: 329289,
				ModTime:    time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),
				Index:      []uint8(nil),
				Checksums:  map[string]string(nil),
			},
		},
		Erasure: ErasureInfo{
			Algorithm:    "ReedSolomon",
			DataBlocks:   6,
			ParityBlocks: 6,
			BlockSize:    1048576,
			Index:        1,
			Distribution: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
			Checksums:    []ChecksumInfo{{PartNumber: 1, Algorithm: 0x3, Hash: []uint8{}}},
		},
		NumVersions: 1,
		Idx:         0,
	}

	// fi2: parity 7/5 (data/parity) — the variant WITH read quorum.
	fi2 := FileInfo{
		Volume:           "mybucket",
		Name:             "myobject",
		VersionID:        "",
		IsLatest:         true,
		Deleted:          false,
		DataDir:          "6f5c106d-9d28-4c85-a7f4-eac56225876b",
		ModTime:          time.Date(2023, time.March, 15, 19, 57, 30, 492530160, time.UTC),
		Size:             329289,
		Mode:             0x0,
		WrittenByVersion: 0x63c77756,
		Metadata:         map[string]string{"content-type": "application/octet-stream", "etag": "f205307ef9f50594c4b86d9c246bee86", "x-minio-internal-inline-data": "true"},
		Parts: []ObjectPartInfo{
			{
				ETag:       "",
				Number:     1,
				Size:       329289,
				ActualSize: 329289,
				ModTime:    time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),
				Index:      []uint8(nil),
				Checksums:  map[string]string(nil),
			},
		},
		Erasure: ErasureInfo{
			Algorithm:    "ReedSolomon",
			DataBlocks:   7,
			ParityBlocks: 5,
			BlockSize:    1048576,
			Index:        2,
			Distribution: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
			Checksums: []ChecksumInfo{
				{PartNumber: 1, Algorithm: 0x3, Hash: []uint8{}},
			},
		},
		NumVersions: 1,
		Idx:         0,
	}

	// fiDel: a delete marker (no erasure info, so zero parity blocks).
	fiDel := FileInfo{
		Volume:           "mybucket",
		Name:             "myobject",
		VersionID:        "",
		IsLatest:         true,
		Deleted:          true,
		ModTime:          time.Date(2023, time.March, 15, 19, 57, 30, 492530160, time.UTC),
		Mode:             0x0,
		WrittenByVersion: 0x63c77756,
		NumVersions:      1,
		Idx:              0,
	}

	tests := []struct {
		fi1, fi2 FileInfo
	}{
		{
			fi1: fi1,
			fi2: fi2,
		},
		{
			fi1: fi1,
			fi2: fiDel,
		},
	}
	for idx, test := range tests {
		// Build a 12-drive metadata array alternating between the two
		// variants (even indices get fi2, odd get fi1).
		var metaArr []FileInfo
		for i := 0; i < 12; i++ {
			fi := test.fi1
			if i%2 == 0 {
				fi = test.fi2
			}
			metaArr = append(metaArr, fi)
		}

		parities := listObjectParities(metaArr, make([]error, len(metaArr)))
		parity := commonParity(parities, 5)
		// Count how many drives agree with the chosen parity; the
		// agreeing set must be at least a read quorum (N - parity).
		var match int
		for _, fi := range metaArr {
			if fi.Erasure.ParityBlocks == parity {
				match++
			}
		}
		if match < len(metaArr)-parity {
			t.Fatalf("Test %d: Expected %d drives with parity=%d, but got %d", idx, len(metaArr)-parity, parity, match)
		}
	}
}
|