minio/cmd/erasure-object_test.go


/*
* MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"errors"
"io/ioutil"
"os"
"testing"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/cmd/config/storageclass"
)
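// TestRepeatPutObjectPart verifies that re-uploading an existing part
// number succeeds rather than erroring (see minio issue #1930).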
func TestRepeatPutObjectPart(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objLayer ObjectLayer
var disks []string
var err error
var opts ObjectOptions
objLayer, disks, err = prepareErasure16(ctx)
if err != nil {
t.Fatal(err)
}
// Clean up the temporary test directories.
defer objLayer.Shutdown(context.Background())
defer removeRoots(disks)
err = objLayer.MakeBucketWithLocation(ctx, "bucket1", BucketOptions{})
if err != nil {
t.Fatal(err)
}
uploadID, err := objLayer.NewMultipartUpload(ctx, "bucket1", "mpartObj1", opts)
if err != nil {
t.Fatal(err)
}
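// Use a 5 MiB payload (the minimum S3 part size) and precompute its MD5
// for the part uploads below.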
fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
md5Hex := getMD5Hash(fiveMBBytes)
_, err = objLayer.PutObjectPart(ctx, "bucket1", "mpartObj1", uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
if err != nil {
t.Fatal(err)
}
// PutObjectPart should succeed even if part already exists. ref: https://github.com/minio/minio/issues/1930
_, err = objLayer.PutObjectPart(ctx, "bucket1", "mpartObj1", uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
if err != nil {
t.Fatal(err)
}
}
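// TestErasureDeleteObjectBasic exercises DeleteObject across invalid
// bucket names, missing objects, and one object that should delete cleanly.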
func TestErasureDeleteObjectBasic(t *testing.T) {
testCases := []struct {
bucket string
object string
expectedErr error
}{
{".test", "dir/obj", BucketNameInvalid{Bucket: ".test"}},
{"----", "dir/obj", BucketNameInvalid{Bucket: "----"}},
{"bucket", "", ObjectNameInvalid{Bucket: "bucket", Object: ""}},
{"bucket", "doesnotexist", ObjectNotFound{Bucket: "bucket", Object: "doesnotexist"}},
{"bucket", "dir/doesnotexist", ObjectNotFound{Bucket: "bucket", Object: "dir/doesnotexist"}},
{"bucket", "dir", ObjectNotFound{Bucket: "bucket", Object: "dir"}},
{"bucket", "dir/", ObjectNotFound{Bucket: "bucket", Object: "dir/"}},
{"bucket", "dir/obj", nil},
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create an instance of xl backend
xl, fsDirs, err := prepareErasure16(ctx)
if err != nil {
t.Fatal(err)
}
defer xl.Shutdown(context.Background())
err = xl.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
if err != nil {
t.Fatal(err)
}
// Create object "dir/obj" under bucket "bucket" so the final test case (expected to succeed) can pass
_, err = xl.PutObject(ctx, "bucket", "dir/obj", mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{})
if err != nil {
t.Fatalf("Erasure Object upload failed: <ERROR> %s", err)
}
for _, test := range testCases {
test := test
t.Run("", func(t *testing.T) {
_, actualErr := xl.DeleteObject(ctx, test.bucket, test.object, ObjectOptions{})
if test.expectedErr != nil && actualErr != test.expectedErr {
t.Errorf("Expected to fail with %s, but failed with %s", test.expectedErr, actualErr)
}
if test.expectedErr == nil && actualErr != nil {
t.Errorf("Expected to pass, but failed with %s", actualErr)
}
})
}
// Cleanup backend directories
removeRoots(fsDirs)
}
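// TestErasureDeleteObjectsErasureSet verifies bulk DeleteObjects across a
// 32-set erasure deployment: every delete should succeed, and a subsequent
// GetObjectInfo should report ObjectNotFound.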
func TestErasureDeleteObjectsErasureSet(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []*erasureObjects
for i := 0; i < 32; i++ {
obj, fsDirs, err := prepareErasure(ctx, 16)
if err != nil {
t.Fatal("Unable to initialize 'Erasure' object layer.", err)
}
// Remove all dirs.
for _, dir := range fsDirs {
defer os.RemoveAll(dir)
}
z := obj.(*erasureServerPools)
xl := z.serverPools[0].sets[0]
objs = append(objs, xl)
}
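// Stitch the 32 sets into a single erasureSets layer; the "CRCMOD"
// distribution algorithm hashes each object name to choose its set.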
erasureSets := &erasureSets{sets: objs, distributionAlgo: "CRCMOD"}
type testCaseType struct {
bucket string
object string
}
bucketName := "bucket"
testCases := []testCaseType{
{bucketName, "dir/obj1"},
{bucketName, "dir/obj2"},
{bucketName, "obj3"},
{bucketName, "obj_4"},
}
err := erasureSets.MakeBucketWithLocation(ctx, bucketName, BucketOptions{})
if err != nil {
t.Fatal(err)
}
for _, testCase := range testCases {
_, err = erasureSets.PutObject(ctx, testCase.bucket, testCase.object,
mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{})
if err != nil {
t.Fatalf("Erasure Object upload failed: <ERROR> %s", err)
}
}
toObjectNames := func(testCases []testCaseType) []ObjectToDelete {
names := make([]ObjectToDelete, len(testCases))
for i := range testCases {
names[i] = ObjectToDelete{ObjectName: testCases[i].object}
}
return names
}
objectNames := toObjectNames(testCases)
_, delErrs := erasureSets.DeleteObjects(ctx, bucketName, objectNames, ObjectOptions{})
for i := range delErrs {
if delErrs[i] != nil {
t.Errorf("Failed to remove object `%v` with the error: `%v`", objectNames[i], delErrs[i])
}
}
for _, test := range testCases {
_, statErr := erasureSets.GetObjectInfo(ctx, test.bucket, test.object, ObjectOptions{})
switch statErr.(type) {
case ObjectNotFound:
default:
t.Fatalf("Object %s is not removed", test.bucket+SlashSeparator+test.object)
}
}
}
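// TestErasureDeleteObjectDiskNotFound checks that DeleteObject still
// succeeds while write quorum disks remain, and fails with
// errErasureWriteQuorum once quorum is lost.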
func TestErasureDeleteObjectDiskNotFound(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create an instance of xl backend.
obj, fsDirs, err := prepareErasure16(ctx)
if err != nil {
t.Fatal(err)
}
// Cleanup backend directories
defer obj.Shutdown(context.Background())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)
xl := z.serverPools[0].sets[0]
// Create "bucket"
err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
if err != nil {
t.Fatal(err)
}
bucket := "bucket"
object := "object"
opts := ObjectOptions{}
// Create object "obj" under bucket "bucket".
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
if err != nil {
t.Fatal(err)
}
// For a 16-disk setup, the write quorum is 9. To simulate disks not found
// while quorum is still available, we fail 7 disks, leaving quorum disks behind.
erasureDisks := xl.getDisks()
z.serverPools[0].erasureDisksMu.Lock()
xl.getDisks = func() []StorageAPI {
for i := range erasureDisks[:7] {
erasureDisks[i] = newNaughtyDisk(erasureDisks[i], nil, errFaultyDisk)
}
return erasureDisks
}
z.serverPools[0].erasureDisksMu.Unlock()
_, err = obj.DeleteObject(ctx, bucket, object, ObjectOptions{})
if err != nil {
t.Fatal(err)
}
// Create "obj" under "bucket".
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
if err != nil {
t.Fatal(err)
}
// Remove one more disk to 'lose' quorum, by setting it to nil.
erasureDisks = xl.getDisks()
z.serverPools[0].erasureDisksMu.Lock()
xl.getDisks = func() []StorageAPI {
erasureDisks[7] = nil
erasureDisks[8] = nil
return erasureDisks
}
z.serverPools[0].erasureDisksMu.Unlock()
_, err = obj.DeleteObject(ctx, bucket, object, ObjectOptions{})
// Since a majority of the disks are unavailable, write quorum cannot be reached, hence the errErasureWriteQuorum error.
if !errors.Is(err, errErasureWriteQuorum) {
t.Errorf("Expected deleteObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err)
}
}
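// TestGetObjectNoQuorum verifies GetObject's errors when the data parts
// are deleted from disk and when too few disks respond for read quorum.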
func TestGetObjectNoQuorum(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create an instance of xl backend.
obj, fsDirs, err := prepareErasure16(ctx)
if err != nil {
t.Fatal(err)
}
// Cleanup backend directories.
defer obj.Shutdown(context.Background())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)
xl := z.serverPools[0].sets[0]
// Create "bucket"
err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
if err != nil {
t.Fatal(err)
}
bucket := "bucket"
object := "object"
opts := ObjectOptions{}
// Test use case 1: All disks are online and every xl.meta is present, but the data parts are missing
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
if err != nil {
t.Fatal(err)
}
for _, disk := range xl.getDisks() {
files, _ := disk.ListDir(ctx, bucket, object, -1)
for _, file := range files {
if file != "xl.meta" {
disk.Delete(ctx, bucket, pathJoin(object, file), true)
}
}
}
err = xl.GetObject(ctx, bucket, object, 0, int64(len("abcd")), ioutil.Discard, "", opts)
if err != toObjectErr(errFileNotFound, bucket, object) {
t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errFileNotFound, bucket, object), err)
}
// Test use case 2: Take 9 disks offline, which leaves fewer than the
// quorum number of disks in a 16-disk erasure setup. The original
// disks are 'replaced' with naughtyDisks that fail after 'f'
// successful StorageAPI method invocations, where f in [0,2)
// Create "object" under "bucket".
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
if err != nil {
t.Fatal(err)
}
for f := 0; f < 2; f++ {
diskErrors := make(map[int]error)
for i := 0; i <= f; i++ {
diskErrors[i] = nil
}
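// Replace the first 9 disks with naughty disks seeded from diskErrors:
// they let the first f+1 StorageAPI calls succeed, then return
// errFaultyDisk for every later call.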
erasureDisks := xl.getDisks()
for i := range erasureDisks[:9] {
switch diskType := erasureDisks[i].(type) {
case *naughtyDisk:
erasureDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk)
default:
erasureDisks[i] = newNaughtyDisk(erasureDisks[i], diskErrors, errFaultyDisk)
}
}
z.serverPools[0].erasureDisksMu.Lock()
xl.getDisks = func() []StorageAPI {
return erasureDisks
}
z.serverPools[0].erasureDisksMu.Unlock()
// Fetch object from store.
err = xl.GetObject(ctx, bucket, object, 0, int64(len("abcd")), ioutil.Discard, "", opts)
if err != toObjectErr(errErasureReadQuorum, bucket, object) {
t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errErasureReadQuorum, bucket, object), err)
}
}
}
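// TestHeadObjectNoQuorum verifies that GetObjectInfo succeeds from
// metadata alone when data dirs are missing, but fails without read quorum.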
func TestHeadObjectNoQuorum(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create an instance of xl backend.
obj, fsDirs, err := prepareErasure16(ctx)
if err != nil {
t.Fatal(err)
}
// Cleanup backend directories.
defer obj.Shutdown(context.Background())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)
xl := z.serverPools[0].sets[0]
// Create "bucket"
err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
if err != nil {
t.Fatal(err)
}
bucket := "bucket"
object := "object"
opts := ObjectOptions{}
// Test use case 1: All disks are online and every xl.meta is present, but the data parts are missing
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
if err != nil {
t.Fatal(err)
}
for _, disk := range xl.getDisks() {
files, _ := disk.ListDir(ctx, bucket, object, -1)
for _, file := range files {
if file != "xl.meta" {
disk.Delete(ctx, bucket, pathJoin(object, file), true)
}
}
}
_, err = xl.GetObjectInfo(ctx, bucket, object, opts)
if err != nil {
t.Errorf("Expected GetObjectInfo to succeed when data dirs are missing, but failed with %v", err)
}
// Test use case 2: Take 10 disks offline by setting them to nil, which
// leaves fewer than the read quorum number of disks in a 16-disk
// erasure setup.
// Create "object" under "bucket".
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
if err != nil {
t.Fatal(err)
}
erasureDisks := xl.getDisks()
for i := range erasureDisks[:10] {
erasureDisks[i] = nil
}
z.serverPools[0].erasureDisksMu.Lock()
xl.getDisks = func() []StorageAPI {
return erasureDisks
}
z.serverPools[0].erasureDisksMu.Unlock()
// Fetch object from store.
_, err = xl.GetObjectInfo(ctx, bucket, object, opts)
if err != toObjectErr(errErasureReadQuorum, bucket, object) {
t.Errorf("Expected GetObjectInfo to fail with %v, but failed with %v", toObjectErr(errErasureReadQuorum, bucket, object), err)
}
}
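// TestPutObjectNoQuorum verifies that PutObject fails with
// errErasureWriteQuorum when fewer than quorum disks accept writes.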
func TestPutObjectNoQuorum(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Create an instance of xl backend.
obj, fsDirs, err := prepareErasure16(ctx)
if err != nil {
t.Fatal(err)
}
// Cleanup backend directories.
defer obj.Shutdown(context.Background())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)
xl := z.serverPools[0].sets[0]
// Create "bucket"
err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
if err != nil {
t.Fatal(err)
}
bucket := "bucket"
object := "object"
opts := ObjectOptions{}
// Create "object" under "bucket".
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
if err != nil {
t.Fatal(err)
}
// Take 9 disks offline, which leaves fewer than the quorum number of
// disks in a 16-disk erasure setup. The original disks are 'replaced'
// with naughtyDisks that fail after 'f' successful StorageAPI method
// invocations, where f in [0,3)
for f := 0; f < 3; f++ {
diskErrors := make(map[int]error)
for i := 0; i <= f; i++ {
diskErrors[i] = nil
}
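// As above, swap in naughty disks for the first 9 drives: each lets the
// first f+1 StorageAPI calls through before failing with errFaultyDisk.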
erasureDisks := xl.getDisks()
for i := range erasureDisks[:9] {
switch diskType := erasureDisks[i].(type) {
case *naughtyDisk:
erasureDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk)
default:
erasureDisks[i] = newNaughtyDisk(erasureDisks[i], diskErrors, errFaultyDisk)
}
}
z.serverPools[0].erasureDisksMu.Lock()
xl.getDisks = func() []StorageAPI {
return erasureDisks
}
z.serverPools[0].erasureDisksMu.Unlock()
// Upload new content to same object "object"
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
if !errors.Is(err, errErasureWriteQuorum) {
t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err)
}
}
}
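// TestObjectQuorumFromMeta wraps testObjectQuorumFromMeta with an object
// layer backed by temporary directories.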
func TestObjectQuorumFromMeta(t *testing.T) {
ExecObjectLayerTestWithDirs(t, testObjectQuorumFromMeta)
}
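// testObjectQuorumFromMeta uploads objects under several storage-class
// configurations and checks the read/write quorums derived from their
// metadata against the expected values.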
func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []string, t TestErrHandler) {
restoreGlobalStorageClass := globalStorageClass
defer func() {
globalStorageClass = restoreGlobalStorageClass
}()
bucket := getRandomBucketName()
var opts ObjectOptions
// Generate data large enough to span more than one part.
partCount := 3
data := bytes.Repeat([]byte("a"), 6*1024*1024*partCount)
z := obj.(*erasureServerPools)
xl := z.serverPools[0].sets[0]
erasureDisks := xl.getDisks()
ctx, cancel := context.WithCancel(GlobalContext)
defer cancel()
err := obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
if err != nil {
t.Fatalf("Failed to make a bucket %v", err)
}
// Object for test case 1 - No StorageClass defined, no MetaData in PutObject
object1 := "object1"
_, err = obj.PutObject(ctx, bucket, object1, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
if err != nil {
t.Fatalf("Failed to putObject %v", err)
}
parts1, errs1 := readAllFileInfo(ctx, erasureDisks, bucket, object1, "")
parts1SC := globalStorageClass
// Object for test case 2 - No StorageClass defined, MetaData in PutObject requesting RRS Class
object2 := "object2"
metadata2 := make(map[string]string)
metadata2["x-amz-storage-class"] = storageclass.RRS
_, err = obj.PutObject(ctx, bucket, object2, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata2})
if err != nil {
t.Fatalf("Failed to putObject %v", err)
}
parts2, errs2 := readAllFileInfo(ctx, erasureDisks, bucket, object2, "")
parts2SC := globalStorageClass
// Object for test case 3 - No StorageClass defined, MetaData in PutObject requesting Standard Storage Class
object3 := "object3"
metadata3 := make(map[string]string)
metadata3["x-amz-storage-class"] = storageclass.STANDARD
_, err = obj.PutObject(ctx, bucket, object3, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata3})
if err != nil {
t.Fatalf("Failed to putObject %v", err)
}
parts3, errs3 := readAllFileInfo(ctx, erasureDisks, bucket, object3, "")
parts3SC := globalStorageClass
// Object for test case 4 - Standard StorageClass defined as Parity 6, MetaData in PutObject requesting Standard Storage Class
object4 := "object4"
metadata4 := make(map[string]string)
metadata4["x-amz-storage-class"] = storageclass.STANDARD
globalStorageClass = storageclass.Config{
Standard: storageclass.StorageClass{
Parity: 6,
},
}
_, err = obj.PutObject(ctx, bucket, object4, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata4})
if err != nil {
t.Fatalf("Failed to putObject %v", err)
}
parts4, errs4 := readAllFileInfo(ctx, erasureDisks, bucket, object4, "")
parts4SC := storageclass.Config{
Standard: storageclass.StorageClass{
Parity: 6,
},
}
// Object for test case 5 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting RRS Class
// Reset global storage class flags
object5 := "object5"
metadata5 := make(map[string]string)
metadata5["x-amz-storage-class"] = storageclass.RRS
globalStorageClass = storageclass.Config{
RRS: storageclass.StorageClass{
Parity: 2,
},
}
_, err = obj.PutObject(ctx, bucket, object5, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata5})
if err != nil {
t.Fatalf("Failed to putObject %v", err)
}
parts5, errs5 := readAllFileInfo(ctx, erasureDisks, bucket, object5, "")
parts5SC := storageclass.Config{
RRS: storageclass.StorageClass{
Parity: 2,
},
}
// Object for test case 6 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting Standard Storage Class
object6 := "object6"
metadata6 := make(map[string]string)
metadata6["x-amz-storage-class"] = storageclass.STANDARD
globalStorageClass = storageclass.Config{
RRS: storageclass.StorageClass{
Parity: 2,
},
}
_, err = obj.PutObject(ctx, bucket, object6, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata6})
if err != nil {
t.Fatalf("Failed to putObject %v", err)
}
parts6, errs6 := readAllFileInfo(ctx, erasureDisks, bucket, object6, "")
parts6SC := storageclass.Config{
RRS: storageclass.StorageClass{
Parity: 2,
},
}
// Object for test case 7 - Standard StorageClass defined as Parity 5, MetaData in PutObject requesting RRS Class
// Reset global storage class flags
object7 := "object7"
metadata7 := make(map[string]string)
metadata7["x-amz-storage-class"] = storageclass.RRS
globalStorageClass = storageclass.Config{
Standard: storageclass.StorageClass{
Parity: 5,
},
}
_, err = obj.PutObject(ctx, bucket, object7, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata7})
if err != nil {
t.Fatalf("Failed to putObject %v", err)
}
parts7, errs7 := readAllFileInfo(ctx, erasureDisks, bucket, object7, "")
parts7SC := storageclass.Config{
Standard: storageclass.StorageClass{
Parity: 5,
},
}
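// As the expectations below suggest, for this 16-drive setup the read
// quorum equals the data-drive count (16 - parity) and the write quorum
// equals it as well, bumped by one when data and parity counts are equal:
// default parity 8 gives 8/9, parity 6 gives 10/10, parity 2 gives 14/14.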
tests := []struct {
parts []FileInfo
errs []error
expectedReadQuorum int
expectedWriteQuorum int
storageClassCfg storageclass.Config
expectedError error
}{
{parts1, errs1, 8, 9, parts1SC, nil},
{parts2, errs2, 14, 14, parts2SC, nil},
{parts3, errs3, 8, 9, parts3SC, nil},
{parts4, errs4, 10, 10, parts4SC, nil},
{parts5, errs5, 14, 14, parts5SC, nil},
{parts6, errs6, 8, 9, parts6SC, nil},
{parts7, errs7, 14, 14, parts7SC, nil},
}
for _, tt := range tests {
tt := tt
t.(*testing.T).Run("", func(t *testing.T) {
globalStorageClass = tt.storageClassCfg
actualReadQuorum, actualWriteQuorum, err := objectQuorumFromMeta(ctx, *xl, tt.parts, tt.errs)
if tt.expectedError != nil && err == nil {
t.Errorf("Expected %s, got %s", tt.expectedError, err)
}
if tt.expectedError == nil && err != nil {
t.Errorf("Expected %s, got %s", tt.expectedError, err)
}
if tt.expectedReadQuorum != actualReadQuorum {
t.Errorf("Expected Read Quorum %d, got %d", tt.expectedReadQuorum, actualReadQuorum)
}
if tt.expectedWriteQuorum != actualWriteQuorum {
t.Errorf("Expected Write Quorum %d, got %d", tt.expectedWriteQuorum, actualWriteQuorum)
}
})
}
}