Mirror of https://github.com/minio/minio.git, synced 2025-11-09 21:49:46 -05:00.
This change provides new implementations of the XL backend operations: create file, read file, and heal file. Furthermore, this change adds table-based tests for all three operations. It also affects the bitrot-algorithm integration: algorithms are now integrated in an idiomatic way (like crypto.Hash). Fixes #4696, #4649, #4359.
This commit is contained in:
committed by
Dee Koder
parent
617f2394fb
commit
85fcee1919
@@ -19,111 +19,124 @@ package cmd
|
||||
import (
	"bytes"
	"crypto/rand"
	"io"
	"os"
	"path"
	"reflect"
	"testing"

	humanize "github.com/dustin/go-humanize"
)
|
||||
|
||||
// Test erasureHealFile()
|
||||
var erasureHealFileTests = []struct {
|
||||
dataBlocks int
|
||||
disks, offDisks, badDisks, badOffDisks int
|
||||
blocksize, size int64
|
||||
algorithm BitrotAlgorithm
|
||||
shouldFail bool
|
||||
shouldFailQuorum bool
|
||||
}{
|
||||
{dataBlocks: 2, disks: 4, offDisks: 1, badDisks: 0, badOffDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false}, // 0
|
||||
{dataBlocks: 3, disks: 6, offDisks: 2, badDisks: 0, badOffDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 1
|
||||
{dataBlocks: 4, disks: 8, offDisks: 2, badDisks: 1, badOffDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 2
|
||||
{dataBlocks: 5, disks: 10, offDisks: 3, badDisks: 1, badOffDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 3
|
||||
{dataBlocks: 6, disks: 12, offDisks: 2, badDisks: 3, badOffDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: SHA256, shouldFail: false, shouldFailQuorum: false}, // 4
|
||||
{dataBlocks: 7, disks: 14, offDisks: 4, badDisks: 1, badOffDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 5
|
||||
{dataBlocks: 8, disks: 16, offDisks: 6, badDisks: 1, badOffDisks: 1, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 6
|
||||
{dataBlocks: 7, disks: 14, offDisks: 2, badDisks: 3, badOffDisks: 0, blocksize: int64(oneMiByte / 2), size: oneMiByte, algorithm: BLAKE2b512, shouldFail: true, shouldFailQuorum: false}, // 7
|
||||
{dataBlocks: 6, disks: 12, offDisks: 1, badDisks: 0, badOffDisks: 1, blocksize: int64(oneMiByte - 1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: true, shouldFailQuorum: false}, // 8
|
||||
{dataBlocks: 5, disks: 10, offDisks: 3, badDisks: 0, badOffDisks: 3, blocksize: int64(oneMiByte / 2), size: oneMiByte, algorithm: SHA256, shouldFail: true, shouldFailQuorum: false}, // 9
|
||||
{dataBlocks: 4, disks: 8, offDisks: 1, badDisks: 1, badOffDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 10
|
||||
{dataBlocks: 2, disks: 4, offDisks: 1, badDisks: 0, badOffDisks: 1, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 11
|
||||
{dataBlocks: 6, disks: 12, offDisks: 8, badDisks: 3, badOffDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 12
|
||||
{dataBlocks: 7, disks: 14, offDisks: 3, badDisks: 4, badOffDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 13
|
||||
{dataBlocks: 7, disks: 14, offDisks: 6, badDisks: 1, badOffDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 14
|
||||
{dataBlocks: 8, disks: 16, offDisks: 4, badDisks: 5, badOffDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: true}, // 15
|
||||
{dataBlocks: 2, disks: 4, offDisks: 0, badDisks: 0, badOffDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 16
|
||||
{dataBlocks: 2, disks: 4, offDisks: 0, badDisks: 0, badOffDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: 0, shouldFail: true, shouldFailQuorum: false}, // 17
|
||||
{dataBlocks: 12, disks: 16, offDisks: 2, badDisks: 1, badOffDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: DefaultBitrotAlgorithm, shouldFail: false, shouldFailQuorum: false}, // 18
|
||||
{dataBlocks: 6, disks: 8, offDisks: 1, badDisks: 0, badOffDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: BLAKE2b512, shouldFail: false, shouldFailQuorum: false}, // 19
|
||||
{dataBlocks: 7, disks: 10, offDisks: 1, badDisks: 0, badOffDisks: 0, blocksize: int64(blockSizeV1), size: oneMiByte, algorithm: 0, shouldFail: true, shouldFailQuorum: false}, // 20
|
||||
}
|
||||
|
||||
func TestErasureHealFile(t *testing.T) {
|
||||
// Initialize environment needed for the test.
|
||||
dataBlocks := 7
|
||||
parityBlocks := 7
|
||||
blockSize := int64(blockSizeV1)
|
||||
setup, err := newErasureTestSetup(dataBlocks, parityBlocks, blockSize)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
defer setup.Remove()
|
||||
|
||||
disks := setup.disks
|
||||
|
||||
// Prepare a slice of 1MiB with random data.
|
||||
data := make([]byte, 1*humanize.MiByte)
|
||||
_, err = rand.Read(data)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Create a test file.
|
||||
_, size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject1", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if size != int64(len(data)) {
|
||||
t.Errorf("erasureCreateFile returned %d, expected %d", size, len(data))
|
||||
}
|
||||
|
||||
latest := make([]StorageAPI, len(disks)) // Slice of latest disks
|
||||
outDated := make([]StorageAPI, len(disks)) // Slice of outdated disks
|
||||
|
||||
// Test case when one part needs to be healed.
|
||||
dataPath := path.Join(setup.diskPaths[0], "testbucket", "testobject1")
|
||||
err = os.Remove(dataPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
copy(latest, disks)
|
||||
latest[0] = nil
|
||||
outDated[0] = disks[0]
|
||||
|
||||
healCheckSums, err := erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*humanize.MiByte, blockSize, dataBlocks, parityBlocks, bitRotAlgo)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Checksum of the healed file should match.
|
||||
if checkSums[0] != healCheckSums[0] {
|
||||
t.Error("Healing failed, data does not match.")
|
||||
}
|
||||
|
||||
// Test case when parityBlocks number of disks need to be healed.
|
||||
// Should succeed.
|
||||
copy(latest, disks)
|
||||
for index := 0; index < parityBlocks; index++ {
|
||||
dataPath := path.Join(setup.diskPaths[index], "testbucket", "testobject1")
|
||||
err = os.Remove(dataPath)
|
||||
for i, test := range erasureHealFileTests {
|
||||
setup, err := newErasureTestSetup(test.dataBlocks, test.disks-test.dataBlocks, test.blocksize)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
t.Fatalf("Test %d: failed to setup XL environment: %v", i, err)
|
||||
}
|
||||
|
||||
latest[index] = nil
|
||||
outDated[index] = disks[index]
|
||||
}
|
||||
|
||||
healCheckSums, err = erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*humanize.MiByte, blockSize, dataBlocks, parityBlocks, bitRotAlgo)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Checksums of the healed files should match.
|
||||
for index := 0; index < parityBlocks; index++ {
|
||||
if checkSums[index] != healCheckSums[index] {
|
||||
t.Error("Healing failed, data does not match.")
|
||||
}
|
||||
}
|
||||
for index := dataBlocks; index < len(disks); index++ {
|
||||
if healCheckSums[index] != "" {
|
||||
t.Errorf("expected healCheckSums[%d] to be empty", index)
|
||||
}
|
||||
}
|
||||
|
||||
// Test case when parityBlocks+1 number of disks need to be healed.
|
||||
// Should fail.
|
||||
copy(latest, disks)
|
||||
for index := 0; index < parityBlocks+1; index++ {
|
||||
dataPath := path.Join(setup.diskPaths[index], "testbucket", "testobject1")
|
||||
err = os.Remove(dataPath)
|
||||
storage, err := NewErasureStorage(setup.disks, test.dataBlocks, test.disks-test.dataBlocks)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
setup.Remove()
|
||||
t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
|
||||
}
|
||||
offline := make([]StorageAPI, len(storage.disks))
|
||||
copy(offline, storage.disks)
|
||||
|
||||
data := make([]byte, test.size)
|
||||
if _, err = io.ReadFull(rand.Reader, data); err != nil {
|
||||
setup.Remove()
|
||||
t.Fatalf("Test %d: failed to create random test data: %v", i, err)
|
||||
}
|
||||
|
||||
latest[index] = nil
|
||||
outDated[index] = disks[index]
|
||||
}
|
||||
_, err = erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*humanize.MiByte, blockSize, dataBlocks, parityBlocks, bitRotAlgo)
|
||||
if err == nil {
|
||||
t.Error("Expected erasureHealFile() to fail when the number of available disks <= parityBlocks")
|
||||
algorithm := test.algorithm
|
||||
if !algorithm.Available() {
|
||||
algorithm = DefaultBitrotAlgorithm
|
||||
}
|
||||
buffer := make([]byte, test.blocksize, 2*test.blocksize)
|
||||
file, err := storage.CreateFile(bytes.NewReader(data), "testbucket", "testobject", buffer, algorithm, test.dataBlocks+1)
|
||||
if err != nil {
|
||||
setup.Remove()
|
||||
t.Fatalf("Test %d: failed to create random test data: %v", i, err)
|
||||
}
|
||||
|
||||
info, err := storage.HealFile(offline, "testbucket", "testobject", test.blocksize, "testbucket", "healedobject", test.size, test.algorithm, file.Checksums)
|
||||
if err != nil && !test.shouldFail {
|
||||
t.Errorf("Test %d: should pass but it failed with: %v", i, err)
|
||||
}
|
||||
if err == nil && test.shouldFail {
|
||||
t.Errorf("Test %d: should fail but it passed", i)
|
||||
}
|
||||
if err == nil {
|
||||
if info.Size != test.size {
|
||||
t.Errorf("Test %d: healed wrong number of bytes: got: #%d want: #%d", i, info.Size, test.size)
|
||||
}
|
||||
if info.Algorithm != test.algorithm {
|
||||
t.Errorf("Test %d: healed with wrong algorithm: got: %v want: %v", i, info.Algorithm, test.algorithm)
|
||||
}
|
||||
if !reflect.DeepEqual(info.Checksums, file.Checksums) {
|
||||
t.Errorf("Test %d: heal returned different bitrot keys", i)
|
||||
}
|
||||
}
|
||||
if err == nil && !test.shouldFail {
|
||||
for j := 0; j < len(storage.disks); j++ {
|
||||
if j < test.offDisks {
|
||||
storage.disks[j] = OfflineDisk
|
||||
} else {
|
||||
offline[j] = OfflineDisk
|
||||
}
|
||||
}
|
||||
for j := 0; j < test.badDisks; j++ {
|
||||
storage.disks[test.offDisks+j] = badDisk{nil}
|
||||
}
|
||||
for j := 0; j < test.badOffDisks; j++ {
|
||||
offline[j] = badDisk{nil}
|
||||
}
|
||||
info, err := storage.HealFile(offline, "testbucket", "testobject", test.blocksize, "testbucket", "healedobject", test.size, test.algorithm, file.Checksums)
|
||||
if err != nil && !test.shouldFailQuorum {
|
||||
t.Errorf("Test %d: should pass but it failed with: %v", i, err)
|
||||
}
|
||||
if err == nil && test.shouldFailQuorum {
|
||||
t.Errorf("Test %d: should fail but it passed", i)
|
||||
}
|
||||
if err == nil {
|
||||
if info.Size != test.size {
|
||||
t.Errorf("Test %d: healed wrong number of bytes: got: #%d want: #%d", i, info.Size, test.size)
|
||||
}
|
||||
if info.Algorithm != test.algorithm {
|
||||
t.Errorf("Test %d: healed with wrong algorithm: got: %v want: %v", i, info.Algorithm, test.algorithm)
|
||||
}
|
||||
if !reflect.DeepEqual(info.Checksums, file.Checksums) {
|
||||
t.Errorf("Test %d: heal returned different bitrot checksums", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
setup.Remove()
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user