/*
 * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"math/rand"
	"reflect"
	"testing"

	humanize "github.com/dustin/go-humanize"
	"github.com/minio/minio/pkg/bpool"
)

// Tests getReadDisks which returns a slice of readable disks from which
// we can read in parallel.
func testGetReadDisks(t *testing.T, xl *xlObjects) {
	d := xl.storageDisks
	testCases := []struct {
		index     int          // index argument for getReadDisks
		argDisks  []StorageAPI // disks argument for getReadDisks
		retDisks  []StorageAPI // disks return value from getReadDisks
		nextIndex int          // return value from getReadDisks
		err       error        // error return value from getReadDisks
	}{
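		// The expected slices below assume xl.dataBlocks == 8, i.e. the 16-disk
		// object layer created in TestErasureReadUtils split evenly into data
		// and parity blocks.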
		// Test case - 1.
		// When all disks are available, should return data disks.
		{
			0,
			[]StorageAPI{d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]},
			[]StorageAPI{d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], nil, nil, nil, nil, nil, nil, nil, nil},
			8,
			nil,
		},
		// Test case - 2.
		// If a parity disk is down, should return all data disks.
		{
			0,
			[]StorageAPI{d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8], nil, d[10], d[11], d[12], d[13], d[14], d[15]},
			[]StorageAPI{d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], nil, nil, nil, nil, nil, nil, nil, nil},
			8,
			nil,
		},
		// Test case - 3.
		// If a data disk is down, should return 7 data and 1 parity disk.
		{
			0,
			[]StorageAPI{nil, d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]},
			[]StorageAPI{nil, d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8], nil, nil, nil, nil, nil, nil, nil},
			9,
			nil,
		},
		// Test case - 4.
		// If 7 data disks are down, should return 1 data and 7 parity disks.
		{
			0,
			[]StorageAPI{nil, nil, nil, nil, nil, nil, nil, d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]},
			[]StorageAPI{nil, nil, nil, nil, nil, nil, nil, d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], nil},
			15,
			nil,
		},
		// Test case - 5.
		// When 2 disks fail during parallelRead, the next call to getReadDisks
		// should return the next 2 readable disks (d[8], d[9]).
		{
			8,
			[]StorageAPI{nil, nil, d[2], d[3], d[4], d[5], d[6], d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]},
			[]StorageAPI{nil, nil, nil, nil, nil, nil, nil, nil, d[8], d[9], nil, nil, nil, nil, nil, nil},
			10,
			nil,
		},
		// Test case - 6.
		// If the 2 disks returned previously fail as well, the next call
		// (starting at index 11) should return the next readable disk (d[11]).
		{
			11,
			[]StorageAPI{nil, nil, d[2], d[3], d[4], d[5], d[6], d[7], nil, nil, d[10], d[11], d[12], d[13], d[14], d[15]},
			[]StorageAPI{nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, d[11], nil, nil, nil, nil},
			12,
			nil,
		},
		// Test case - 7.
		// No more disks are available for read, should return errXLReadQuorum.
		{
			13,
			[]StorageAPI{nil, nil, d[2], d[3], d[4], d[5], d[6], d[7], nil, nil, d[10], nil, nil, nil, nil, nil},
			nil,
			0,
			errXLReadQuorum,
		},
	}
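
	// Run every test case and verify the returned disk slice, the next index
	// to resume from, and the error against the expected values.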
	for i, test := range testCases {
		disks, nextIndex, err := getReadDisks(test.argDisks, test.index, xl.dataBlocks)
		if errorCause(err) != test.err {
			t.Errorf("test-case %d - expected error : %s, got : %s", i+1, test.err, err)
			continue
		}
		if test.nextIndex != nextIndex {
			t.Errorf("test-case %d - expected nextIndex: %d, got : %d", i+1, test.nextIndex, nextIndex)
			continue
		}
		if !reflect.DeepEqual(test.retDisks, disks) {
			t.Errorf("test-case %d : incorrect disks returned. expected %+v, got %+v", i+1, test.retDisks, disks)
			continue
		}
	}
}

// Test for isSuccessDataBlocks and isSuccessDecodeBlocks.
func TestIsSuccessBlocks(t *testing.T) {
	dataBlocks := 8
	testCases := []struct {
		enBlocks      [][]byte // data and parity blocks.
		successData   bool     // expected return value of isSuccessDataBlocks()
		successDecode bool     // expected return value of isSuccessDecodeBlocks()
	}{
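		// Each enBlocks slice has 16 entries: the first 8 are data blocks and
		// the last 8 are parity blocks; nil marks a block that could not be read.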
		{
			// When all data and parity blocks are available.
			[][]byte{
				{'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'},
				{'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'},
			},
			true,
			true,
		},
		{
			// When one data block is not available.
			[][]byte{
				nil, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'},
				{'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'},
			},
			false,
			true,
		},
		{
			// When one data block and all parity blocks are available, enough for reedsolomon.Reconstruct()
			[][]byte{
				nil, nil, nil, nil, nil, nil, nil, {'a'},
				{'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'},
			},
			false,
			true,
		},
		{
			// When no data blocks are available, still enough for reedsolomon.Reconstruct()
			[][]byte{
				nil, nil, nil, nil, nil, nil, nil, nil,
				{'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'},
			},
			false,
			true,
		},
		{
			// Not enough blocks for reedsolomon.Reconstruct()
			[][]byte{
				nil, nil, nil, nil, nil, nil, nil, nil,
				nil, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'},
			},
			false,
			false,
		},
	}
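
	// isSuccessDataBlocks requires every data block to be present, while
	// isSuccessDecodeBlocks only requires any dataBlocks blocks (data or parity)
	// so that reedsolomon.Reconstruct() can rebuild the missing ones.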
	for i, test := range testCases {
		got := isSuccessDataBlocks(test.enBlocks, dataBlocks)
		if test.successData != got {
			t.Errorf("test-case %d : expected %v got %v", i+1, test.successData, got)
		}
		got = isSuccessDecodeBlocks(test.enBlocks, dataBlocks)
		if test.successDecode != got {
			t.Errorf("test-case %d : expected %v got %v", i+1, test.successDecode, got)
		}
	}
}

// Wrapper function for testGetReadDisks, testShuffleDisks.
func TestErasureReadUtils(t *testing.T) {
	nDisks := 16
	disks, err := getRandomDisks(nDisks)
	if err != nil {
		t.Fatal(err)
	}
	objLayer, _, err := initObjectLayer(mustGetNewEndpointList(disks...))
	if err != nil {
		removeRoots(disks)
		t.Fatal(err)
	}
	defer removeRoots(disks)
	xl := objLayer.(*xlObjects)
	testGetReadDisks(t, xl)
}

// Simulates a faulty disk for ReadFile()
type ReadDiskDown struct {
	*posix
}

func (r ReadDiskDown) ReadFile(volume string, path string, offset int64, buf []byte) (n int64, err error) {
	return 0, errFaultyDisk
}
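
// Wrapping a *posix disk in ReadDiskDown overrides only ReadFile, which always
// fails with errFaultyDisk; every other StorageAPI method is promoted unchanged
// from the embedded *posix, so the wrapped disk behaves normally otherwise.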

func TestErasureReadFileDiskFail(t *testing.T) {
	// Initialize environment needed for the test.
	dataBlocks := 7
	parityBlocks := 7
	blockSize := int64(blockSizeV1)
	setup, err := newErasureTestSetup(dataBlocks, parityBlocks, blockSize)
	if err != nil {
		t.Error(err)
		return
	}
	defer setup.Remove()

	disks := setup.disks

	// Prepare a slice of 1 MiB with random data.
	data := make([]byte, 1*humanize.MiByte)
	length := int64(len(data))
	_, err = rand.Read(data)
	if err != nil {
		t.Fatal(err)
	}

	// Create a test file to read from.
	size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	if err != nil {
		t.Fatal(err)
	}
	if size != length {
		t.Errorf("erasureCreateFile returned %d, expected %d", size, length)
	}

	// Create a byte pool which will be used by erasureReadFile for
	// reading from disks and erasure decoding.
	chunkSize := getChunkSize(blockSize, dataBlocks)
	pool := bpool.NewBytePool(chunkSize, len(disks))

	buf := &bytes.Buffer{}
	_, err = erasureReadFile(buf, disks, "testbucket", "testobject", 0, length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
	if err != nil {
		t.Error(err)
	}
	if !bytes.Equal(buf.Bytes(), data) {
		t.Error("Contents of the erasure coded file differs")
	}
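
	// With 7 data and 7 parity blocks, erasureReadFile needs at least 7 readable
	// disks. The scenarios below take down 2, then 6, then 8 of the 14 disks to
	// show reads succeeding on either side of that threshold and failing beyond it.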

	// 2 disks down. Read should succeed.
	disks[4] = ReadDiskDown{disks[4].(*posix)}
	disks[5] = ReadDiskDown{disks[5].(*posix)}

	buf.Reset()
	_, err = erasureReadFile(buf, disks, "testbucket", "testobject", 0, length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
	if err != nil {
		t.Error(err)
	}
	if !bytes.Equal(buf.Bytes(), data) {
		t.Error("Contents of the erasure coded file differs")
	}

	// 4 more disks down. 6 disks down in total. Read should succeed.
	disks[6] = ReadDiskDown{disks[6].(*posix)}
	disks[8] = ReadDiskDown{disks[8].(*posix)}
	disks[9] = ReadDiskDown{disks[9].(*posix)}
	disks[11] = ReadDiskDown{disks[11].(*posix)}

	buf.Reset()
	_, err = erasureReadFile(buf, disks, "testbucket", "testobject", 0, length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
	if err != nil {
		t.Error(err)
	}
	if !bytes.Equal(buf.Bytes(), data) {
		t.Error("Contents of the erasure coded file differs")
	}

	// 2 more disks down. 8 disks down in total. Read should fail.
	disks[12] = ReadDiskDown{disks[12].(*posix)}
	disks[13] = ReadDiskDown{disks[13].(*posix)}
	buf.Reset()
	_, err = erasureReadFile(buf, disks, "testbucket", "testobject", 0, length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
	if errorCause(err) != errXLReadQuorum {
		t.Fatal("expected errXLReadQuorum error")
	}
}

func TestErasureReadFileOffsetLength(t *testing.T) {
	// Initialize environment needed for the test.
	dataBlocks := 7
	parityBlocks := 7
	blockSize := int64(1 * humanize.MiByte)
	setup, err := newErasureTestSetup(dataBlocks, parityBlocks, blockSize)
	if err != nil {
		t.Error(err)
		return
	}
	defer setup.Remove()

	disks := setup.disks

	// Prepare a slice of 5 MiB with random data.
	data := make([]byte, 5*humanize.MiByte)
	length := int64(len(data))
	_, err = rand.Read(data)
	if err != nil {
		t.Fatal(err)
	}

	// Create a test file to read from.
	size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	if err != nil {
		t.Fatal(err)
	}
	if size != length {
		t.Errorf("erasureCreateFile returned %d, expected %d", size, length)
	}

	testCases := []struct {
		offset, length int64
	}{
		// Full file.
		{0, length},
		// Read nothing.
		{length, 0},
		// 2nd block.
		{blockSize, blockSize},
		// Offsets and lengths that straddle block boundaries.
		{blockSize - 1, 2},
		{blockSize - 1, blockSize + 1},
		{blockSize + 1, blockSize - 1},
		{blockSize + 1, blockSize},
		{blockSize + 1, blockSize + 1},
		{blockSize*2 - 1, blockSize + 1},
		{length - 1, 1},
		{length - blockSize, blockSize},
		{length - blockSize - 1, blockSize},
		{length - blockSize - 1, blockSize + 1},
	}
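
	// chunkSize is the portion of an erasure block that lives on a single disk;
	// the pool provides one reusable buffer of that size per disk for erasureReadFile.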
	chunkSize := getChunkSize(blockSize, dataBlocks)
	pool := bpool.NewBytePool(chunkSize, len(disks))

	// Compare the data read from file with "data" byte array.
	for i, testCase := range testCases {
		expected := data[testCase.offset:(testCase.offset + testCase.length)]
		buf := &bytes.Buffer{}
		_, err = erasureReadFile(buf, disks, "testbucket", "testobject", testCase.offset, testCase.length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
		if err != nil {
			t.Error(err)
			continue
		}
		got := buf.Bytes()
		if !bytes.Equal(expected, got) {
			t.Errorf("Test %d : read data is different from what was expected", i+1)
		}
	}
}

// Test erasureReadFile with random offsets and lengths.
// This test is t.Skip()ed as it takes a long time to run, hence should be run
// explicitly after commenting out t.SkipNow() below.
func TestErasureReadFileRandomOffsetLength(t *testing.T) {
	// Comment out the following line to run this test.
	t.SkipNow()

	// Initialize environment needed for the test.
	dataBlocks := 7
	parityBlocks := 7
	blockSize := int64(1 * humanize.MiByte)
	setup, err := newErasureTestSetup(dataBlocks, parityBlocks, blockSize)
	if err != nil {
		t.Error(err)
		return
	}
	defer setup.Remove()

	disks := setup.disks

	// Prepare a slice of 5 MiB with random data.
	data := make([]byte, 5*humanize.MiByte)
	length := int64(len(data))
	_, err = rand.Read(data)
	if err != nil {
		t.Fatal(err)
	}

	// 10000 iterations with random offsets and lengths.
	iterations := 10000

	// Create a test file to read from.
	size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), true, blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	if err != nil {
		t.Fatal(err)
	}
	if size != length {
		t.Errorf("erasureCreateFile returned %d, expected %d", size, length)
	}

	// To generate random offsets and lengths.
	r := rand.New(rand.NewSource(UTCNow().UnixNano()))

	// Create a pool of buffers which will be used by erasureReadFile for
	// reading from disks and erasure decoding.
	chunkSize := getChunkSize(blockSize, dataBlocks)
	pool := bpool.NewBytePool(chunkSize, len(disks))

	buf := &bytes.Buffer{}

	// Verify erasureReadFile() for random offsets and lengths.
	for i := 0; i < iterations; i++ {
		offset := r.Int63n(length)
		readLen := r.Int63n(length - offset)

		expected := data[offset : offset+readLen]

		_, err = erasureReadFile(buf, disks, "testbucket", "testobject", offset, readLen, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
		if err != nil {
			t.Fatal(err, offset, readLen)
		}
		got := buf.Bytes()
		if !bytes.Equal(expected, got) {
			t.Fatalf("read data is different from what was expected, offset=%d length=%d", offset, readLen)
		}
		buf.Reset()
	}
}