/*
 * MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package cmd
2016-07-09 16:01:32 -04:00
2016-07-21 22:07:00 -04:00
import (
2017-08-14 21:08:42 -04:00
"bytes"
"encoding/hex"
2016-09-09 01:38:18 -04:00
"encoding/json"
2016-07-21 22:07:00 -04:00
"reflect"
"testing"
2017-01-30 18:44:42 -05:00
humanize "github.com/dustin/go-humanize"
2016-07-21 22:07:00 -04:00
)
2016-07-09 16:01:32 -04:00
2016-09-16 16:44:52 -04:00
// Tests caclculating disk count.
func TestDiskCount ( t * testing . T ) {
testCases := [ ] struct {
disks [ ] StorageAPI
diskCount int
} {
// Test case - 1
{
disks : [ ] StorageAPI { & posix { } , & posix { } , & posix { } , & posix { } } ,
diskCount : 4 ,
} ,
// Test case - 2
{
disks : [ ] StorageAPI { nil , & posix { } , & posix { } , & posix { } } ,
diskCount : 3 ,
} ,
}
for i , testCase := range testCases {
cdiskCount := diskCount ( testCase . disks )
if cdiskCount != testCase . diskCount {
t . Errorf ( "Test %d: Expected %d, got %d" , i + 1 , testCase . diskCount , cdiskCount )
}
}
}
2016-07-21 22:07:00 -04:00
// Test for reduceErrs, reduceErr reduces collection
// of errors into a single maximal error with in the list.
2016-07-09 16:01:32 -04:00
func TestReduceErrs ( t * testing . T ) {
2016-07-19 22:24:32 -04:00
// List all of all test cases to validate various cases of reduce errors.
2016-07-09 16:01:32 -04:00
testCases := [ ] struct {
2016-07-19 22:24:32 -04:00
errs [ ] error
ignoredErrs [ ] error
err error
2016-07-09 16:01:32 -04:00
} {
2016-07-19 22:24:32 -04:00
// Validate if have reduced properly.
{ [ ] error {
errDiskNotFound ,
errDiskNotFound ,
errDiskFull ,
2016-11-21 04:47:26 -05:00
} , [ ] error { } , errXLReadQuorum } ,
2016-07-19 22:24:32 -04:00
// Validate if have no consensus.
{ [ ] error {
errDiskFull ,
errDiskNotFound ,
nil , nil ,
2016-11-21 04:47:26 -05:00
} , [ ] error { } , errXLReadQuorum } ,
2016-07-19 22:24:32 -04:00
// Validate if have consensus and errors ignored.
{ [ ] error {
2016-11-21 04:47:26 -05:00
errVolumeNotFound ,
errVolumeNotFound ,
2016-07-19 22:24:32 -04:00
errVolumeNotFound ,
errVolumeNotFound ,
errVolumeNotFound ,
errDiskNotFound ,
errDiskNotFound ,
} , [ ] error { errDiskNotFound } , errVolumeNotFound } ,
2016-11-21 04:47:26 -05:00
{ [ ] error { } , [ ] error { } , errXLReadQuorum } ,
2017-03-22 13:15:16 -04:00
{ [ ] error { errFileNotFound , errFileNotFound , errFileNotFound ,
errFileNotFound , errFileNotFound , nil , nil , nil , nil , nil } ,
nil , nil } ,
2016-07-09 16:01:32 -04:00
}
2016-07-19 22:24:32 -04:00
// Validates list of all the testcases for returning valid errors.
2016-07-09 16:01:32 -04:00
for i , testCase := range testCases {
2020-04-09 12:30:02 -04:00
gotErr := reduceReadQuorumErrs ( GlobalContext , testCase . errs , testCase . ignoredErrs , 5 )
2018-04-10 12:36:37 -04:00
if gotErr != testCase . err {
2016-07-13 14:56:25 -04:00
t . Errorf ( "Test %d : expected %s, got %s" , i + 1 , testCase . err , gotErr )
}
2020-04-09 12:30:02 -04:00
gotNewErr := reduceWriteQuorumErrs ( GlobalContext , testCase . errs , testCase . ignoredErrs , 6 )
2018-04-10 12:36:37 -04:00
if gotNewErr != errXLWriteQuorum {
2016-11-21 04:47:26 -05:00
t . Errorf ( "Test %d : expected %s, got %s" , i + 1 , errXLWriteQuorum , gotErr )
}
2016-07-09 16:01:32 -04:00
}
}
2016-07-12 21:23:40 -04:00
2016-07-21 22:07:00 -04:00
// TestHashOrder - test order of ints in array
func TestHashOrder ( t * testing . T ) {
testCases := [ ] struct {
objectName string
hashedOrder [ ] int
} {
// cases which should pass the test.
// passing in valid object name.
2017-11-30 15:57:03 -05:00
{ "object" , [ ] int { 14 , 15 , 16 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 } } ,
{ "The Shining Script <v1>.pdf" , [ ] int { 16 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 } } ,
2016-07-21 22:07:00 -04:00
{ "Cost Benefit Analysis (2009-2010).pptx" , [ ] int { 15 , 16 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 } } ,
{ "117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A" , [ ] int { 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , 1 , 2 } } ,
{ "SHØRT" , [ ] int { 11 , 12 , 13 , 14 , 15 , 16 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 } } ,
{ "There are far too many object names, and far too few bucket names!" , [ ] int { 15 , 16 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 } } ,
{ "a/b/c/" , [ ] int { 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , 1 , 2 } } ,
2017-11-30 15:57:03 -05:00
{ "/a/b/c" , [ ] int { 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , 1 , 2 , 3 , 4 , 5 } } ,
2016-07-21 22:07:00 -04:00
{ string ( [ ] byte { 0xff , 0xfe , 0xfd } ) , [ ] int { 15 , 16 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 } } ,
2016-07-12 21:23:40 -04:00
}
2016-07-21 22:07:00 -04:00
// Tests hashing order to be consistent.
for i , testCase := range testCases {
hashedOrder := hashOrder ( testCase . objectName , 16 )
if ! reflect . DeepEqual ( testCase . hashedOrder , hashedOrder ) {
2018-02-15 20:45:57 -05:00
t . Errorf ( "Test case %d: Expected \"%v\" but failed \"%v\"" , i + 1 , testCase . hashedOrder , hashedOrder )
2016-07-12 21:23:40 -04:00
}
}
2016-07-21 22:07:00 -04:00
// Tests hashing order to fail for when order is '-1'.
if hashedOrder := hashOrder ( "This will fail" , - 1 ) ; hashedOrder != nil {
t . Errorf ( "Test: Expect \"nil\" but failed \"%#v\"" , hashedOrder )
2016-07-12 21:23:40 -04:00
}
2018-02-15 20:45:57 -05:00
if hashedOrder := hashOrder ( "This will fail" , 0 ) ; hashedOrder != nil {
t . Errorf ( "Test: Expect \"nil\" but failed \"%#v\"" , hashedOrder )
}
2016-07-12 21:23:40 -04:00
}
2016-09-09 01:38:18 -04:00
// newTestXLMetaV1 - initializes new xlMetaV1, adds version, allocates a fresh erasure info and metadata.
func newTestXLMetaV1 ( ) xlMetaV1 {
xlMeta := xlMetaV1 { }
2017-01-18 15:24:34 -05:00
xlMeta . Version = xlMetaVersion
xlMeta . Format = xlMetaFormat
xlMeta . Minio . Release = "test"
2017-08-14 21:08:42 -04:00
xlMeta . Erasure = ErasureInfo {
2016-09-09 01:38:18 -04:00
Algorithm : "klauspost/reedsolomon/vandermonde" ,
DataBlocks : 5 ,
ParityBlocks : 5 ,
BlockSize : 10485760 ,
Index : 10 ,
Distribution : [ ] int { 9 , 10 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 } ,
}
xlMeta . Stat = statInfo {
Size : int64 ( 20 ) ,
2017-03-18 14:28:41 -04:00
ModTime : UTCNow ( ) ,
2016-09-09 01:38:18 -04:00
}
// Set meta data.
xlMeta . Meta = make ( map [ string ] string )
xlMeta . Meta [ "testKey1" ] = "val1"
xlMeta . Meta [ "testKey2" ] = "val2"
return xlMeta
}
2020-03-02 19:29:30 -05:00
func ( m * xlMetaV1 ) AddTestObjectCheckSum ( partNumber int , algorithm BitrotAlgorithm , hash string ) {
2017-08-14 21:08:42 -04:00
checksum , err := hex . DecodeString ( hash )
if err != nil {
panic ( err )
2016-09-09 01:38:18 -04:00
}
2020-03-02 19:29:30 -05:00
m . Erasure . Checksums [ partNumber - 1 ] = ChecksumInfo { partNumber , algorithm , checksum }
2016-09-09 01:38:18 -04:00
}
// AddTestObjectPart - add a new object part in order.
2020-03-02 19:29:30 -05:00
func ( m * xlMetaV1 ) AddTestObjectPart ( partNumber int , partSize int64 ) {
2019-01-05 17:16:43 -05:00
partInfo := ObjectPartInfo {
2016-09-09 01:38:18 -04:00
Number : partNumber ,
Size : partSize ,
}
// Proceed to include new part info.
2020-03-02 19:29:30 -05:00
m . Parts [ partNumber - 1 ] = partInfo
2016-09-09 01:38:18 -04:00
}
// Constructs xlMetaV1{} for given number of parts and converts it into bytes.
func getXLMetaBytes(totalParts int) []byte {
	metaBytes, err := json.Marshal(getSampleXLMeta(totalParts))
	if err != nil {
		panic(err)
	}
	return metaBytes
}
// Returns sample xlMetaV1{} for number of parts.
func getSampleXLMeta ( totalParts int ) xlMetaV1 {
xlMeta := newTestXLMetaV1 ( )
// Number of checksum info == total parts.
2017-08-14 21:08:42 -04:00
xlMeta . Erasure . Checksums = make ( [ ] ChecksumInfo , totalParts )
2016-09-09 01:38:18 -04:00
// total number of parts.
2019-01-05 17:16:43 -05:00
xlMeta . Parts = make ( [ ] ObjectPartInfo , totalParts )
2016-09-09 01:38:18 -04:00
for i := 0 ; i < totalParts ; i ++ {
// hard coding hash and algo value for the checksum, Since we are benchmarking the parsing of xl.json the magnitude doesn't affect the test,
// The magnitude doesn't make a difference, only the size does.
2020-03-02 19:29:30 -05:00
xlMeta . AddTestObjectCheckSum ( i + 1 , BLAKE2b512 , "a23f5eff248c4372badd9f3b2455a285cd4ca86c3d9a570b091d3fc5cd7ca6d9484bbea3f8c5d8d4f84daae96874419eda578fd736455334afbac2c924b3915a" )
xlMeta . AddTestObjectPart ( i + 1 , 67108864 )
2016-09-09 01:38:18 -04:00
}
return xlMeta
}
2019-09-05 18:51:27 -04:00
// Compare the unmarshaled XLMetaV1 with the one obtained from jsoniter parsing.
func compareXLMetaV1 ( t * testing . T , unMarshalXLMeta , jsoniterXLMeta xlMetaV1 ) {
// Start comparing the fields of xlMetaV1 obtained from jsoniter parsing with one parsed using json unmarshaling.
if unMarshalXLMeta . Version != jsoniterXLMeta . Version {
t . Errorf ( "Expected the Version to be \"%s\", but got \"%s\"." , unMarshalXLMeta . Version , jsoniterXLMeta . Version )
2016-09-09 01:38:18 -04:00
}
2019-09-05 18:51:27 -04:00
if unMarshalXLMeta . Format != jsoniterXLMeta . Format {
t . Errorf ( "Expected the format to be \"%s\", but got \"%s\"." , unMarshalXLMeta . Format , jsoniterXLMeta . Format )
2016-09-09 01:38:18 -04:00
}
2019-09-05 18:51:27 -04:00
if unMarshalXLMeta . Stat . Size != jsoniterXLMeta . Stat . Size {
t . Errorf ( "Expected the stat size to be %v, but got %v." , unMarshalXLMeta . Stat . Size , jsoniterXLMeta . Stat . Size )
2016-09-09 01:38:18 -04:00
}
2019-09-05 18:51:27 -04:00
if ! unMarshalXLMeta . Stat . ModTime . Equal ( jsoniterXLMeta . Stat . ModTime ) {
t . Errorf ( "Expected the modTime to be \"%v\", but got \"%v\"." , unMarshalXLMeta . Stat . ModTime , jsoniterXLMeta . Stat . ModTime )
2016-09-09 01:38:18 -04:00
}
2019-09-05 18:51:27 -04:00
if unMarshalXLMeta . Erasure . Algorithm != jsoniterXLMeta . Erasure . Algorithm {
t . Errorf ( "Expected the erasure algorithm to be \"%v\", but got \"%v\"." , unMarshalXLMeta . Erasure . Algorithm , jsoniterXLMeta . Erasure . Algorithm )
2016-09-09 01:38:18 -04:00
}
2019-09-05 18:51:27 -04:00
if unMarshalXLMeta . Erasure . DataBlocks != jsoniterXLMeta . Erasure . DataBlocks {
t . Errorf ( "Expected the erasure data blocks to be %v, but got %v." , unMarshalXLMeta . Erasure . DataBlocks , jsoniterXLMeta . Erasure . DataBlocks )
2016-09-09 01:38:18 -04:00
}
2019-09-05 18:51:27 -04:00
if unMarshalXLMeta . Erasure . ParityBlocks != jsoniterXLMeta . Erasure . ParityBlocks {
t . Errorf ( "Expected the erasure parity blocks to be %v, but got %v." , unMarshalXLMeta . Erasure . ParityBlocks , jsoniterXLMeta . Erasure . ParityBlocks )
2016-09-09 01:38:18 -04:00
}
2019-09-05 18:51:27 -04:00
if unMarshalXLMeta . Erasure . BlockSize != jsoniterXLMeta . Erasure . BlockSize {
t . Errorf ( "Expected the erasure block size to be %v, but got %v." , unMarshalXLMeta . Erasure . BlockSize , jsoniterXLMeta . Erasure . BlockSize )
2016-09-09 01:38:18 -04:00
}
2019-09-05 18:51:27 -04:00
if unMarshalXLMeta . Erasure . Index != jsoniterXLMeta . Erasure . Index {
t . Errorf ( "Expected the erasure index to be %v, but got %v." , unMarshalXLMeta . Erasure . Index , jsoniterXLMeta . Erasure . Index )
2016-09-09 01:38:18 -04:00
}
2019-09-05 18:51:27 -04:00
if len ( unMarshalXLMeta . Erasure . Distribution ) != len ( jsoniterXLMeta . Erasure . Distribution ) {
t . Errorf ( "Expected the size of Erasure Distribution to be %d, but got %d." , len ( unMarshalXLMeta . Erasure . Distribution ) , len ( jsoniterXLMeta . Erasure . Distribution ) )
2016-09-09 01:38:18 -04:00
} else {
for i := 0 ; i < len ( unMarshalXLMeta . Erasure . Distribution ) ; i ++ {
2019-09-05 18:51:27 -04:00
if unMarshalXLMeta . Erasure . Distribution [ i ] != jsoniterXLMeta . Erasure . Distribution [ i ] {
t . Errorf ( "Expected the Erasure Distribution to be %d, got %d." , unMarshalXLMeta . Erasure . Distribution [ i ] , jsoniterXLMeta . Erasure . Distribution [ i ] )
2016-09-09 01:38:18 -04:00
}
}
}
2019-09-05 18:51:27 -04:00
if len ( unMarshalXLMeta . Erasure . Checksums ) != len ( jsoniterXLMeta . Erasure . Checksums ) {
t . Errorf ( "Expected the size of Erasure Checksums to be %d, but got %d." , len ( unMarshalXLMeta . Erasure . Checksums ) , len ( jsoniterXLMeta . Erasure . Checksums ) )
2016-09-09 01:38:18 -04:00
} else {
2017-08-14 21:08:42 -04:00
for i := 0 ; i < len ( unMarshalXLMeta . Erasure . Checksums ) ; i ++ {
2020-03-02 19:29:30 -05:00
if unMarshalXLMeta . Erasure . Checksums [ i ] . PartNumber != jsoniterXLMeta . Erasure . Checksums [ i ] . PartNumber {
t . Errorf ( "Expected the Erasure Checksum PartNumber to be \"%d\", got \"%d\"." , unMarshalXLMeta . Erasure . Checksums [ i ] . PartNumber , jsoniterXLMeta . Erasure . Checksums [ i ] . PartNumber )
2016-09-09 01:38:18 -04:00
}
2019-09-05 18:51:27 -04:00
if unMarshalXLMeta . Erasure . Checksums [ i ] . Algorithm != jsoniterXLMeta . Erasure . Checksums [ i ] . Algorithm {
t . Errorf ( "Expected the Erasure Checksum Algorithm to be \"%s\", got \"%s\"." , unMarshalXLMeta . Erasure . Checksums [ i ] . Algorithm , jsoniterXLMeta . Erasure . Checksums [ i ] . Algorithm )
2016-09-09 01:38:18 -04:00
}
2019-09-05 18:51:27 -04:00
if ! bytes . Equal ( unMarshalXLMeta . Erasure . Checksums [ i ] . Hash , jsoniterXLMeta . Erasure . Checksums [ i ] . Hash ) {
t . Errorf ( "Expected the Erasure Checksum Hash to be \"%s\", got \"%s\"." , unMarshalXLMeta . Erasure . Checksums [ i ] . Hash , jsoniterXLMeta . Erasure . Checksums [ i ] . Hash )
2016-09-09 01:38:18 -04:00
}
}
}
2018-08-06 18:14:08 -04:00
2019-09-05 18:51:27 -04:00
if unMarshalXLMeta . Minio . Release != jsoniterXLMeta . Minio . Release {
t . Errorf ( "Expected the Release string to be \"%s\", but got \"%s\"." , unMarshalXLMeta . Minio . Release , jsoniterXLMeta . Minio . Release )
2016-09-09 01:38:18 -04:00
}
2019-09-05 18:51:27 -04:00
if len ( unMarshalXLMeta . Parts ) != len ( jsoniterXLMeta . Parts ) {
t . Errorf ( "Expected info of %d parts to be present, but got %d instead." , len ( unMarshalXLMeta . Parts ) , len ( jsoniterXLMeta . Parts ) )
2016-09-09 01:38:18 -04:00
} else {
for i := 0 ; i < len ( unMarshalXLMeta . Parts ) ; i ++ {
2019-09-05 18:51:27 -04:00
if unMarshalXLMeta . Parts [ i ] . Number != jsoniterXLMeta . Parts [ i ] . Number {
t . Errorf ( "Expected the number of part %d to be \"%d\", got \"%d\"." , i + 1 , unMarshalXLMeta . Parts [ i ] . Number , jsoniterXLMeta . Parts [ i ] . Number )
2016-09-09 01:38:18 -04:00
}
2019-09-05 18:51:27 -04:00
if unMarshalXLMeta . Parts [ i ] . Size != jsoniterXLMeta . Parts [ i ] . Size {
t . Errorf ( "Expected the size of part %d to be %v, got %v." , i + 1 , unMarshalXLMeta . Parts [ i ] . Size , jsoniterXLMeta . Parts [ i ] . Size )
2016-09-09 01:38:18 -04:00
}
}
}
for key , val := range unMarshalXLMeta . Meta {
2019-09-05 18:51:27 -04:00
jsoniterVal , exists := jsoniterXLMeta . Meta [ key ]
2016-09-09 01:38:18 -04:00
if ! exists {
t . Errorf ( "No meta data entry for Key \"%s\" exists." , key )
}
2019-09-05 18:51:27 -04:00
if val != jsoniterVal {
t . Errorf ( "Expected the value for Meta data key \"%s\" to be \"%s\", but got \"%s\"." , key , val , jsoniterVal )
2016-09-09 01:38:18 -04:00
}
}
}
2019-09-05 18:51:27 -04:00
// Tests the correctness of constructing XLMetaV1 using jsoniter lib.
2016-09-09 01:38:18 -04:00
// The result will be compared with the result obtained from json.unMarshal of the byte data.
2019-09-05 18:51:27 -04:00
func TestGetXLMetaV1Jsoniter1 ( t * testing . T ) {
2016-09-09 01:38:18 -04:00
xlMetaJSON := getXLMetaBytes ( 1 )
var unMarshalXLMeta xlMetaV1
if err := json . Unmarshal ( xlMetaJSON , & unMarshalXLMeta ) ; err != nil {
2017-08-14 21:08:42 -04:00
t . Errorf ( "Unmarshalling failed: %v" , err )
2016-09-09 01:38:18 -04:00
}
2020-04-09 12:30:02 -04:00
jsoniterXLMeta , err := xlMetaV1UnmarshalJSON ( GlobalContext , xlMetaJSON )
2016-09-09 01:38:18 -04:00
if err != nil {
2019-09-05 18:51:27 -04:00
t . Errorf ( "jsoniter parsing of XLMeta failed: %v" , err )
2016-09-09 01:38:18 -04:00
}
2019-09-05 18:51:27 -04:00
compareXLMetaV1 ( t , unMarshalXLMeta , jsoniterXLMeta )
2016-09-09 01:38:18 -04:00
}
2019-09-05 18:51:27 -04:00
// Tests the correctness of constructing XLMetaV1 using jsoniter lib for XLMetaV1 of size 10 parts.
2016-09-09 01:38:18 -04:00
// The result will be compared with the result obtained from json.unMarshal of the byte data.
2019-09-05 18:51:27 -04:00
func TestGetXLMetaV1Jsoniter10 ( t * testing . T ) {
2016-09-09 01:38:18 -04:00
xlMetaJSON := getXLMetaBytes ( 10 )
var unMarshalXLMeta xlMetaV1
if err := json . Unmarshal ( xlMetaJSON , & unMarshalXLMeta ) ; err != nil {
2017-08-14 21:08:42 -04:00
t . Errorf ( "Unmarshalling failed: %v" , err )
2016-09-09 01:38:18 -04:00
}
2020-04-09 12:30:02 -04:00
jsoniterXLMeta , err := xlMetaV1UnmarshalJSON ( GlobalContext , xlMetaJSON )
2016-09-09 01:38:18 -04:00
if err != nil {
2019-09-05 18:51:27 -04:00
t . Errorf ( "jsoniter parsing of XLMeta failed: %v" , err )
2016-09-09 01:38:18 -04:00
}
2019-09-05 18:51:27 -04:00
compareXLMetaV1 ( t , unMarshalXLMeta , jsoniterXLMeta )
2016-09-09 01:38:18 -04:00
}
2017-01-30 18:44:42 -05:00
// Test the predicted part size from the part index
func TestGetPartSizeFromIdx ( t * testing . T ) {
// Create test cases
testCases := [ ] struct {
totalSize int64
partSize int64
partIndex int
expectedSize int64
} {
// Total size is zero
{ 0 , 10 , 1 , 0 } ,
// part size 2MiB, total size 4MiB
{ 4 * humanize . MiByte , 2 * humanize . MiByte , 1 , 2 * humanize . MiByte } ,
{ 4 * humanize . MiByte , 2 * humanize . MiByte , 2 , 2 * humanize . MiByte } ,
{ 4 * humanize . MiByte , 2 * humanize . MiByte , 3 , 0 } ,
// part size 2MiB, total size 5MiB
{ 5 * humanize . MiByte , 2 * humanize . MiByte , 1 , 2 * humanize . MiByte } ,
{ 5 * humanize . MiByte , 2 * humanize . MiByte , 2 , 2 * humanize . MiByte } ,
{ 5 * humanize . MiByte , 2 * humanize . MiByte , 3 , 1 * humanize . MiByte } ,
{ 5 * humanize . MiByte , 2 * humanize . MiByte , 4 , 0 } ,
}
for i , testCase := range testCases {
2020-04-09 12:30:02 -04:00
s , err := calculatePartSizeFromIdx ( GlobalContext , testCase . totalSize , testCase . partSize , testCase . partIndex )
2017-01-31 18:34:49 -05:00
if err != nil {
t . Errorf ( "Test %d: Expected to pass but failed. %s" , i + 1 , err )
}
if err == nil && s != testCase . expectedSize {
2017-01-30 18:44:42 -05:00
t . Errorf ( "Test %d: The calculated part size is incorrect: expected = %d, found = %d\n" , i + 1 , testCase . expectedSize , s )
}
}
2017-01-31 18:34:49 -05:00
testCasesFailure := [ ] struct {
totalSize int64
partSize int64
partIndex int
err error
} {
2017-10-06 12:38:01 -04:00
// partSize is 0, returns error.
2017-01-31 18:34:49 -05:00
{ 10 , 0 , 1 , errPartSizeZero } ,
2017-10-06 12:38:01 -04:00
// partIndex is 0, returns error.
2017-01-31 18:34:49 -05:00
{ 10 , 1 , 0 , errPartSizeIndex } ,
2017-10-06 12:38:01 -04:00
// Total size is -1, returns error.
2018-09-27 23:36:17 -04:00
{ - 2 , 10 , 1 , errInvalidArgument } ,
2017-01-31 18:34:49 -05:00
}
for i , testCaseFailure := range testCasesFailure {
2020-04-09 12:30:02 -04:00
_ , err := calculatePartSizeFromIdx ( GlobalContext , testCaseFailure . totalSize , testCaseFailure . partSize , testCaseFailure . partIndex )
2017-01-31 18:34:49 -05:00
if err == nil {
t . Errorf ( "Test %d: Expected to failed but passed. %s" , i + 1 , err )
}
2018-04-10 12:36:37 -04:00
if err != nil && err != testCaseFailure . err {
t . Errorf ( "Test %d: Expected err %s, but got %s" , i + 1 , testCaseFailure . err , err )
2017-01-31 18:34:49 -05:00
}
}
2017-01-30 18:44:42 -05:00
}
2017-02-24 12:20:40 -05:00
func TestShuffleDisks ( t * testing . T ) {
nDisks := 16
disks , err := getRandomDisks ( nDisks )
if err != nil {
t . Fatal ( err )
}
2019-11-19 20:42:27 -05:00
objLayer , _ , err := initObjectLayer ( mustGetZoneEndpoints ( disks ... ) )
2017-02-24 12:20:40 -05:00
if err != nil {
removeRoots ( disks )
t . Fatal ( err )
}
defer removeRoots ( disks )
2019-11-19 20:42:27 -05:00
z := objLayer . ( * xlZones )
testShuffleDisks ( t , z )
2017-02-24 12:20:40 -05:00
}
// Test shuffleDisks which returns shuffled slice of disks for their actual distribution.
2019-11-19 20:42:27 -05:00
func testShuffleDisks ( t * testing . T , z * xlZones ) {
disks := z . zones [ 0 ] . GetDisks ( 0 ) ( )
2017-02-24 12:20:40 -05:00
distribution := [ ] int { 16 , 14 , 12 , 10 , 8 , 6 , 4 , 2 , 1 , 3 , 5 , 7 , 9 , 11 , 13 , 15 }
shuffledDisks := shuffleDisks ( disks , distribution )
// From the "distribution" above you can notice that:
// 1st data block is in the 9th disk (i.e distribution index 8)
// 2nd data block is in the 8th disk (i.e distribution index 7) and so on.
if shuffledDisks [ 0 ] != disks [ 8 ] ||
shuffledDisks [ 1 ] != disks [ 7 ] ||
shuffledDisks [ 2 ] != disks [ 9 ] ||
shuffledDisks [ 3 ] != disks [ 6 ] ||
shuffledDisks [ 4 ] != disks [ 10 ] ||
shuffledDisks [ 5 ] != disks [ 5 ] ||
shuffledDisks [ 6 ] != disks [ 11 ] ||
shuffledDisks [ 7 ] != disks [ 4 ] ||
shuffledDisks [ 8 ] != disks [ 12 ] ||
shuffledDisks [ 9 ] != disks [ 3 ] ||
shuffledDisks [ 10 ] != disks [ 13 ] ||
shuffledDisks [ 11 ] != disks [ 2 ] ||
shuffledDisks [ 12 ] != disks [ 14 ] ||
shuffledDisks [ 13 ] != disks [ 1 ] ||
shuffledDisks [ 14 ] != disks [ 15 ] ||
shuffledDisks [ 15 ] != disks [ 0 ] {
t . Errorf ( "shuffleDisks returned incorrect order." )
}
}
2017-06-14 20:14:27 -04:00
// TestEvalDisks tests the behavior of evalDisks
func TestEvalDisks ( t * testing . T ) {
nDisks := 16
disks , err := getRandomDisks ( nDisks )
if err != nil {
t . Fatal ( err )
}
2019-11-19 20:42:27 -05:00
objLayer , _ , err := initObjectLayer ( mustGetZoneEndpoints ( disks ... ) )
2017-06-14 20:14:27 -04:00
if err != nil {
removeRoots ( disks )
t . Fatal ( err )
}
defer removeRoots ( disks )
2019-11-19 20:42:27 -05:00
z := objLayer . ( * xlZones )
testShuffleDisks ( t , z )
2017-06-14 20:14:27 -04:00
}