Mirror of https://github.com/minio/minio.git (synced 2024-12-24 06:05:55 -05:00)
xl: Rename getOrderedDisks as shuffleDisks appropriately. (#3796)
This PR is a readability cleanup:
- Rename getOrderedDisks to shuffleDisks.
- Rename getOrderedPartsMetadata to shufflePartsMetadata.

The distribution slice is now passed as the second argument instead of being the primary input argument, for brevity. Also, replace the type-cast form `int64(0)` with a plain declaration, `var variable int64`, everywhere.
This commit is contained in:
parent 25b5a0534f
commit bcc5b6e1ef
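For context on the `int64(0)` cleanup: in Go the zero value of int64 is already 0, so a plain declaration is equivalent to the explicit conversion and reads more clearly. A minimal illustrative sketch (not part of the diff; the variable names here are made up):

package main

import "fmt"

func main() {
	// Style removed by this commit: converting the untyped literal 0.
	oldStyle := int64(0)

	// Style adopted by this commit: rely on the zero value of the type.
	var newStyle int64

	fmt.Println(oldStyle == newStyle) // true, both are int64 with value 0
}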
@@ -192,7 +192,7 @@ func erasureReadFile(writer io.Writer, disks []StorageAPI, volume string, path s
 	}()

 	// Total bytes written to writer
-	bytesWritten := int64(0)
+	var bytesWritten int64

 	startBlock := offset / blockSize
 	endBlock := (offset + length) / blockSize
@@ -263,7 +263,7 @@ func erasureReadFile(writer io.Writer, disks []StorageAPI, volume string, path s
 	}

 	// Offset in enBlocks from where data should be read from.
-	enBlocksOffset := int64(0)
+	var enBlocksOffset int64

 	// Total data to be read from enBlocks.
 	enBlocksLength := curBlockSize
@@ -121,35 +121,6 @@ func testGetReadDisks(t *testing.T, xl *xlObjects) {
 	}
 }

-// Test getOrderedDisks which returns ordered slice of disks from their
-// actual distribution.
-func testGetOrderedDisks(t *testing.T, xl *xlObjects) {
-	disks := xl.storageDisks
-	distribution := []int{16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15}
-	orderedDisks := getOrderedDisks(distribution, disks)
-	// From the "distribution" above you can notice that:
-	// 1st data block is in the 9th disk (i.e distribution index 8)
-	// 2nd data block is in the 8th disk (i.e distribution index 7) and so on.
-	if orderedDisks[0] != disks[8] ||
-		orderedDisks[1] != disks[7] ||
-		orderedDisks[2] != disks[9] ||
-		orderedDisks[3] != disks[6] ||
-		orderedDisks[4] != disks[10] ||
-		orderedDisks[5] != disks[5] ||
-		orderedDisks[6] != disks[11] ||
-		orderedDisks[7] != disks[4] ||
-		orderedDisks[8] != disks[12] ||
-		orderedDisks[9] != disks[3] ||
-		orderedDisks[10] != disks[13] ||
-		orderedDisks[11] != disks[2] ||
-		orderedDisks[12] != disks[14] ||
-		orderedDisks[13] != disks[1] ||
-		orderedDisks[14] != disks[15] ||
-		orderedDisks[15] != disks[0] {
-		t.Errorf("getOrderedDisks returned incorrect order.")
-	}
-}
-
 // Test for isSuccessDataBlocks and isSuccessDecodeBlocks.
 func TestIsSuccessBlocks(t *testing.T) {
 	dataBlocks := 8
@@ -217,7 +188,7 @@ func TestIsSuccessBlocks(t *testing.T) {
 	}
 }

-// Wrapper function for testGetReadDisks, testGetOrderedDisks.
+// Wrapper function for testGetReadDisks, testShuffleDisks.
 func TestErasureReadUtils(t *testing.T) {
 	nDisks := 16
 	disks, err := getRandomDisks(nDisks)
@@ -236,7 +207,6 @@ func TestErasureReadUtils(t *testing.T) {
 	defer removeRoots(disks)
 	xl := objLayer.(*xlObjects)
 	testGetReadDisks(t, xl)
-	testGetOrderedDisks(t, xl)
 }

 // Simulates a faulty disk for ReadFile()
@@ -116,7 +116,7 @@ func writeDataBlocks(dst io.Writer, enBlocks [][]byte, dataBlocks int, offset in
 	write := length

 	// Counter to increment total written.
-	totalWritten := int64(0)
+	var totalWritten int64

 	// Write all data blocks to dst.
 	for _, block := range enBlocks[:dataBlocks] {
@@ -180,7 +180,7 @@ func copyBuffer(writer io.Writer, disk StorageAPI, volume string, path string, b
 	}

 	// Starting offset for Reading the file.
-	startOffset := int64(0)
+	var startOffset int64

 	// Read until io.EOF.
 	for {
@@ -210,7 +210,7 @@ func (fs fsObjects) appendParts(bucket, object, uploadID string, info bgAppendPa
 func (fs fsObjects) appendPart(bucket, object, uploadID string, part objectPartInfo, buf []byte) error {
 	partPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object, uploadID, part.Name)

-	offset := int64(0)
+	var offset int64
 	// Read each file part to start writing to the temporary concatenated object.
 	file, size, err := fsOpenFile(partPath, offset)
 	if err != nil {
@@ -463,7 +463,7 @@ func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
 	pipeReader, pipeWriter := io.Pipe()

 	go func() {
-		startOffset := int64(0) // Read the whole file.
+		var startOffset int64 // Read the whole file.
 		if gerr := fs.GetObject(srcBucket, srcObject, startOffset, length, pipeWriter); gerr != nil {
 			errorIf(gerr, "Unable to read %s/%s.", srcBucket, srcObject)
 			pipeWriter.CloseWithError(gerr)
@@ -864,7 +864,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
 		multipartPartFile := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, partSuffix)

 		var reader io.ReadCloser
-		offset := int64(0)
+		var offset int64
 		reader, _, err = fsOpenFile(multipartPartFile, offset)
 		if err != nil {
 			fs.rwPool.Close(fsMetaPathMultipart)
@@ -395,7 +395,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
 	pipeReader, pipeWriter := io.Pipe()

 	go func() {
-		startOffset := int64(0) // Read the whole file.
+		var startOffset int64 // Read the whole file.
 		if gerr := fs.GetObject(srcBucket, srcObject, startOffset, length, pipeWriter); gerr != nil {
 			errorIf(gerr, "Unable to read %s/%s.", srcBucket, srcObject)
 			pipeWriter.CloseWithError(gerr)
@@ -653,14 +653,14 @@ func TestNsLockMapDeleteLockInfoEntryForOps(t *testing.T) {
 	} else {
 		t.Fatalf("Entry for <volume> %s, <path> %s should have existed. ", param.volume, param.path)
 	}
-	if globalNSMutex.counters.granted != int64(0) {
-		t.Errorf("Expected the count of total running locks to be %v, but got %v", int64(0), globalNSMutex.counters.granted)
+	if globalNSMutex.counters.granted != 0 {
+		t.Errorf("Expected the count of total running locks to be %v, but got %v", 0, globalNSMutex.counters.granted)
 	}
-	if globalNSMutex.counters.blocked != int64(0) {
-		t.Errorf("Expected the count of total blocked locks to be %v, but got %v", int64(0), globalNSMutex.counters.blocked)
+	if globalNSMutex.counters.blocked != 0 {
+		t.Errorf("Expected the count of total blocked locks to be %v, but got %v", 0, globalNSMutex.counters.blocked)
 	}
-	if globalNSMutex.counters.total != int64(0) {
-		t.Errorf("Expected the count of all locks to be %v, but got %v", int64(0), globalNSMutex.counters.total)
+	if globalNSMutex.counters.total != 0 {
+		t.Errorf("Expected the count of all locks to be %v, but got %v", 0, globalNSMutex.counters.total)
 	}
 }
@@ -723,13 +723,13 @@ func TestNsLockMapDeleteLockInfoEntryForVolumePath(t *testing.T) {
 		t.Fatalf("Entry for <volume> %s, <path> %s should have been deleted. ", param.volume, param.path)
 	}
 	// The lock count values should be 0.
-	if globalNSMutex.counters.granted != int64(0) {
-		t.Errorf("Expected the count of total running locks to be %v, but got %v", int64(0), globalNSMutex.counters.granted)
+	if globalNSMutex.counters.granted != 0 {
+		t.Errorf("Expected the count of total running locks to be %v, but got %v", 0, globalNSMutex.counters.granted)
 	}
-	if globalNSMutex.counters.blocked != int64(0) {
-		t.Errorf("Expected the count of total blocked locks to be %v, but got %v", int64(0), globalNSMutex.counters.blocked)
+	if globalNSMutex.counters.blocked != 0 {
+		t.Errorf("Expected the count of total blocked locks to be %v, but got %v", 0, globalNSMutex.counters.blocked)
 	}
-	if globalNSMutex.counters.total != int64(0) {
-		t.Errorf("Expected the count of all locks to be %v, but got %v", int64(0), globalNSMutex.counters.total)
+	if globalNSMutex.counters.total != 0 {
+		t.Errorf("Expected the count of all locks to be %v, but got %v", 0, globalNSMutex.counters.total)
 	}
 }
@@ -146,7 +146,7 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
 		// data with size different from the actual number of bytes available in the reader
 		{bucket, object, data, nil, "", int64(len(data) - 1), getMD5Hash(data[:len(data)-1]), nil},
 		{bucket, object, nilBytes, nil, "", int64(len(nilBytes) + 1), getMD5Hash(nilBytes), IncompleteBody{}},
-		{bucket, object, fiveMBBytes, nil, "", int64(0), getMD5Hash(fiveMBBytes), nil},
+		{bucket, object, fiveMBBytes, nil, "", 0, getMD5Hash(fiveMBBytes), nil},

 		// Test case 30
 		// valid data with X-Amz-Meta- meta
@@ -133,7 +133,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
 	}

 	// Get the object.
-	startOffset := int64(0)
+	var startOffset int64
 	length := objInfo.Size
 	if hrange != nil {
 		startOffset = hrange.offsetBegin
@@ -620,7 +620,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 	}

 	// Get the object.
-	startOffset := int64(0)
+	var startOffset int64
 	length := objInfo.Size
 	if hrange != nil {
 		startOffset = hrange.offsetBegin
@@ -91,7 +91,7 @@ func calculateStreamContentLength(dataLen, chunkSize int64) int64 {
 	}
 	chunksCount := int64(dataLen / chunkSize)
 	remainingBytes := int64(dataLen % chunkSize)
-	streamLen := int64(0)
+	var streamLen int64
 	streamLen += chunksCount * calculateSignedChunkLength(chunkSize)
 	if remainingBytes > 0 {
 		streamLen += calculateSignedChunkLength(remainingBytes)
@@ -371,9 +371,9 @@ func healObject(storageDisks []StorageAPI, bucket string, object string, quorum
 	}

 	// Reorder so that we have data disks first and parity disks next.
-	latestDisks = getOrderedDisks(latestMeta.Erasure.Distribution, latestDisks)
-	outDatedDisks = getOrderedDisks(latestMeta.Erasure.Distribution, outDatedDisks)
-	partsMetadata = getOrderedPartsMetadata(latestMeta.Erasure.Distribution, partsMetadata)
+	latestDisks = shuffleDisks(latestDisks, latestMeta.Erasure.Distribution)
+	outDatedDisks = shuffleDisks(outDatedDisks, latestMeta.Erasure.Distribution)
+	partsMetadata = shufflePartsMetadata(partsMetadata, latestMeta.Erasure.Distribution)

 	// We write at temporary location and then rename to fianal location.
 	tmpID := mustGetUUID()
@@ -544,7 +544,7 @@ func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
 	pipeReader, pipeWriter := io.Pipe()

 	go func() {
-		startOffset := int64(0) // Read the whole file.
+		var startOffset int64 // Read the whole file.
 		if gerr := xl.GetObject(srcBucket, srcObject, startOffset, length, pipeWriter); gerr != nil {
 			errorIf(gerr, "Unable to read %s of the object `%s/%s`.", srcBucket, srcObject)
 			pipeWriter.CloseWithError(toObjectErr(gerr, srcBucket, srcObject))
@@ -607,8 +607,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
 		return PartInfo{}, err
 	}

-	onlineDisks = getOrderedDisks(xlMeta.Erasure.Distribution, onlineDisks)
-	_ = getOrderedPartsMetadata(xlMeta.Erasure.Distribution, partsMetadata)
+	onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution)

 	// Need a unique name for the part being written in minioMetaBucket to
 	// accommodate concurrent PutObjectPart requests
@@ -919,10 +918,10 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
 	}

 	// Order online disks in accordance with distribution order.
-	onlineDisks = getOrderedDisks(xlMeta.Erasure.Distribution, onlineDisks)
+	onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution)

 	// Order parts metadata in accordance with distribution order.
-	partsMetadata = getOrderedPartsMetadata(xlMeta.Erasure.Distribution, partsMetadata)
+	partsMetadata = shufflePartsMetadata(partsMetadata, xlMeta.Erasure.Distribution)

 	// Save current xl meta for validation.
 	var currentXLMeta = xlMeta
@@ -58,7 +58,7 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
 	}

 	// Reorder online disks based on erasure distribution order.
-	onlineDisks = getOrderedDisks(xlMeta.Erasure.Distribution, onlineDisks)
+	onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution)

 	// Length of the file to read.
 	length := xlMeta.Stat.Size
@@ -106,7 +106,7 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
 	pipeReader, pipeWriter := io.Pipe()

 	go func() {
-		startOffset := int64(0) // Read the whole file.
+		var startOffset int64 // Read the whole file.
 		if gerr := xl.GetObject(srcBucket, srcObject, startOffset, length, pipeWriter); gerr != nil {
 			errorIf(gerr, "Unable to read %s of the object `%s/%s`.", srcBucket, srcObject)
 			pipeWriter.CloseWithError(toObjectErr(gerr, srcBucket, srcObject))
@@ -163,10 +163,10 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
 	}

 	// Reorder online disks based on erasure distribution order.
-	onlineDisks = getOrderedDisks(xlMeta.Erasure.Distribution, onlineDisks)
+	onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution)

 	// Reorder parts metadata based on erasure distribution order.
-	metaArr = getOrderedPartsMetadata(xlMeta.Erasure.Distribution, metaArr)
+	metaArr = shufflePartsMetadata(metaArr, xlMeta.Erasure.Distribution)

 	// For negative length read everything.
 	if length < 0 {
@@ -242,7 +242,7 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
 		}
 	}

-	totalBytesRead := int64(0)
+	var totalBytesRead int64

 	chunkSize := getChunkSize(xlMeta.Erasure.BlockSize, xlMeta.Erasure.DataBlocks)
 	pool := bpool.NewBytePool(chunkSize, len(onlineDisks))
@@ -525,7 +525,7 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
 	}

 	// Order disks according to erasure distribution
-	onlineDisks := getOrderedDisks(partsMetadata[0].Erasure.Distribution, xl.storageDisks)
+	onlineDisks := shuffleDisks(xl.storageDisks, partsMetadata[0].Erasure.Distribution)

 	// Delete temporary object in the event of failure.
 	// If PutObject succeeded there would be no temporary
@@ -533,7 +533,7 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
 	defer xl.deleteObject(minioMetaTmpBucket, tempObj)

 	// Total size of the written object
-	sizeWritten := int64(0)
+	var sizeWritten int64

 	// Read data and split into parts - similar to multipart mechanism
 	for partIdx := 1; ; partIdx++ {
@@ -550,9 +550,10 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
 			return ObjectInfo{}, toObjectErr(err, bucket, object)
 		}

-		// Prepare file for eventual optimization in the disk
+		// Hint the filesystem to pre-allocate one continuous large block.
+		// This is only an optimization.
 		if curPartSize > 0 {
-			// Calculate the real size of the part in the disk and prepare it for eventual optimization
+			// Calculate the real size of the part in the disk.
 			actualSize := xl.sizeOnDisk(curPartSize, xlMeta.Erasure.BlockSize, xlMeta.Erasure.DataBlocks)
 			for _, disk := range onlineDisks {
 				if disk != nil {
@@ -306,26 +306,34 @@ func readAllXLMetadata(disks []StorageAPI, bucket, object string) ([]xlMetaV1, [
 	return metadataArray, errs
 }

-// Return ordered partsMetadata depending on distribution.
-func getOrderedPartsMetadata(distribution []int, partsMetadata []xlMetaV1) (orderedPartsMetadata []xlMetaV1) {
-	orderedPartsMetadata = make([]xlMetaV1, len(partsMetadata))
+// Return shuffled partsMetadata depending on distribution.
+func shufflePartsMetadata(partsMetadata []xlMetaV1, distribution []int) (shuffledPartsMetadata []xlMetaV1) {
+	if distribution == nil {
+		return partsMetadata
+	}
+	shuffledPartsMetadata = make([]xlMetaV1, len(partsMetadata))
 	// Shuffle slice xl metadata for expected distribution.
 	for index := range partsMetadata {
 		blockIndex := distribution[index]
-		orderedPartsMetadata[blockIndex-1] = partsMetadata[index]
+		shuffledPartsMetadata[blockIndex-1] = partsMetadata[index]
 	}
-	return orderedPartsMetadata
+	return shuffledPartsMetadata
 }

-// getOrderedDisks - get ordered disks from erasure distribution.
-// returns ordered slice of disks from their actual distribution.
-func getOrderedDisks(distribution []int, disks []StorageAPI) (orderedDisks []StorageAPI) {
-	orderedDisks = make([]StorageAPI, len(disks))
-	// From disks gets ordered disks.
+// shuffleDisks - shuffle input disks slice depending on the
+// erasure distribution. return shuffled slice of disks with
+// their expected distribution.
+func shuffleDisks(disks []StorageAPI, distribution []int) (shuffledDisks []StorageAPI) {
+	if distribution == nil {
+		return disks
+	}
+	shuffledDisks = make([]StorageAPI, len(disks))
+	// Shuffle disks for expected distribution.
 	for index := range disks {
 		blockIndex := distribution[index]
-		orderedDisks[blockIndex-1] = disks[index]
+		shuffledDisks[blockIndex-1] = disks[index]
 	}
-	return orderedDisks
+	return shuffledDisks
 }

 // Errors specifically generated by getPartSizeFromIdx function.
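Both helpers above apply the same permutation, which is why call sites such as healObject and GetObject shuffle the disks slice and the parts metadata slice with one distribution: after shuffling, index i of each slice still refers to the same erasure block. A minimal property sketch, assuming simplified element types (strings stand in for StorageAPI and xlMetaV1; this is illustrative, not the repository code):

package main

import "fmt"

// shuffle applies an erasure-style distribution to a string slice.
// A nil distribution is a no-op, mirroring the guard added above.
func shuffle(items []string, distribution []int) []string {
	if distribution == nil {
		return items
	}
	out := make([]string, len(items))
	for i := range items {
		out[distribution[i]-1] = items[i]
	}
	return out
}

func main() {
	distribution := []int{2, 4, 1, 3}
	disks := []string{"disk-A", "disk-B", "disk-C", "disk-D"}
	meta := []string{"meta-A", "meta-B", "meta-C", "meta-D"}

	shuffledDisks := shuffle(disks, distribution)
	shuffledMeta := shuffle(meta, distribution)

	// Shuffling both slices with the same distribution keeps them aligned:
	// index i of the shuffled disks matches index i of the shuffled metadata.
	for i := range shuffledDisks {
		fmt.Println(shuffledDisks[i], shuffledMeta[i])
	}
	// Output:
	// disk-C meta-C
	// disk-A meta-A
	// disk-D meta-D
	// disk-B meta-B
}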
@@ -383,3 +383,51 @@ func TestGetPartSizeFromIdx(t *testing.T) {
 		}
 	}
 }
+
+func TestShuffleDisks(t *testing.T) {
+	nDisks := 16
+	disks, err := getRandomDisks(nDisks)
+	if err != nil {
+		t.Fatal(err)
+	}
+	endpoints, err := parseStorageEndpoints(disks)
+	if err != nil {
+		t.Fatal(err)
+	}
+	objLayer, _, err := initObjectLayer(endpoints)
+	if err != nil {
+		removeRoots(disks)
+		t.Fatal(err)
+	}
+	defer removeRoots(disks)
+	xl := objLayer.(*xlObjects)
+	testShuffleDisks(t, xl)
+}
+
+// Test shuffleDisks which returns shuffled slice of disks for their actual distribution.
+func testShuffleDisks(t *testing.T, xl *xlObjects) {
+	disks := xl.storageDisks
+	distribution := []int{16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15}
+	shuffledDisks := shuffleDisks(disks, distribution)
+	// From the "distribution" above you can notice that:
+	// 1st data block is in the 9th disk (i.e distribution index 8)
+	// 2nd data block is in the 8th disk (i.e distribution index 7) and so on.
+	if shuffledDisks[0] != disks[8] ||
+		shuffledDisks[1] != disks[7] ||
+		shuffledDisks[2] != disks[9] ||
+		shuffledDisks[3] != disks[6] ||
+		shuffledDisks[4] != disks[10] ||
+		shuffledDisks[5] != disks[5] ||
+		shuffledDisks[6] != disks[11] ||
+		shuffledDisks[7] != disks[4] ||
+		shuffledDisks[8] != disks[12] ||
+		shuffledDisks[9] != disks[3] ||
+		shuffledDisks[10] != disks[13] ||
+		shuffledDisks[11] != disks[2] ||
+		shuffledDisks[12] != disks[14] ||
+		shuffledDisks[13] != disks[1] ||
+		shuffledDisks[14] != disks[15] ||
+		shuffledDisks[15] != disks[0] {
+		t.Errorf("shuffleDisks returned incorrect order.")
+	}
+}