2016-06-26 06:32:49 -04:00
|
|
|
/*
|
2020-06-12 23:04:01 -04:00
|
|
|
* MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
|
2016-06-26 06:32:49 -04:00
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
2016-08-18 19:23:42 -04:00
|
|
|
package cmd
|
2016-06-26 06:32:49 -04:00
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
2018-04-05 18:04:40 -04:00
|
|
|
"context"
|
2017-08-14 21:08:42 -04:00
|
|
|
"crypto/rand"
|
|
|
|
"io"
|
2017-08-12 22:25:43 -04:00
|
|
|
"os"
|
2016-06-26 06:32:49 -04:00
|
|
|
"testing"
|
|
|
|
)
|
|
|
|
|
2018-08-24 02:35:37 -04:00
|
|
|
// erasureEncodeDecodeTests is the table driving TestErasureEncodeDecode.
// Each entry describes an erasure layout (dataBlocks + parityBlocks shards),
// how many leading data and parity shards to wipe before decoding, which
// decode path to exercise, and whether decoding is expected to fail.
var erasureEncodeDecodeTests = []struct {
	dataBlocks, parityBlocks   int  // erasure layout: counts of data and parity shards
	missingData, missingParity int  // shards nil'ed out before decoding
	reconstructParity          bool // true: DecodeDataAndParityBlocks; false: DecodeDataBlocks
	shouldFail                 bool // expected decode outcome for this loss pattern
}{
	{dataBlocks: 2, parityBlocks: 2, missingData: 0, missingParity: 0, reconstructParity: true, shouldFail: false},
	{dataBlocks: 3, parityBlocks: 3, missingData: 1, missingParity: 0, reconstructParity: true, shouldFail: false},
	{dataBlocks: 4, parityBlocks: 4, missingData: 2, missingParity: 0, reconstructParity: false, shouldFail: false},
	{dataBlocks: 5, parityBlocks: 5, missingData: 0, missingParity: 1, reconstructParity: true, shouldFail: false},
	{dataBlocks: 6, parityBlocks: 6, missingData: 0, missingParity: 2, reconstructParity: true, shouldFail: false},
	{dataBlocks: 7, parityBlocks: 7, missingData: 1, missingParity: 1, reconstructParity: false, shouldFail: false},
	{dataBlocks: 8, parityBlocks: 8, missingData: 3, missingParity: 2, reconstructParity: false, shouldFail: false},
	// Total losses exceed the parity count: decoding must fail.
	{dataBlocks: 2, parityBlocks: 2, missingData: 2, missingParity: 1, reconstructParity: true, shouldFail: true},
	{dataBlocks: 4, parityBlocks: 2, missingData: 2, missingParity: 2, reconstructParity: false, shouldFail: true},
	// Losses exactly equal parity: still recoverable.
	{dataBlocks: 8, parityBlocks: 4, missingData: 2, missingParity: 2, reconstructParity: false, shouldFail: false},
}
|
|
|
|
|
2018-08-24 02:35:37 -04:00
|
|
|
func TestErasureEncodeDecode(t *testing.T) {
|
2017-08-14 21:08:42 -04:00
|
|
|
data := make([]byte, 256)
|
|
|
|
if _, err := io.ReadFull(rand.Reader, data); err != nil {
|
|
|
|
t.Fatalf("Failed to read random data: %v", err)
|
2016-06-26 06:32:49 -04:00
|
|
|
}
|
2018-08-24 02:35:37 -04:00
|
|
|
for i, test := range erasureEncodeDecodeTests {
|
2017-08-14 21:08:42 -04:00
|
|
|
buffer := make([]byte, len(data), 2*len(data))
|
|
|
|
copy(buffer, data)
|
2016-06-26 06:32:49 -04:00
|
|
|
|
[feat]: change erasure coding default block size from 10MiB to 1MiB (#11721)
major performance improvements in range GETs to avoid large
read amplification when ranges are tiny and random
```
-------------------
Operation: GET
Operations: 142014 -> 339421
Duration: 4m50s -> 4m56s
* Average: +139.41% (+1177.3 MiB/s) throughput, +139.11% (+658.4) obj/s
* Fastest: +125.24% (+1207.4 MiB/s) throughput, +132.32% (+612.9) obj/s
* 50% Median: +139.06% (+1175.7 MiB/s) throughput, +133.46% (+660.9) obj/s
* Slowest: +203.40% (+1267.9 MiB/s) throughput, +198.59% (+753.5) obj/s
```
TTFB from 10MiB BlockSize
```
* First Access TTFB: Avg: 81ms, Median: 61ms, Best: 20ms, Worst: 2.056s
```
TTFB from 1MiB BlockSize
```
* First Access TTFB: Avg: 22ms, Median: 21ms, Best: 8ms, Worst: 91ms
```
Full object reads however do see a slight change which won't be
noticeable in real world, so not doing any comparisons
TTFB still had improvements with full object reads with 1MiB
```
* First Access TTFB: Avg: 68ms, Median: 35ms, Best: 11ms, Worst: 1.16s
```
v/s
TTFB with 10MiB
```
* First Access TTFB: Avg: 388ms, Median: 98ms, Best: 20ms, Worst: 4.156s
```
This change should affect all new uploads, previous uploads should
continue to work with business as usual. But dramatic improvements can
be seen with these changes.
2021-03-06 17:09:34 -05:00
|
|
|
erasure, err := NewErasure(context.Background(), test.dataBlocks, test.parityBlocks, blockSizeV2)
|
2017-08-14 21:08:42 -04:00
|
|
|
if err != nil {
|
2018-08-24 02:35:37 -04:00
|
|
|
t.Fatalf("Test %d: failed to create erasure: %v", i, err)
|
2017-08-14 21:08:42 -04:00
|
|
|
}
|
2018-08-24 02:35:37 -04:00
|
|
|
encoded, err := erasure.EncodeData(context.Background(), buffer)
|
2017-08-14 21:08:42 -04:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Test %d: failed to encode data: %v", i, err)
|
|
|
|
}
|
2016-06-26 06:32:49 -04:00
|
|
|
|
2017-08-14 21:08:42 -04:00
|
|
|
for j := range encoded[:test.missingData] {
|
|
|
|
encoded[j] = nil
|
|
|
|
}
|
|
|
|
for j := test.dataBlocks; j < test.dataBlocks+test.missingParity; j++ {
|
|
|
|
encoded[j] = nil
|
|
|
|
}
|
2016-06-26 06:32:49 -04:00
|
|
|
|
2017-08-14 21:08:42 -04:00
|
|
|
if test.reconstructParity {
|
2018-08-24 02:35:37 -04:00
|
|
|
err = erasure.DecodeDataAndParityBlocks(context.Background(), encoded)
|
2017-08-14 21:08:42 -04:00
|
|
|
} else {
|
2018-08-24 02:35:37 -04:00
|
|
|
err = erasure.DecodeDataBlocks(encoded)
|
2017-08-14 21:08:42 -04:00
|
|
|
}
|
2017-08-11 21:24:48 -04:00
|
|
|
|
2017-08-14 21:08:42 -04:00
|
|
|
if err == nil && test.shouldFail {
|
|
|
|
t.Errorf("Test %d: test should fail but it passed", i)
|
|
|
|
}
|
|
|
|
if err != nil && !test.shouldFail {
|
|
|
|
t.Errorf("Test %d: test should pass but it failed: %v", i, err)
|
|
|
|
}
|
2017-08-11 21:24:48 -04:00
|
|
|
|
2017-08-14 21:08:42 -04:00
|
|
|
decoded := encoded
|
|
|
|
if !test.shouldFail {
|
|
|
|
if test.reconstructParity {
|
|
|
|
for j := range decoded {
|
|
|
|
if decoded[j] == nil {
|
|
|
|
t.Errorf("Test %d: failed to reconstruct shard %d", i, j)
|
|
|
|
}
|
2017-08-11 21:24:48 -04:00
|
|
|
}
|
2017-08-14 21:08:42 -04:00
|
|
|
} else {
|
|
|
|
for j := range decoded[:test.dataBlocks] {
|
|
|
|
if decoded[j] == nil {
|
|
|
|
t.Errorf("Test %d: failed to reconstruct data shard %d", i, j)
|
|
|
|
}
|
2017-08-11 21:24:48 -04:00
|
|
|
}
|
2016-06-26 06:32:49 -04:00
|
|
|
}
|
|
|
|
|
2017-08-14 21:08:42 -04:00
|
|
|
decodedData := new(bytes.Buffer)
|
2018-04-05 18:04:40 -04:00
|
|
|
if _, err = writeDataBlocks(context.Background(), decodedData, decoded, test.dataBlocks, 0, int64(len(data))); err != nil {
|
2017-08-14 21:08:42 -04:00
|
|
|
t.Errorf("Test %d: failed to write data blocks: %v", i, err)
|
|
|
|
}
|
|
|
|
if !bytes.Equal(decodedData.Bytes(), data) {
|
|
|
|
t.Errorf("Test %d: Decoded data does not match original data: got: %v want: %v", i, decodedData.Bytes(), data)
|
2016-06-26 06:32:49 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2016-07-14 17:59:01 -04:00
|
|
|
|
2016-07-19 02:56:16 -04:00
|
|
|
// erasureTestSetup holds the layout parameters and temporary disks shared
// by the erasureCreateFile and erasureReadFile tests.
type erasureTestSetup struct {
	dataBlocks   int          // number of data shards
	parityBlocks int          // number of parity shards
	blockSize    int64        // erasure block size used by the tests
	diskPaths    []string     // temp directories backing each disk; removed by Remove()
	disks        []StorageAPI // storage endpoints, one per entry in diskPaths
}
|
|
|
|
|
2016-07-19 02:56:16 -04:00
|
|
|
// Removes the temporary disk directories.
|
|
|
|
func (e erasureTestSetup) Remove() {
|
|
|
|
for _, path := range e.diskPaths {
|
2017-08-12 22:25:43 -04:00
|
|
|
os.RemoveAll(path)
|
2016-07-19 02:56:16 -04:00
|
|
|
}
|
2016-07-14 17:59:01 -04:00
|
|
|
}
|
|
|
|
|
2016-07-19 02:56:16 -04:00
|
|
|
// Returns an initialized setup for erasure tests.
|
|
|
|
func newErasureTestSetup(dataBlocks int, parityBlocks int, blockSize int64) (*erasureTestSetup, error) {
|
2016-07-14 17:59:01 -04:00
|
|
|
diskPaths := make([]string, dataBlocks+parityBlocks)
|
|
|
|
disks := make([]StorageAPI, len(diskPaths))
|
2016-07-30 04:26:19 -04:00
|
|
|
var err error
|
2016-07-14 17:59:01 -04:00
|
|
|
for i := range diskPaths {
|
2020-06-12 23:04:01 -04:00
|
|
|
disks[i], diskPaths[i], err = newXLStorageTestSetup()
|
2016-07-14 17:59:01 -04:00
|
|
|
if err != nil {
|
2016-07-19 02:56:16 -04:00
|
|
|
return nil, err
|
2016-07-14 17:59:01 -04:00
|
|
|
}
|
2020-09-04 12:45:06 -04:00
|
|
|
err = disks[i].MakeVol(context.Background(), "testbucket")
|
2016-07-14 17:59:01 -04:00
|
|
|
if err != nil {
|
2016-07-19 02:56:16 -04:00
|
|
|
return nil, err
|
2016-07-14 17:59:01 -04:00
|
|
|
}
|
|
|
|
}
|
2016-07-19 02:56:16 -04:00
|
|
|
return &erasureTestSetup{dataBlocks, parityBlocks, blockSize, diskPaths, disks}, nil
|
2016-07-14 17:59:01 -04:00
|
|
|
}
|