Mirror of https://github.com/minio/minio.git (synced 2024-12-25 22:55:54 -05:00)

Commit 8975da4e84
This is an enhancement to the XL/distributed-XL mode; FS mode is unaffected. The ReadFileWithVerify storage-layer call is similar to ReadFile, with the additional functionality of performing bit-rot checking: it accepts extra parameters for the hashing algorithm to use and the expected hex-encoded hash string. This patch provides a significant performance improvement because it:

1. combines reading the file (during erasure decoding/reconstruction) with bit-rot verification;
2. limits the number of file reads; and
3. avoids transferring the file over the network for bit-rot verification.

The ReadFile API is now implemented as ReadFileWithVerify with empty hashing arguments. Credits to AB and Harsha for the algorithmic improvement. Fixes #4236.
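For reference, the test file below exercises the new storage-layer call roughly as in this sketch. It is illustrative only: the helper function is hypothetical, while StorageAPI, HashSha256, and the volume/path/content values are the ones used by the test; see the test itself for the authoritative usage.

// readTrailingBytes is a hypothetical helper showing the shape of the call:
// it reads 5 bytes at offset 7 from "path" in volume "existent" while the
// storage layer verifies the file against the expected SHA-256 hash.
func readTrailingBytes(disk StorageAPI, expectedHexHash string) ([]byte, error) {
	buf := make([]byte, 5)
	// Same arguments as ReadFile(volume, path, offset, buf), plus the hashing
	// algorithm and the expected hex-encoded hash for bit-rot verification.
	if _, err := disk.ReadFileWithVerify("existent", "path", 7, buf, HashSha256, expectedHexHash); err != nil {
		return nil, err
	}
	return buf, nil
}

Passing empty hashing arguments degenerates to the plain ReadFile behaviour described above.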
426 lines · 11 KiB · Go

/*
 * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"reflect"
	"testing"
	"time"
)

// Tests retry storage.
func TestRetryStorage(t *testing.T) {
	root, err := newTestConfig(globalMinioDefaultRegion)
	if err != nil {
		t.Fatal(err)
	}
	defer removeAll(root)

	originalStorageDisks, disks := prepareXLStorageDisks(t)
	defer removeRoots(disks)

	var storageDisks = make([]StorageAPI, len(originalStorageDisks))
	for i := range originalStorageDisks {
		retryDisk, ok := originalStorageDisks[i].(*retryStorage)
		if !ok {
			t.Fatal("storage disk is not *retryStorage type")
		}
		storageDisks[i] = &retryStorage{
			remoteStorage: newNaughtyDisk(retryDisk, map[int]error{
				1: errDiskNotFound,
			}, nil),
			maxRetryAttempts: 1,
			retryUnit:        time.Millisecond,
			retryCap:         time.Millisecond * 10,
		}
	}

	// Validate all the conditions for retrying calls.

	storageDisks = make([]StorageAPI, len(originalStorageDisks))
	for i := range originalStorageDisks {
		retryDisk, ok := originalStorageDisks[i].(*retryStorage)
		if !ok {
			t.Fatal("storage disk is not *retryStorage type")
		}
		storageDisks[i] = &retryStorage{
			remoteStorage: newNaughtyDisk(retryDisk, map[int]error{
				1: errDiskNotFound,
			}, nil),
			maxRetryAttempts: 1,
			retryUnit:        time.Millisecond,
			retryCap:         time.Millisecond * 10,
		}
	}
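
	// Init is expected to surface the errDiskNotFound injected for the first
	// call on each naughty disk.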
	for _, disk := range storageDisks {
		err = disk.Init()
		if err != errDiskNotFound {
			t.Fatal("Expected errDiskNotFound, got", err)
		}
	}
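
	// A follow-up DiskInfo on the same disks should succeed.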
	for _, disk := range storageDisks {
		_, err = disk.DiskInfo()
		if err != nil {
			t.Fatal(err)
		}
	}

	storageDisks = make([]StorageAPI, len(originalStorageDisks))
	for i := range originalStorageDisks {
		retryDisk, ok := originalStorageDisks[i].(*retryStorage)
		if !ok {
			t.Fatal("storage disk is not *retryStorage type")
		}
		storageDisks[i] = &retryStorage{
			remoteStorage: newNaughtyDisk(retryDisk, map[int]error{
				1: errDiskNotFound,
			}, nil),
			maxRetryAttempts: 1,
			retryUnit:        time.Millisecond,
			retryCap:         time.Millisecond * 10,
		}
	}
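
	// MakeVol should succeed, and StatVol should not report the new volume
	// as missing.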
	for _, disk := range storageDisks {
		if err = disk.MakeVol("existent"); err != nil {
			t.Fatal(err)
		}
		if _, err = disk.StatVol("existent"); err == errVolumeNotFound {
			t.Fatal(err)
		}
	}

	storageDisks = make([]StorageAPI, len(originalStorageDisks))
	for i := range originalStorageDisks {
		retryDisk, ok := originalStorageDisks[i].(*retryStorage)
		if !ok {
			t.Fatal("storage disk is not *retryStorage type")
		}
		storageDisks[i] = &retryStorage{
			remoteStorage: newNaughtyDisk(retryDisk, map[int]error{
				1: errDiskNotFound,
			}, nil),
			maxRetryAttempts: 1,
			retryUnit:        time.Millisecond,
			retryCap:         time.Millisecond * 10,
		}
	}
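
	// StatVol should still not report the volume as missing after a fresh
	// fault is injected.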
	for _, disk := range storageDisks {
		if _, err = disk.StatVol("existent"); err == errVolumeNotFound {
			t.Fatal(err)
		}
	}

	storageDisks = make([]StorageAPI, len(originalStorageDisks))
	for i := range originalStorageDisks {
		retryDisk, ok := originalStorageDisks[i].(*retryStorage)
		if !ok {
			t.Fatal("storage disk is not *retryStorage type")
		}
		storageDisks[i] = &retryStorage{
			remoteStorage: newNaughtyDisk(retryDisk, map[int]error{
				1: errDiskNotFound,
			}, nil),
			maxRetryAttempts: 1,
			retryUnit:        time.Millisecond,
			retryCap:         time.Millisecond * 10,
		}
	}
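
	// ListVols should succeed.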
	for _, disk := range storageDisks {
		if _, err = disk.ListVols(); err != nil {
			t.Fatal(err)
		}
	}

	storageDisks = make([]StorageAPI, len(originalStorageDisks))
	for i := range originalStorageDisks {
		retryDisk, ok := originalStorageDisks[i].(*retryStorage)
		if !ok {
			t.Fatal("storage disk is not *retryStorage type")
		}
		storageDisks[i] = &retryStorage{
			remoteStorage: newNaughtyDisk(retryDisk, map[int]error{
				1: errDiskNotFound,
			}, nil),
			maxRetryAttempts: 1,
			retryUnit:        time.Millisecond,
			retryCap:         time.Millisecond * 10,
		}
	}
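
	// DeleteVol should succeed, and the disk's String representation should
	// be non-empty.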
	for _, disk := range storageDisks {
		if err = disk.DeleteVol("existent"); err != nil {
			t.Fatal(err)
		}
		if str := disk.String(); str == "" {
			t.Fatal("String method for disk cannot be empty.")
		}
	}

	storageDisks = make([]StorageAPI, len(originalStorageDisks))
	for i := range originalStorageDisks {
		retryDisk, ok := originalStorageDisks[i].(*retryStorage)
		if !ok {
			t.Fatal("storage disk is not *retryStorage type")
		}
		storageDisks[i] = &retryStorage{
			remoteStorage: newNaughtyDisk(retryDisk, map[int]error{
				1: errDiskNotFound,
			}, nil),
			maxRetryAttempts: 1,
			retryUnit:        time.Millisecond,
			retryCap:         time.Millisecond * 10,
		}
	}
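
	// Recreate the volume for the file operations that follow.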
	for _, disk := range storageDisks {
		if err = disk.MakeVol("existent"); err != nil {
			t.Fatal(err)
		}
	}

	storageDisks = make([]StorageAPI, len(originalStorageDisks))
	for i := range originalStorageDisks {
		retryDisk, ok := originalStorageDisks[i].(*retryStorage)
		if !ok {
			t.Fatal("storage disk is not *retryStorage type")
		}
		storageDisks[i] = &retryStorage{
			remoteStorage: newNaughtyDisk(retryDisk, map[int]error{
				1: errDiskNotFound,
			}, nil),
			maxRetryAttempts: 1,
			retryUnit:        time.Millisecond,
			retryCap:         time.Millisecond * 10,
		}
	}
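
	// PrepareFile should succeed.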
	for _, disk := range storageDisks {
		if err = disk.PrepareFile("existent", "path", 10); err != nil {
			t.Fatal(err)
		}
	}

	storageDisks = make([]StorageAPI, len(originalStorageDisks))
	for i := range originalStorageDisks {
		retryDisk, ok := originalStorageDisks[i].(*retryStorage)
		if !ok {
			t.Fatal("storage disk is not *retryStorage type")
		}
		storageDisks[i] = &retryStorage{
			remoteStorage: newNaughtyDisk(retryDisk, map[int]error{
				1: errDiskNotFound,
			}, nil),
			maxRetryAttempts: 1,
			retryUnit:        time.Millisecond,
			retryCap:         time.Millisecond * 10,
		}
	}
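
	// AppendFile should append "Hello, World" to the prepared file.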
	for _, disk := range storageDisks {
		if err = disk.AppendFile("existent", "path", []byte("Hello, World")); err != nil {
			t.Fatal(err)
		}
	}

	storageDisks = make([]StorageAPI, len(originalStorageDisks))
	for i := range originalStorageDisks {
		retryDisk, ok := originalStorageDisks[i].(*retryStorage)
		if !ok {
			t.Fatal("storage disk is not *retryStorage type")
		}
		storageDisks[i] = &retryStorage{
			remoteStorage: newNaughtyDisk(retryDisk, map[int]error{
				1: errDiskNotFound,
			}, nil),
			maxRetryAttempts: 1,
			retryUnit:        time.Millisecond,
			retryCap:         time.Millisecond * 10,
		}
	}
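
	// ReadAll should return the full appended content.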
	for _, disk := range storageDisks {
		var buf1 []byte
		if buf1, err = disk.ReadAll("existent", "path"); err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(buf1, []byte("Hello, World")) {
			t.Fatalf("Expected `Hello, World`, got %s", string(buf1))
		}
	}

	storageDisks = make([]StorageAPI, len(originalStorageDisks))
	for i := range originalStorageDisks {
		retryDisk, ok := originalStorageDisks[i].(*retryStorage)
		if !ok {
			t.Fatal("storage disk is not *retryStorage type")
		}
		storageDisks[i] = &retryStorage{
			remoteStorage: newNaughtyDisk(retryDisk, map[int]error{
				1: errDiskNotFound,
			}, nil),
			maxRetryAttempts: 1,
			retryUnit:        time.Millisecond,
			retryCap:         time.Millisecond * 10,
		}
	}
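
	// ReadFile at offset 7 should return the trailing "World" bytes.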
	for _, disk := range storageDisks {
		var buf2 = make([]byte, 5)
		var n int64
		if n, err = disk.ReadFile("existent", "path", 7, buf2); err != nil {
			t.Fatal(err)
		}
		if n != 5 {
			t.Fatalf("Expected 5, got %d", n)
		}
		if !bytes.Equal(buf2, []byte("World")) {
			t.Fatalf("Expected `World`, got %s", string(buf2))
		}
	}
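
	// Helper to compute the hex-encoded SHA-256 of a string, used below as
	// the expected bit-rot hash.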
	sha256Hash := func(s string) string {
		k := sha256.Sum256([]byte(s))
		return hex.EncodeToString(k[:])
	}
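
	// ReadFileWithVerify should return the same bytes as ReadFile while
	// verifying the file content against its SHA-256 hash.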
	for _, disk := range storageDisks {
		var buf2 = make([]byte, 5)
		var n int64
		if n, err = disk.ReadFileWithVerify("existent", "path", 7, buf2,
			HashSha256, sha256Hash("Hello, World")); err != nil {
			t.Fatal(err)
		}
		if n != 5 {
			t.Fatalf("Expected 5, got %d", n)
		}
		if !bytes.Equal(buf2, []byte("World")) {
			t.Fatalf("Expected `World`, got %s", string(buf2))
		}
	}

	storageDisks = make([]StorageAPI, len(originalStorageDisks))
	for i := range originalStorageDisks {
		retryDisk, ok := originalStorageDisks[i].(*retryStorage)
		if !ok {
			t.Fatal("storage disk is not *retryStorage type")
		}
		storageDisks[i] = &retryStorage{
			remoteStorage: newNaughtyDisk(retryDisk, map[int]error{
				1: errDiskNotFound,
			}, nil),
			maxRetryAttempts: 1,
			retryUnit:        time.Millisecond,
			retryCap:         time.Millisecond * 10,
		}
	}
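
	// RenameFile should succeed, and the renamed file should be stat-able.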
	for _, disk := range storageDisks {
		if err = disk.RenameFile("existent", "path", "existent", "new-path"); err != nil {
			t.Fatal(err)
		}
		if _, err = disk.StatFile("existent", "new-path"); err != nil {
			t.Fatal(err)
		}
	}

	storageDisks = make([]StorageAPI, len(originalStorageDisks))
	for i := range originalStorageDisks {
		retryDisk, ok := originalStorageDisks[i].(*retryStorage)
		if !ok {
			t.Fatal("storage disk is not *retryStorage type")
		}
		storageDisks[i] = &retryStorage{
			remoteStorage: newNaughtyDisk(retryDisk, map[int]error{
				1: errDiskNotFound,
			}, nil),
			maxRetryAttempts: 1,
			retryUnit:        time.Millisecond,
			retryCap:         time.Millisecond * 10,
		}
	}
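
	// StatFile on the renamed path should still succeed after a fresh fault
	// is injected.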
	for _, disk := range storageDisks {
		if _, err = disk.StatFile("existent", "new-path"); err != nil {
			t.Fatal(err)
		}
	}

	storageDisks = make([]StorageAPI, len(originalStorageDisks))
	for i := range originalStorageDisks {
		retryDisk, ok := originalStorageDisks[i].(*retryStorage)
		if !ok {
			t.Fatal("storage disk is not *retryStorage type")
		}
		storageDisks[i] = &retryStorage{
			remoteStorage: newNaughtyDisk(retryDisk, map[int]error{
				1: errDiskNotFound,
			}, nil),
			maxRetryAttempts: 1,
			retryUnit:        time.Millisecond,
			retryCap:         time.Millisecond * 10,
		}
	}
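
	// ListDir should list exactly the renamed entry.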
	for _, disk := range storageDisks {
		var entries []string
		if entries, err = disk.ListDir("existent", ""); err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(entries, []string{"new-path"}) {
			t.Fatalf("Expected []string{\"new-path\"}, got %s", entries)
		}
	}

	storageDisks = make([]StorageAPI, len(originalStorageDisks))
	for i := range originalStorageDisks {
		retryDisk, ok := originalStorageDisks[i].(*retryStorage)
		if !ok {
			t.Fatal("storage disk is not *retryStorage type")
		}
		storageDisks[i] = &retryStorage{
			remoteStorage: newNaughtyDisk(retryDisk, map[int]error{
				1: errDiskNotFound,
			}, nil),
			maxRetryAttempts: 1,
			retryUnit:        time.Millisecond,
			retryCap:         time.Millisecond * 10,
		}
	}
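
	// Clean up: DeleteFile and DeleteVol should succeed.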
	for _, disk := range storageDisks {
		if err = disk.DeleteFile("existent", "new-path"); err != nil {
			t.Fatal(err)
		}
		if err = disk.DeleteVol("existent"); err != nil {
			t.Fatal(err)
		}
	}
}