/*
 * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"context"
	"crypto/rand"
	"os"
	"path"
	"reflect"
	"testing"
	"time"

	"github.com/dustin/go-humanize"
	"github.com/minio/minio/pkg/madmin"
)

// Tests both object and bucket healing.
func TestHealing(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

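	// Bring up an object layer backed by 16 erasure-coded disks on temporary directories.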
	obj, fsDirs, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatal(err)
	}
	defer obj.Shutdown(context.Background())
	defer removeRoots(fsDirs)

	z := obj.(*erasureServerPools)
	er := z.serverPools[0].sets[0]

	// Create "bucket"
	err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
	if err != nil {
		t.Fatal(err)
	}

	bucket := "bucket"
	object := "object"

	data := make([]byte, 1*humanize.MiByte)
	length := int64(len(data))
	_, err = rand.Read(data)
	if err != nil {
		t.Fatal(err)
	}

	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), length, "", ""), ObjectOptions{})
	if err != nil {
		t.Fatal(err)
	}

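	// Capture the object's metadata from the first disk before simulating any failure; healing should restore exactly this state.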
	disk := er.getDisks()[0]
	fileInfoPreHeal, err := disk.ReadVersion(context.Background(), bucket, object, "")
	if err != nil {
		t.Fatal(err)
	}

	// Remove the object - to simulate the case where the disk was down when the object
	// was created.
	err = removeAll(pathJoin(disk.String(), bucket, object))
	if err != nil {
		t.Fatal(err)
	}

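	// Heal with a normal scan; the copy removed from the first disk should be reconstructed from the remaining disks.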
	_, err = er.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan})
	if err != nil {
		t.Fatal(err)
	}

	fileInfoPostHeal, err := disk.ReadVersion(context.Background(), bucket, object, "")
	if err != nil {
		t.Fatal(err)
	}

	// After healing, the meta file should match the one captured before the removal.
	if !reflect.DeepEqual(fileInfoPreHeal, fileInfoPostHeal) {
		t.Fatal("HealObject failed")
	}

	err = os.RemoveAll(path.Join(fsDirs[0], bucket, object, xlStorageFormatFile))
	if err != nil {
		t.Fatal(err)
	}

	// Write xl.meta with a different modtime to simulate the case where a disk had
	// gone down when an object was replaced by a new object.
	fileInfoOutDated := fileInfoPreHeal
	fileInfoOutDated.ModTime = time.Now()
	err = disk.WriteMetadata(context.Background(), bucket, object, fileInfoOutDated)
	if err != nil {
		t.Fatal(err)
	}

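	// Heal again, this time with a deep scan; the disk holding the outdated metadata should be rewritten to match the quorum copy.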
	_, err = er.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealDeepScan})
	if err != nil {
		t.Fatal(err)
	}

	fileInfoPostHeal, err = disk.ReadVersion(context.Background(), bucket, object, "")
	if err != nil {
		t.Fatal(err)
	}

	// After healing, the meta file should again match the pre-heal state.
	if !reflect.DeepEqual(fileInfoPreHeal, fileInfoPostHeal) {
		t.Fatal("HealObject failed")
	}

	// Remove the bucket - to simulate the case where bucket was
	// created when the disk was down.
	err = os.RemoveAll(path.Join(fsDirs[0], bucket))
	if err != nil {
		t.Fatal(err)
	}
	// This would create the bucket.
	_, err = er.HealBucket(ctx, bucket, madmin.HealOpts{
		DryRun: false,
		Remove: false,
	})
	if err != nil {
		t.Fatal(err)
	}
	// Stat the bucket to make sure that it was created.
	_, err = er.getDisks()[0].StatVol(context.Background(), bucket)
	if err != nil {
		t.Fatal(err)
	}
}

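// TestHealObjectCorrupted verifies healing of an object whose backend files
// have been removed, truncated, or overwritten with corrupted data.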
func TestHealObjectCorrupted(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

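	// Reset the global heal state so heal sequences left over from other tests don't interfere.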
	resetGlobalHealState()
	defer resetGlobalHealState()

	nDisks := 16
	fsDirs, err := getRandomDisks(nDisks)
	if err != nil {
		t.Fatal(err)
	}

	defer removeRoots(fsDirs)

	// Everything is fine at this point, initObjectLayer should succeed.
	objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(fsDirs...))
	if err != nil {
		t.Fatal(err)
	}

	bucket := "bucket"
	object := "object"
	data := bytes.Repeat([]byte("a"), 5*1024*1024)
	var opts ObjectOptions

	err = objLayer.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
	if err != nil {
		t.Fatalf("Failed to make a bucket - %v", err)
	}

	// Create an object with multiple parts uploaded in decreasing
	// part number.
	uploadID, err := objLayer.NewMultipartUpload(ctx, bucket, object, opts)
	if err != nil {
		t.Fatalf("Failed to create a multipart upload - %v", err)
	}

	var uploadedParts []CompletePart
	for _, partID := range []int{2, 1} {
		pInfo, err1 := objLayer.PutObjectPart(ctx, bucket, object, uploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
		if err1 != nil {
			t.Fatalf("Failed to upload a part - %v", err1)
		}
		uploadedParts = append(uploadedParts, CompletePart{
			PartNumber: pInfo.PartNumber,
			ETag:       pInfo.ETag,
		})
	}

	_, err = objLayer.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{})
	if err != nil {
		t.Fatalf("Failed to complete multipart upload - %v", err)
	}

	// Test 1: Remove the object backend files from the first disk.
	z := objLayer.(*erasureServerPools)
	er := z.serverPools[0].sets[0]
	erasureDisks := er.getDisks()
	firstDisk := erasureDisks[0]
	err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false)
	if err != nil {
		t.Fatalf("Failed to delete a file - %v", err)
	}

	_, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan})
	if err != nil {
		t.Fatalf("Failed to heal object - %v", err)
	}

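	// Collect the object's FileInfo from every disk and derive the latest version; this is the reference state for the corruption tests below.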
	fileInfos, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "")
	fi, err := getLatestFileInfo(ctx, fileInfos, errs)
	if err != nil {
		t.Fatalf("Failed to getLatestFileInfo - %v", err)
	}

	if err = firstDisk.CheckFile(context.Background(), bucket, object); err != nil {
		t.Errorf("Expected xl.meta file to be present but stat failed - %v", err)
	}

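	// Test 2: Truncate part.1 on the first disk to zero bytes; a deep heal should detect the length mismatch and restore the part.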
	err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), false)
	if err != nil {
		t.Errorf("Failure during deleting part.1 - %v", err)
	}

	err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte{})
	if err != nil {
		t.Errorf("Failure during creating part.1 - %v", err)
	}

	_, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
	if err != nil {
		t.Errorf("Expected nil but received %v", err)
	}

	fileInfos, errs = readAllFileInfo(ctx, erasureDisks, bucket, object, "")
	nfi, err := getLatestFileInfo(ctx, fileInfos, errs)
	if err != nil {
		t.Fatalf("Failed to getLatestFileInfo - %v", err)
	}

	if !reflect.DeepEqual(fi, nfi) {
		t.Fatalf("FileInfo not equal after healing")
	}

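	// Test 3: Overwrite part.1 on the first disk with data of the correct length but wrong content (simulated bitrot); a deep heal should detect and repair it.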
	err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), false)
	if err != nil {
		t.Errorf("Failure during deleting part.1 - %v", err)
	}

	bdata := bytes.Repeat([]byte("b"), int(nfi.Size))
	err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), bdata)
	if err != nil {
		t.Errorf("Failure during creating part.1 - %v", err)
	}

	_, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
	if err != nil {
		t.Errorf("Expected nil but received %v", err)
	}

	fileInfos, errs = readAllFileInfo(ctx, erasureDisks, bucket, object, "")
	nfi, err = getLatestFileInfo(ctx, fileInfos, errs)
	if err != nil {
		t.Fatalf("Failed to getLatestFileInfo - %v", err)
	}

	if !reflect.DeepEqual(fi, nfi) {
		t.Fatalf("FileInfo not equal after healing")
	}

	// Test 4: Remove xl.meta from more than read-quorum disks to create a
	// corrupted situation, then check that HealObject returns an error.
	for i := 0; i <= len(er.getDisks())/2; i++ {
		er.getDisks()[i].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false)
	}

	// Try healing now, expect to receive errFileNotFound.
	_, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
	if err != nil {
		if _, ok := err.(ObjectNotFound); !ok {
			t.Errorf("Expect %v but received %v", ObjectNotFound{Bucket: bucket, Object: object}, err)
		}
	}

	// Since a majority of xl.meta files are unavailable, the object should have been deleted.
	_, err = objLayer.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
	if _, ok := err.(ObjectNotFound); !ok {
		t.Errorf("Expect %v but received %v", ObjectNotFound{Bucket: bucket, Object: object}, err)
	}
}

// Tests healing of an object.
func TestHealObjectErasure(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	nDisks := 16
	fsDirs, err := getRandomDisks(nDisks)
	if err != nil {
		t.Fatal(err)
	}

	defer removeRoots(fsDirs)

	// Everything is fine at this point, initObjectLayer should succeed.
	obj, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(fsDirs...))
	if err != nil {
		t.Fatal(err)
	}

	bucket := "bucket"
	object := "object"
	data := bytes.Repeat([]byte("a"), 5*1024*1024)
	var opts ObjectOptions

	err = obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
	if err != nil {
		t.Fatalf("Failed to make a bucket - %v", err)
	}

	// Create an object with multiple parts uploaded in decreasing
	// part number.
	uploadID, err := obj.NewMultipartUpload(ctx, bucket, object, opts)
	if err != nil {
		t.Fatalf("Failed to create a multipart upload - %v", err)
	}

	var uploadedParts []CompletePart
	for _, partID := range []int{2, 1} {
		pInfo, err1 := obj.PutObjectPart(ctx, bucket, object, uploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
		if err1 != nil {
			t.Fatalf("Failed to upload a part - %v", err1)
		}
		uploadedParts = append(uploadedParts, CompletePart{
			PartNumber: pInfo.PartNumber,
			ETag:       pInfo.ETag,
		})
	}

	// Grab the erasure set and its first disk; the object's backend files
	// will be removed from this disk once the upload completes.
	z := obj.(*erasureServerPools)
	er := z.serverPools[0].sets[0]
	firstDisk := er.getDisks()[0]

	_, err = obj.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{})
	if err != nil {
		t.Fatalf("Failed to complete multipart upload - %v", err)
	}

	err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false)
	if err != nil {
		t.Fatalf("Failed to delete a file - %v", err)
	}

	_, err = obj.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan})
	if err != nil {
		t.Fatalf("Failed to heal object - %v", err)
	}

	if err = firstDisk.CheckFile(context.Background(), bucket, object); err != nil {
		t.Errorf("Expected xl.meta file to be present but stat failed - %v", err)
	}

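	// Override er.getDisks under the erasure-disks lock so that subsequent calls see a disk set with more than half the entries nil'ed out.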
	erasureDisks := er.getDisks()
	z.serverPools[0].erasureDisksMu.Lock()
	er.getDisks = func() []StorageAPI {
		// Nil more than half the disks, to remove write quorum.
		for i := 0; i <= len(erasureDisks)/2; i++ {
			erasureDisks[i] = nil
		}
		return erasureDisks
	}
	z.serverPools[0].erasureDisksMu.Unlock()

	// Try healing now. Each nil'ed disk surfaces errDiskNotFound, and since a
	// majority of xl.meta files are unavailable the object quorum can't be
	// read; the heal should fail with errErasureReadQuorum, returned to the
	// caller as InsufficientReadQuorum.
	_, err = obj.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealDeepScan})
	if _, ok := err.(InsufficientReadQuorum); !ok {
		t.Errorf("Expected %v but received %v", InsufficientReadQuorum{}, err)
	}
}

// Tests healing of empty directories
func TestHealEmptyDirectoryErasure(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	nDisks := 16
	fsDirs, err := getRandomDisks(nDisks)
	if err != nil {
		t.Fatal(err)
	}
	defer removeRoots(fsDirs)

	// Everything is fine at this point, initObjectLayer should succeed.
	obj, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(fsDirs...))
	if err != nil {
		t.Fatal(err)
	}

	bucket := "bucket"
	object := "empty-dir/"
	var opts ObjectOptions

	err = obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
	if err != nil {
		t.Fatalf("Failed to make a bucket - %v", err)
	}

	// Upload an empty directory
	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t,
		bytes.NewReader([]byte{}), 0, "", ""), opts)
	if err != nil {
		t.Fatal(err)
	}

	// Remove the object backend files from the first disk.
	z := obj.(*erasureServerPools)
	er := z.serverPools[0].sets[0]
	firstDisk := er.getDisks()[0]
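	// Directory objects are stored on disk under an encoded name, so delete the encoded form from the first disk.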
	err = firstDisk.DeleteVol(context.Background(), pathJoin(bucket, encodeDirObject(object)), true)
	if err != nil {
		t.Fatalf("Failed to delete a file - %v", err)
	}

	// Heal the object
	hr, err := obj.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan})
	if err != nil {
		t.Fatalf("Failed to heal object - %v", err)
	}

	// Check if the empty directory is restored on the first disk
	_, err = firstDisk.StatVol(context.Background(), pathJoin(bucket, encodeDirObject(object)))
	if err != nil {
		t.Fatalf("Expected object to be present but stat failed - %v", err)
	}

	// Check the state of the object in the first disk (should be missing)
	if hr.Before.Drives[0].State != madmin.DriveStateMissing {
		t.Fatalf("Unexpected drive state: %v", hr.Before.Drives[0].State)
	}

	// Check the state of all other disks (should be ok)
	for i, h := range append(hr.Before.Drives[1:], hr.After.Drives...) {
		if h.State != madmin.DriveStateOk {
			t.Fatalf("Unexpected drive state (%d): %v", i+1, h.State)
		}
	}

	// Heal the same object again
	hr, err = obj.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan})
	if err != nil {
		t.Fatalf("Failed to heal object - %v", err)
	}

	// Check that Before & After states are all okay
	for i, h := range append(hr.Before.Drives, hr.After.Drives...) {
		if h.State != madmin.DriveStateOk {
			t.Fatalf("Unexpected drive state (%d): %v", i+1, h.State)
		}
	}
}