/*
 * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"context"
	"path/filepath"
	"testing"

	"github.com/minio/minio/pkg/madmin"
)

// Tests undo of MakeBucket and validates that the undo completes successfully.
func TestUndoMakeBucket(t *testing.T) {
	nDisks := 16
	fsDirs, err := getRandomDisks(nDisks)
	if err != nil {
		t.Fatal(err)
	}
	defer removeRoots(fsDirs)

	// Initialize the object layer on the 16 disks.
	obj, _, err := initObjectLayer(mustGetNewEndpointList(fsDirs...))
	if err != nil {
		t.Fatal(err)
	}

	bucketName := getRandomBucketName()
	if err = obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
		t.Fatal(err)
	}
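
	// Undo the bucket creation on all the underlying storage disks.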
	xl := obj.(*xlObjects)
	undoMakeBucket(xl.storageDisks, bucketName)

	// Validate if bucket was deleted properly.
	_, err = obj.GetBucketInfo(context.Background(), bucketName)
	if err != nil {
		switch err.(type) {
		case BucketNotFound:
		default:
			t.Fatal(err)
		}
	}
}
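
// Tests healing of a corrupted object: first with xl.json missing from one
// disk, then with xl.json missing from a majority of disks.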
func TestHealObjectCorrupted(t *testing.T) {
	nDisks := 16
	fsDirs, err := getRandomDisks(nDisks)
	if err != nil {
		t.Fatal(err)
	}
	defer removeRoots(fsDirs)

	// Initialization of the object layer should succeed.
	obj, _, err := initObjectLayer(mustGetNewEndpointList(fsDirs...))
	if err != nil {
		t.Fatal(err)
	}

	bucket := "bucket"
	object := "object"
	data := bytes.Repeat([]byte("a"), 5*1024*1024)
	var opts ObjectOptions

	err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
	if err != nil {
		t.Fatalf("Failed to make a bucket - %v", err)
	}

	// Create an object with multiple parts uploaded in decreasing
	// part number.
	uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, opts)
	if err != nil {
		t.Fatalf("Failed to create a multipart upload - %v", err)
	}

	var uploadedParts []CompletePart
	for _, partID := range []int{2, 1} {
		pInfo, err1 := obj.PutObjectPart(context.Background(), bucket, object, uploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
		if err1 != nil {
			t.Fatalf("Failed to upload a part - %v", err1)
		}
		uploadedParts = append(uploadedParts, CompletePart{
			PartNumber: pInfo.PartNumber,
			ETag:       pInfo.ETag,
		})
	}

	_, err = obj.CompleteMultipartUpload(context.Background(), bucket, object, uploadID, uploadedParts, ObjectOptions{})
	if err != nil {
		t.Fatalf("Failed to complete multipart upload - %v", err)
	}

	// Remove the object backend files from the first disk.
	xl := obj.(*xlObjects)
	firstDisk := xl.storageDisks[0]
	err = firstDisk.DeleteFile(bucket, filepath.Join(object, xlMetaJSONFile))
	if err != nil {
		t.Fatalf("Failed to delete a file - %v", err)
	}
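
	// Heal the object with a normal scan; healing should succeed.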
	_, err = obj.HealObject(context.Background(), bucket, object, false, false, madmin.HealNormalScan)
	if err != nil {
		t.Fatalf("Failed to heal object - %v", err)
	}
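
	// Verify that healing restored xl.json on the first disk.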
	_, err = firstDisk.StatFile(bucket, filepath.Join(object, xlMetaJSONFile))
	if err != nil {
		t.Errorf("Expected xl.json file to be present but stat failed - %v", err)
	}

	// Delete xl.json from more than read quorum number of disks, to create a corrupted situation.
	for i := 0; i <= len(xl.storageDisks)/2; i++ {
		xl.storageDisks[i].DeleteFile(bucket, filepath.Join(object, xlMetaJSONFile))
	}

	// Try healing now; since a majority of xl.json files are missing, the heal
	// is expected to succeed by deleting the dangling object.
	_, err = obj.HealObject(context.Background(), bucket, object, false, true, madmin.HealDeepScan)
	if err != nil {
		t.Errorf("Expected nil but received %v", err)
	}

	// Since a majority of xl.jsons are not available, the object should have
	// been deleted successfully.
	_, err = obj.GetObjectInfo(context.Background(), bucket, object, ObjectOptions{})
	if _, ok := err.(ObjectNotFound); !ok {
		t.Errorf("Expect %v but received %v", ObjectNotFound{Bucket: bucket, Object: object}, err)
	}
}

// Tests healing of an object.
func TestHealObjectXL(t *testing.T) {
	nDisks := 16
	fsDirs, err := getRandomDisks(nDisks)
	if err != nil {
		t.Fatal(err)
	}
	defer removeRoots(fsDirs)

	// Initialization of the object layer should succeed.
	obj, _, err := initObjectLayer(mustGetNewEndpointList(fsDirs...))
	if err != nil {
		t.Fatal(err)
	}

	bucket := "bucket"
	object := "object"
	data := bytes.Repeat([]byte("a"), 5*1024*1024)
	var opts ObjectOptions

	err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
	if err != nil {
		t.Fatalf("Failed to make a bucket - %v", err)
	}

	// Create an object with multiple parts uploaded in decreasing
	// part number.
	uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, opts)
	if err != nil {
		t.Fatalf("Failed to create a multipart upload - %v", err)
	}

	var uploadedParts []CompletePart
	for _, partID := range []int{2, 1} {
		pInfo, err1 := obj.PutObjectPart(context.Background(), bucket, object, uploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
		if err1 != nil {
			t.Fatalf("Failed to upload a part - %v", err1)
		}
		uploadedParts = append(uploadedParts, CompletePart{
			PartNumber: pInfo.PartNumber,
			ETag:       pInfo.ETag,
		})
	}

	_, err = obj.CompleteMultipartUpload(context.Background(), bucket, object, uploadID, uploadedParts, ObjectOptions{})
	if err != nil {
		t.Fatalf("Failed to complete multipart upload - %v", err)
	}

	// Remove the object backend files from the first disk.
	xl := obj.(*xlObjects)
	firstDisk := xl.storageDisks[0]
	err = firstDisk.DeleteFile(bucket, filepath.Join(object, xlMetaJSONFile))
	if err != nil {
		t.Fatalf("Failed to delete a file - %v", err)
	}
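
	// Heal the object with a normal scan; healing should succeed.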
	_, err = obj.HealObject(context.Background(), bucket, object, false, false, madmin.HealNormalScan)
	if err != nil {
		t.Fatalf("Failed to heal object - %v", err)
	}
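
	// Verify that healing restored xl.json on the first disk.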
	_, err = firstDisk.StatFile(bucket, filepath.Join(object, xlMetaJSONFile))
	if err != nil {
		t.Errorf("Expected xl.json file to be present but stat failed - %v", err)
	}

	// Nil out more than half of the disks, to remove write quorum.
	for i := 0; i <= len(xl.storageDisks)/2; i++ {
		xl.storageDisks[i] = nil
	}

	// Try healing now; since a majority of xl.jsons are not available, object
	// quorum can't be read properly and the heal should fail with
	// InsufficientReadQuorum.
	_, err = obj.HealObject(context.Background(), bucket, object, false, false, madmin.HealDeepScan)
	if _, ok := err.(InsufficientReadQuorum); !ok {
		t.Errorf("Expected %v but received %v", InsufficientReadQuorum{}, err)
	}
}