tests: Implemented more tests for fs-v1*.go (#2686)

This commit is contained in:
Anis Elleuch 2016-09-16 21:06:49 +01:00 committed by Harshavardhana
parent 7d37dea449
commit b89a1cd482
10 changed files with 803 additions and 24 deletions
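All of the new tests follow the same fault-injection recipe: build a temporary FS backend, swap its posix storage for a naughtyDisk programmed to fail, then assert that the object layer surfaces errFaultyDisk through errorCause. The sketch below is not part of the commit; it only restates that recipe using helpers that appear in the diff (newFSObjects, nextSuffix, removeAll, newNaughtyDisk, errorCause, errFaultyDisk):

func TestFaultyDiskSketch(t *testing.T) {
	// Temporary FS backend, removed when the test finishes.
	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
	defer removeAll(disk)
	obj, err := newFSObjects(disk)
	if err != nil {
		t.Fatal("Cannot create a new FS object: ", err)
	}
	fs := obj.(fsObjects)
	if err = obj.MakeBucket("bucket"); err != nil {
		t.Fatal("Unexpected err: ", err)
	}
	// Replace the healthy posix disk with one that fails every storage API call.
	fsStorage := fs.storage.(*posix)
	fs.storage = newNaughtyDisk(fsStorage, nil, errFaultyDisk)
	// The injected error should come back through the object layer.
	if _, err = fs.GetBucketInfo("bucket"); errorCause(err) != errFaultyDisk {
		t.Fatal("Expected errFaultyDisk, got: ", err)
	}
}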

View File

@ -17,7 +17,9 @@
package cmd
import (
"bytes"
"os"
"path/filepath"
"testing"
)
@ -64,3 +66,93 @@ func TestHasExtendedHeader(t *testing.T) {
}
}
}
// TestReadFSMetadata - tests readFSMetadata with a healthy and a faulty disk
func TestReadFSMetadata(t *testing.T) {
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj, err := newFSObjects(disk)
if err != nil {
t.Fatal("Unexpected err: ", err)
}
fs := obj.(fsObjects)
bucketName := "bucket"
objectName := "object"
if err = obj.MakeBucket(bucketName); err != nil {
t.Fatal("Unexpected err: ", err)
}
if _, err = obj.PutObject(bucketName, objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")),
map[string]string{"X-Amz-Meta-AppId": "a"}); err != nil {
t.Fatal("Unexpected err: ", err)
}
// Construct the full path of fs.json
fsPath := "buckets/" + bucketName + "/" + objectName + "/fs.json"
// Regular fs metadata reading, no errors expected
if _, err = readFSMetadata(fs.storage, ".minio.sys", fsPath); err != nil {
t.Fatal("Unexpected error ", err)
}
// Corrupted fs.json
if err = fs.storage.AppendFile(".minio.sys", fsPath, []byte{'a'}); err != nil {
t.Fatal("Unexpected error ", err)
}
if _, err = readFSMetadata(fs.storage, ".minio.sys", fsPath); err == nil {
t.Fatal("Should fail", err)
}
// Test with a faulty disk
fsStorage := fs.storage.(*posix)
naughty := newNaughtyDisk(fsStorage, nil, errFaultyDisk)
fs.storage = naughty
if _, err = readFSMetadata(fs.storage, ".minio.sys", fsPath); errorCause(err) != errFaultyDisk {
t.Fatal("Should fail", err)
}
}
// TestWriteFSMetadata - tests writeFSMetadata with healthy and faulty disks
func TestWriteFSMetadata(t *testing.T) {
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj, err := newFSObjects(disk)
if err != nil {
t.Fatal("Unexpected err: ", err)
}
fs := obj.(fsObjects)
bucketName := "bucket"
objectName := "object"
if err = obj.MakeBucket(bucketName); err != nil {
t.Fatal("Unexpected err: ", err)
}
if _, err = obj.PutObject(bucketName, objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")),
map[string]string{"X-Amz-Meta-AppId": "a"}); err != nil {
t.Fatal("Unexpected err: ", err)
}
// Construct the complete path of fs.json
fsPath := "buckets/" + bucketName + "/" + objectName + "/fs.json"
// Fs metadata reading, no errors expected (healthy disk)
fsMeta, err := readFSMetadata(fs.storage, ".minio.sys", fsPath)
if err != nil {
t.Fatal("Unexpected error ", err)
}
// Write the metadata to a faulty disk, expecting errFaultyDisk
fsStorage := fs.storage.(*posix)
for i := 1; i <= 2; i++ {
naughty := newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk, i + 1: errFaultyDisk}, nil)
fs.storage = naughty
if err = writeFSMetadata(fs.storage, ".minio.sys", fsPath, fsMeta); errorCause(err) != errFaultyDisk {
t.Fatal("Unexpected error", i, err)
}
}
}

View File

@ -0,0 +1,165 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"os"
"path/filepath"
"testing"
"time"
)
// TestFSIsBucketExist - complete test of isBucketExist
func TestFSIsBucketExist(t *testing.T) {
// Prepare for testing
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj, err := newFSObjects(disk)
if err != nil {
t.Fatal("Cannot create a new FS object: ", err)
}
fs := obj.(fsObjects)
bucketName := "bucket"
if err := obj.MakeBucket(bucketName); err != nil {
t.Fatal("Cannot create bucket, err: ", err)
}
// Test with a valid bucket
if found := fs.isBucketExist(bucketName); !found {
t.Fatal("isBucketExist should true")
}
// Test with a nonexistent bucket
if found := fs.isBucketExist("foo"); found {
t.Fatal("isBucketExist should false")
}
// Using a faulty disk
fsStorage := fs.storage.(*posix)
naughty := newNaughtyDisk(fsStorage, nil, errFaultyDisk)
fs.storage = naughty
if found := fs.isBucketExist(bucketName); found {
t.Fatal("isBucketExist should return false because it is wired to a corrupted disk")
}
}
// TestFSIsUploadExists - complete test with valid and invalid cases
func TestFSIsUploadExists(t *testing.T) {
// Prepare for testing
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj, err := newFSObjects(disk)
if err != nil {
t.Fatal("Cannot create a new FS object: ", err)
}
fs := obj.(fsObjects)
bucketName := "bucket"
objectName := "object"
obj.MakeBucket(bucketName)
uploadID, err := obj.NewMultipartUpload(bucketName, objectName, nil)
if err != nil {
t.Fatal("Unexpected err: ", err)
}
// Test with valid upload id
if exists := fs.isUploadIDExists(bucketName, objectName, uploadID); !exists {
t.Fatal("Wrong result, expected: ", exists)
}
// Test with nonexistent bucket/object names
if exists := fs.isUploadIDExists("bucketfoo", "objectfoo", uploadID); exists {
t.Fatal("Wrong result, expected: ", !exists)
}
// Test with a nonexistent upload ID
if exists := fs.isUploadIDExists(bucketName, objectName, uploadID+"-ff"); exists {
t.Fatal("Wrong result, expected: ", !exists)
}
// isUploadIDExists with a faulty disk should return false
fsStorage := fs.storage.(*posix)
naughty := newNaughtyDisk(fsStorage, nil, errFaultyDisk)
fs.storage = naughty
if exists := fs.isUploadIDExists(bucketName, objectName, uploadID); exists {
t.Fatal("Wrong result, expected: ", !exists)
}
}
// TestFSWriteUploadJSON - tests for writeUploadJSON for FS
func TestFSWriteUploadJSON(t *testing.T) {
// Prepare for tests
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj, err := newFSObjects(disk)
if err != nil {
t.Fatal("Unexpected err: ", err)
}
fs := obj.(fsObjects)
bucketName := "bucket"
objectName := "object"
obj.MakeBucket(bucketName)
uploadID, err := obj.NewMultipartUpload(bucketName, objectName, nil)
if err != nil {
t.Fatal("Unexpected err: ", err)
}
if err := fs.writeUploadJSON(bucketName, objectName, uploadID, time.Now()); err != nil {
t.Fatal("Unexpected err: ", err)
}
// writeUploadJSON with a faulty disk should fail with errFaultyDisk
fsStorage := fs.storage.(*posix)
for i := 1; i <= 3; i++ {
naughty := newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil)
fs.storage = naughty
if err := fs.writeUploadJSON(bucketName, objectName, uploadID, time.Now()); errorCause(err) != errFaultyDisk {
t.Fatal("Unexpected err: ", err)
}
}
}
// TestFSUpdateUploadsJSON - tests for updateUploadsJSON for FS
func TestFSUpdateUploadsJSON(t *testing.T) {
// Prepare for tests
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj, err := newFSObjects(disk)
if err != nil {
t.Fatal("Unexpected err: ", err)
}
fs := obj.(fsObjects)
bucketName := "bucket"
objectName := "object"
obj.MakeBucket(bucketName)
if err := fs.updateUploadsJSON(bucketName, objectName, uploadsV1{}); err != nil {
t.Fatal("Unexpected err: ", err)
}
// updateUploadsJSON with a faulty disk should fail with errFaultyDisk
fsStorage := fs.storage.(*posix)
for i := 1; i <= 2; i++ {
naughty := newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil)
fs.storage = naughty
if err := fs.updateUploadsJSON(bucketName, objectName, uploadsV1{}); errorCause(err) != errFaultyDisk {
t.Fatal("Unexpected err: ", err)
}
}
}

cmd/fs-v1-multipart_test.go (new file, 226 lines)
View File

@ -0,0 +1,226 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"crypto/md5"
"encoding/hex"
"os"
"path/filepath"
"reflect"
"testing"
)
// TestNewMultipartUploadFaultyDisk - test NewMultipartUpload with faulty disks
func TestNewMultipartUploadFaultyDisk(t *testing.T) {
// Prepare for tests
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj, err := newFSObjects(disk)
if err != nil {
t.Fatal("Cannot create a new FS object: ", err)
}
fs := obj.(fsObjects)
bucketName := "bucket"
objectName := "object"
if err := obj.MakeBucket(bucketName); err != nil {
t.Fatal("Cannot create bucket, err: ", err)
}
// Test with faulty disk
fsStorage := fs.storage.(*posix)
for i := 1; i <= 5; i++ {
// The faulty disk returns errFaultyDisk on the i-th storage API call
fs.storage = newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil)
if _, err := fs.NewMultipartUpload(bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"}); errorCause(err) != errFaultyDisk {
switch i {
case 1:
if !isSameType(errorCause(err), BucketNotFound{}) {
t.Fatal("Unexpected error ", err)
}
default:
t.Fatal("Unexpected error ", err)
}
}
}
}
// TestPutObjectPartFaultyDisk - test PutObjectPart with faulty disks
func TestPutObjectPartFaultyDisk(t *testing.T) {
// Prepare for tests
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj, err := newFSObjects(disk)
if err != nil {
t.Fatal("Cannot create a new FS object: ", err)
}
fs := obj.(fsObjects)
bucketName := "bucket"
objectName := "object"
data := []byte("12345")
dataLen := int64(len(data))
if err = obj.MakeBucket(bucketName); err != nil {
t.Fatal("Cannot create bucket, err: ", err)
}
uploadID, err := fs.NewMultipartUpload(bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
if err != nil {
t.Fatal("Unexpected error ", err)
}
md5Writer := md5.New()
md5Writer.Write(data)
md5Hex := hex.EncodeToString(md5Writer.Sum(nil))
// Test with faulty disk
fsStorage := fs.storage.(*posix)
for i := 1; i <= 7; i++ {
// The faulty disk returns errFaultyDisk on the i-th storage API call
fs.storage = newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil)
if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, dataLen, bytes.NewReader(data), md5Hex); errorCause(err) != errFaultyDisk {
switch i {
case 1:
if !isSameType(errorCause(err), BucketNotFound{}) {
t.Fatal("Unexpected error ", err)
}
case 2, 4:
if !isSameType(errorCause(err), InvalidUploadID{}) {
t.Fatal("Unexpected error ", err)
}
default:
t.Fatal("Unexpected error ", i, err, reflect.TypeOf(errorCause(err)), reflect.TypeOf(errFaultyDisk))
}
}
}
}
// TestCompleteMultipartUploadFaultyDisk - test CompleteMultipartUpload with faulty disks
func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {
// Prepare for tests
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj, err := newFSObjects(disk)
if err != nil {
t.Fatal("Cannot create a new FS object: ", err)
}
fs := obj.(fsObjects)
bucketName := "bucket"
objectName := "object"
data := []byte("12345")
if err = obj.MakeBucket(bucketName); err != nil {
t.Fatal("Cannot create bucket, err: ", err)
}
uploadID, err := fs.NewMultipartUpload(bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
if err != nil {
t.Fatal("Unexpected error ", err)
}
md5Writer := md5.New()
md5Writer.Write(data)
md5Hex := hex.EncodeToString(md5Writer.Sum(nil))
if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, 5, bytes.NewReader(data), md5Hex); err != nil {
t.Fatal("Unexpected error ", err)
}
parts := []completePart{{PartNumber: 1, ETag: md5Hex}}
fsStorage := fs.storage.(*posix)
for i := 1; i <= 3; i++ {
// The faulty disk returns errFaultyDisk on the i-th storage API call
fs.storage = newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil)
if _, err := fs.CompleteMultipartUpload(bucketName, objectName, uploadID, parts); errorCause(err) != errFaultyDisk {
switch i {
case 1:
if !isSameType(errorCause(err), BucketNotFound{}) {
t.Fatal("Unexpected error ", err)
}
case 2:
if !isSameType(errorCause(err), InvalidUploadID{}) {
t.Fatal("Unexpected error ", err)
}
default:
t.Fatal("Unexpected error ", i, err, reflect.TypeOf(errorCause(err)), reflect.TypeOf(errFaultyDisk))
}
}
}
}
// TestListMultipartUploadsFaultyDisk - test ListMultipartUploads with faulty disks
func TestListMultipartUploadsFaultyDisk(t *testing.T) {
// Prepare for tests
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj, err := newFSObjects(disk)
if err != nil {
t.Fatal("Cannot create a new FS object: ", err)
}
fs := obj.(fsObjects)
bucketName := "bucket"
objectName := "object"
data := []byte("12345")
if err = obj.MakeBucket(bucketName); err != nil {
t.Fatal("Cannot create bucket, err: ", err)
}
uploadID, err := fs.NewMultipartUpload(bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
if err != nil {
t.Fatal("Unexpected error ", err)
}
md5Writer := md5.New()
md5Writer.Write(data)
md5Hex := hex.EncodeToString(md5Writer.Sum(nil))
if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, 5, bytes.NewReader(data), md5Hex); err != nil {
t.Fatal("Unexpected error ", err)
}
fsStorage := fs.storage.(*posix)
for i := 1; i <= 4; i++ {
// The faulty disk returns errFaultyDisk on the i-th storage API call
fs.storage = newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil)
if _, err := fs.ListMultipartUploads(bucketName, objectName, "", "", "", 1000); errorCause(err) != errFaultyDisk {
switch i {
case 1:
if !isSameType(errorCause(err), BucketNotFound{}) {
t.Fatal("Unexpected error ", err)
}
case 2:
if !isSameType(errorCause(err), InvalidUploadID{}) {
t.Fatal("Unexpected error ", err)
}
case 3:
if errorCause(err) != errFileNotFound {
t.Fatal("Unexpected error ", err)
}
default:
t.Fatal("Unexpected error ", i, err, reflect.TypeOf(errorCause(err)), reflect.TypeOf(errFaultyDisk))
}
}
}
}

View File

@ -505,20 +505,6 @@ func (fs fsObjects) DeleteObject(bucket, object string) error {
return nil
}
// Checks whether bucket exists.
func isBucketExist(storage StorageAPI, bucketName string) bool {
// Check whether bucket exists.
_, err := storage.StatVol(bucketName)
if err != nil {
if err == errVolumeNotFound {
return false
}
errorIf(err, "Stat failed on bucket "+bucketName+".")
return false
}
return true
}
// ListObjects - list all objects at prefix upto maxKeys., optionally delimited by '/'. Maintains the list pool
// state for future re-entrant list requests.
func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
@ -551,7 +537,7 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
return ListObjectsInfo{}, traceError(BucketNameInvalid{Bucket: bucket})
}
// Verify if bucket exists.
if !isBucketExist(fs.storage, bucket) { if !fs.isBucketExist(bucket) {
return ListObjectsInfo{}, traceError(BucketNotFound{Bucket: bucket})
}
if !IsValidObjectPrefix(prefix) {

View File

@ -68,8 +68,9 @@ func TestNewFS(t *testing.T) {
// TestFSShutdown - initialize a new FS object layer then calls Shutdown
// to check returned results
func TestFSShutdown(t *testing.T) {
// Create an FS object and shutdown it. No errors expected // Prepare for tests
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj, err := newFSObjects(disk)
if err != nil {
t.Fatal("Cannot create a new FS object: ", err)
@ -85,18 +86,249 @@ func TestFSShutdown(t *testing.T) {
obj.MakeBucket(bucketName)
obj.PutObject(bucketName, objectName, int64(len(objectContent)), bytes.NewReader([]byte(objectContent)), nil)
// Test Shutdown with regular conditions
if err := fs.Shutdown(); err != nil {
t.Fatal("Cannot shutdown the FS object: ", err)
}
// Create an FS and program errors with disks when shutdown is called // Test Shutdown with faulty disks
for i := 1; i <= 5; i++ { for i := 1; i <= 5; i++ {
naughty := newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil) fs.storage = newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil)
fs.storage = naughty
if err := fs.Shutdown(); errorCause(err) != errFaultyDisk {
t.Fatal(i, ", Got unexpected fs shutdown error: ", err)
}
}
removeAll(disk) }
// TestFSLoadFormatFS - test loadFormatFS with healthy and faulty disks
func TestFSLoadFormatFS(t *testing.T) {
// Prepare for testing
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj, err := newFSObjects(disk)
if err != nil {
t.Fatal("Cannot create a new FS object: ", err)
}
fs := obj.(fsObjects)
// Regular format loading
_, err = loadFormatFS(fs.storage)
if err != nil {
t.Fatal("Should not fail here", err)
}
// Loading corrupted format file
fs.storage.AppendFile(minioMetaBucket, fsFormatJSONFile, []byte{'b'})
_, err = loadFormatFS(fs.storage)
if err == nil {
t.Fatal("Should return an error here")
}
// Loading format file from faulty disk
fsStorage := fs.storage.(*posix)
fs.storage = newNaughtyDisk(fsStorage, nil, errFaultyDisk)
_, err = loadFormatFS(fs.storage)
if err != errFaultyDisk {
t.Fatal("Should return faulty disk error")
}
}
// TestFSGetBucketInfo - test GetBucketInfo with healthy and faulty disks
func TestFSGetBucketInfo(t *testing.T) {
// Prepare for testing
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj, err := newFSObjects(disk)
if err != nil {
t.Fatal("Cannot create a new FS object: ", err)
}
fs := obj.(fsObjects)
bucketName := "bucket"
obj.MakeBucket(bucketName)
// Test with valid parameters
info, err := fs.GetBucketInfo(bucketName)
if err != nil {
t.Fatal(err)
}
if info.Name != bucketName {
t.Fatalf("wrong bucket name, expected: %s, found: %s", bucketName, info.Name)
}
// Test with a nonexistent bucket
_, err = fs.GetBucketInfo("a")
if !isSameType(errorCause(err), BucketNameInvalid{}) {
t.Fatal("BucketNameInvalid error not returned")
}
// Test GetBucketInfo with a faulty disk
fsStorage := fs.storage.(*posix)
fs.storage = newNaughtyDisk(fsStorage, nil, errFaultyDisk)
_, err = fs.GetBucketInfo(bucketName)
if errorCause(err) != errFaultyDisk {
t.Fatal("errFaultyDisk error not returned")
}
}
// TestFSDeleteObject - test fs.DeleteObject() with healthy and corrupted disks
func TestFSDeleteObject(t *testing.T) {
// Prepare for tests
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj, _ := newFSObjects(disk)
fs := obj.(fsObjects)
bucketName := "bucket"
objectName := "object"
obj.MakeBucket(bucketName)
obj.PutObject(bucketName, objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil)
// Test with invalid bucket name
if err := fs.DeleteObject("fo", objectName); !isSameType(errorCause(err), BucketNameInvalid{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with invalid object name
if err := fs.DeleteObject(bucketName, "^"); !isSameType(errorCause(err), ObjectNameInvalid{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with nonexistent bucket/object
if err := fs.DeleteObject("foobucket", "fooobject"); !isSameType(errorCause(err), BucketNotFound{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with valid condition
if err := fs.DeleteObject(bucketName, objectName); err != nil {
t.Fatal("Unexpected error: ", err)
}
// Test DeleteObject with a faulty disk
fsStorage := fs.storage.(*posix)
fs.storage = newNaughtyDisk(fsStorage, nil, errFaultyDisk)
if err := fs.DeleteObject(bucketName, objectName); errorCause(err) != errFaultyDisk {
t.Fatal("Unexpected error: ", err)
}
}
// TestFSDeleteBucket - tests for fs DeleteBucket
func TestFSDeleteBucket(t *testing.T) {
// Prepare for testing
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj, _ := newFSObjects(disk)
fs := obj.(fsObjects)
bucketName := "bucket"
err := obj.MakeBucket(bucketName)
if err != nil {
t.Fatal("Unexpected error: ", err)
}
// Test with an invalid bucket name
if err := fs.DeleteBucket("fo"); !isSameType(errorCause(err), BucketNameInvalid{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with a nonexistent bucket
if err := fs.DeleteBucket("foobucket"); !isSameType(errorCause(err), BucketNotFound{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with a valid case
if err := fs.DeleteBucket(bucketName); err != nil {
t.Fatal("Unexpected error: ", err)
}
obj.MakeBucket(bucketName)
// Test DeleteBucket with faulty disks
fsStorage := fs.storage.(*posix)
for i := 1; i <= 2; i++ {
fs.storage = newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil)
if err := fs.DeleteBucket(bucketName); errorCause(err) != errFaultyDisk {
t.Fatal("Unexpected error: ", err)
}
}
}
// TestFSListBuckets - tests for fs ListBuckets
func TestFSListBuckets(t *testing.T) {
// Prepare for tests
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj, _ := newFSObjects(disk)
fs := obj.(fsObjects)
bucketName := "bucket"
if err := obj.MakeBucket(bucketName); err != nil {
t.Fatal("Unexpected error: ", err)
}
// Create a volume with an invalid bucket name directly on the storage layer
if err := fs.storage.MakeVol("vo^"); err != nil {
t.Fatal("Unexpected error: ", err)
}
// ListBuckets should list only the valid bucket
buckets, err := fs.ListBuckets()
if err != nil {
t.Fatal("Unexpected error: ", err)
}
if len(buckets) != 1 {
t.Fatal("ListBuckets not working properly")
}
// Test ListBuckets with faulty disks
fsStorage := fs.storage.(*posix)
for i := 1; i <= 2; i++ {
fs.storage = newNaughtyDisk(fsStorage, nil, errFaultyDisk)
if _, err := fs.ListBuckets(); errorCause(err) != errFaultyDisk {
t.Fatal("Unexpected error: ", err)
}
}
}
// TestFSHealObject - tests for fs HealObject
func TestFSHealObject(t *testing.T) {
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj, err := newFSObjects(disk)
if err != nil {
t.Fatal("Cannot create a new FS object: ", err)
}
err = obj.HealObject("bucket", "object")
if err == nil || !isSameType(errorCause(err), NotImplemented{}) {
t.Fatalf("Heal Object should return NotImplemented error ")
}
}
// TestFSListObjectsHeal - tests for fs ListObjectsHeal
func TestFSListObjectsHeal(t *testing.T) {
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj, err := newFSObjects(disk)
if err != nil {
t.Fatal("Cannot create a new FS object: ", err)
}
_, err = obj.ListObjectsHeal("bucket", "prefix", "marker", "delimiter", 1000)
if err == nil || !isSameType(errorCause(err), NotImplemented{}) {
t.Fatalf("Heal Object should return NotImplemented error ")
}
}
// TestFSHealDiskMetadata - tests for fs HealDiskMetadata
func TestFSHealDiskMetadata(t *testing.T) {
disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
defer removeAll(disk)
obj, err := newFSObjects(disk)
if err != nil {
t.Fatal("Cannot create a new FS object: ", err)
}
err = obj.HealDiskMetadata()
if err == nil || !isSameType(errorCause(err), NotImplemented{}) {
t.Fatalf("Heal Object should return NotImplemented error ")
}
}

View File

@ -16,7 +16,10 @@
package cmd
import "github.com/minio/minio/pkg/disk" import (
"github.com/minio/minio/pkg/disk"
"sync"
)
// naughtyDisk wraps a POSIX disk and returns programmed errors
// specified by the developer. The purpose is to simulate errors
@ -31,6 +34,8 @@ type naughtyDisk struct {
defaultErr error
// The current API call number
callNR int
// Protects the call counter from concurrent access
mu sync.Mutex
}
func newNaughtyDisk(d *posix, errs map[int]error, defaultErr error) *naughtyDisk {
@ -38,6 +43,8 @@ func newNaughtyDisk(d *posix, errs map[int]error, defaultErr error) *naughtyDisk
}
func (d *naughtyDisk) calcError() (err error) {
d.mu.Lock()
defer d.mu.Unlock()
d.callNR++
if err, ok := d.errors[d.callNR]; ok {
return err

View File

@ -437,7 +437,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
{"test-bucket-list-object", "asia", "europe-object", "", 0, ListObjectsInfo{}, fmt.Errorf("Invalid combination of marker '%s' and prefix '%s'", "europe-object", "asia"), false}, {"test-bucket-list-object", "asia", "europe-object", "", 0, ListObjectsInfo{}, fmt.Errorf("Invalid combination of marker '%s' and prefix '%s'", "europe-object", "asia"), false},
// Setting a non-existing directory to be prefix (12-13). // Setting a non-existing directory to be prefix (12-13).
{"empty-bucket", "europe/france/", "", "", 1, ListObjectsInfo{}, nil, true}, {"empty-bucket", "europe/france/", "", "", 1, ListObjectsInfo{}, nil, true},
{"empty-bucket", "europe/tunisia/", "", "", 1, ListObjectsInfo{}, nil, true}, {"empty-bucket", "africa/tunisia/", "", "", 1, ListObjectsInfo{}, nil, true},
// Testing on empty bucket, that is, bucket without any objects in it (14).
{"empty-bucket", "", "", "", 0, ListObjectsInfo{}, nil, true},
// Setting maxKeys to negative value (15-16).
@ -512,6 +512,9 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
{"test-bucket-list-object", "", "Asia/India/Karnataka/Bangalore/Koramangala/pics", "/", 10, resultCases[29], nil, true}, {"test-bucket-list-object", "", "Asia/India/Karnataka/Bangalore/Koramangala/pics", "/", 10, resultCases[29], nil, true},
// Test with prefix and delimiter set to '/'. (60) // Test with prefix and delimiter set to '/'. (60)
{"test-bucket-list-object", "/", "", "/", 10, resultCases[30], nil, true}, {"test-bucket-list-object", "/", "", "/", 10, resultCases[30], nil, true},
// Test with invalid prefix (61)
{"test-bucket-list-object", "^", "", "/", 10, resultCases[30], ObjectNameInvalid{Bucket: "test-bucket-list-object", Object: "^"}, false},
}
for i, testCase := range testCases {

View File

@ -36,9 +36,14 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
bucket := "minio-bucket" bucket := "minio-bucket"
object := "minio-object" object := "minio-object"
_, err := obj.NewMultipartUpload("--", object, nil)
if err == nil {
t.Fatalf("%s: Expected to fail since bucket name is invalid.", instanceType)
}
errMsg := "Bucket not found: minio-bucket" errMsg := "Bucket not found: minio-bucket"
// opearation expected to fail since the bucket on which NewMultipartUpload is being initiated doesn't exist. // opearation expected to fail since the bucket on which NewMultipartUpload is being initiated doesn't exist.
_, err := obj.NewMultipartUpload(bucket, object, nil) _, err = obj.NewMultipartUpload(bucket, object, nil)
if err == nil {
t.Fatalf("%s: Expected to fail since the NewMultipartUpload is intialized on a non-existent bucket.", instanceType)
}
@ -53,6 +58,11 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
_, err = obj.NewMultipartUpload(bucket, "^", nil)
if err == nil {
t.Fatalf("%s: Expected to fail since object name is invalid.", instanceType)
}
uploadID, err := obj.NewMultipartUpload(bucket, object, nil)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
@ -69,6 +79,53 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
}
}
// Wrapper for calling AbortMultipartUpload tests for both XL multiple disks and single node setup.
func TestObjectAbortMultipartUpload(t *testing.T) {
ExecObjectLayerTest(t, testObjectAbortMultipartUpload)
}
// Tests validate aborting of a multipart upload with valid and invalid inputs.
func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t TestErrHandler) {
bucket := "minio-bucket"
object := "minio-object"
// Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucket(bucket)
if err != nil {
// Failed to create the new bucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}
uploadID, err := obj.NewMultipartUpload(bucket, object, nil)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
abortTestCases := []struct {
bucketName string
objName string
uploadID string
expectedErrType error
}{
{"--", object, uploadID, BucketNameInvalid{}},
{bucket, "^", uploadID, ObjectNameInvalid{}},
{"foo", object, uploadID, BucketNotFound{}},
{bucket, object, "foo-foo", InvalidUploadID{}},
{bucket, object, uploadID, nil},
}
// Iterate over abortTestCases and validate the returned error types.
for i, testCase := range abortTestCases {
err = obj.AbortMultipartUpload(testCase.bucketName, testCase.objName, testCase.uploadID)
if testCase.expectedErrType == nil && err != nil {
t.Errorf("Test %d, unexpected err is received: %v, expected:%v\n", i+1, err, testCase.expectedErrType)
}
if testCase.expectedErrType != nil && !isSameType(errorCause(err), testCase.expectedErrType) {
t.Errorf("Test %d, unexpected err is received: %v, expected:%v\n", i+1, err, testCase.expectedErrType)
}
}
}
// Wrapper for calling isUploadIDExists tests for both XL multiple disks and single node setup.
func TestObjectAPIIsUploadIDExists(t *testing.T) {
ExecObjectLayerTest(t, testObjectAPIIsUploadIDExists)
@ -1716,7 +1773,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
t.Fatalf("%s : %s", instanceType, err) t.Fatalf("%s : %s", instanceType, err)
} }
// Initiate Multipart Upload on the above created bucket. // Initiate Multipart Upload on the above created bucket.
uploadID, err = obj.NewMultipartUpload(bucketNames[0], objectNames[0], nil) uploadID, err = obj.NewMultipartUpload(bucketNames[0], objectNames[0], map[string]string{"X-Amz-Meta-Id": "id"})
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err)
@ -1849,6 +1906,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
// the case above successfully completes CompleteMultipartUpload, the remaining Parts will be flushed.
// Expecting to fail with Invalid UploadID.
{bucketNames[0], objectNames[0], uploadIDs[0], inputParts[4].parts, "", InvalidUploadID{UploadID: uploadIDs[0]}, false},
// Expecting to fail due to bad
}
for i, testCase := range testCases {

View File

@ -148,6 +148,10 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
{bucket, object, data, nil, int64(len(data) - 1), md5Hex(data[:len(data)-1]), nil},
{bucket, object, nilBytes, nil, int64(len(nilBytes) + 1), md5Hex(nilBytes), IncompleteBody{}},
{bucket, object, fiveMBBytes, nil, int64(0), md5Hex(fiveMBBytes), nil},
// Test case 29
// Valid data with X-Amz-Meta-* metadata
{bucket, object, data, map[string]string{"X-Amz-Meta-AppID": "a42"}, int64(len(data)), md5Hex(data), nil},
}
for i, testCase := range testCases {

View File

@ -30,6 +30,7 @@ import (
"net/http/httptest" "net/http/httptest"
"net/url" "net/url"
"os" "os"
"reflect"
"regexp" "regexp"
"sort" "sort"
"strconv" "strconv"
@ -127,6 +128,11 @@ func nextSuffix() string {
return strconv.Itoa(int(1e9 + r%1e9))[1:]
}
// isSameType - compares two object types via reflect.TypeOf
func isSameType(obj1, obj2 interface{}) bool {
return reflect.TypeOf(obj1) == reflect.TypeOf(obj2)
}
// TestServer encapsulates an instantiation of a Minio instance with a temporary backend.
// Example usage:
// s := StartTestServer(t,"XL")
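A quick note on the new helper: the FS tests above pair isSameType with errorCause to assert on error types rather than error values. A tiny illustrative check (hypothetical, not part of the commit):

func TestIsSameTypeSketch(t *testing.T) {
	// Same dynamic type, different field values: should match.
	if !isSameType(BucketNotFound{Bucket: "bucket"}, BucketNotFound{}) {
		t.Fatal("Expected matching types")
	}
	// Different error types: should not match.
	if isSameType(BucketNotFound{}, BucketNameInvalid{}) {
		t.Fatal("Expected differing types")
	}
}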