Tests: object api multipart tests and bug fixes.

karthic rao 2016-04-25 23:09:28 +05:30 committed by Harshavardhana
parent e9fba04b36
commit 6e372f83b4
5 changed files with 327 additions and 9 deletions

fs.go
View File

@@ -571,7 +571,7 @@ func (s fsStorage) ReadFile(volume string, path string, offset int64) (readClose
 			"diskPath": s.diskPath,
 			"filePath": filePath,
 		}).Debugf("Unexpected type %s", errIsNotRegular)
-		return nil, errIsNotRegular
+		return nil, errFileNotFound
 	}
 	// Seek to requested offset.
 	_, err = file.Seek(offset, os.SEEK_SET)
@@ -639,7 +639,7 @@ func (s fsStorage) StatFile(volume, path string) (file FileInfo, err error) {
 		// File path cannot be verified since one of the parents is a file.
 		if strings.Contains(err.Error(), "not a directory") {
-			return FileInfo{}, errIsNotRegular
+			return FileInfo{}, errFileNotFound
 		}
 		// Return all errors here.
@@ -649,7 +649,7 @@ func (s fsStorage) StatFile(volume, path string) (file FileInfo, err error) {
 	// If its a directory its not a regular file.
 	if st.Mode().IsDir() {
 		log.Debugf("File is %s", errIsNotRegular)
-		return FileInfo{}, errIsNotRegular
+		return FileInfo{}, errFileNotFound
 	}
 	return FileInfo{
 		Volume: volume,

View File

@@ -21,6 +21,7 @@ import (
 	"encoding/hex"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"path"
 	"strconv"
 	"strings"
@@ -34,6 +35,18 @@ const (
 	minioMetaVolume = ".minio"
 )
 
+// checks whether bucket exists.
+func (o objectAPI) isBucketExist(bucketName string) (bool, error) {
+	// Check whether bucket exists.
+	if _, e := o.storage.StatVol(bucketName); e != nil {
+		if e == errVolumeNotFound {
+			return false, nil
+		}
+		return false, e
+	}
+	return true, nil
+}
+
 // checkLeafDirectory - verifies if a given path is leaf directory if
 // yes returns all the files inside it.
 func (o objectAPI) checkLeafDirectory(prefixPath string) (isLeaf bool, fis []FileInfo) {
@@ -242,13 +255,23 @@ func (o objectAPI) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarke
 }
 
 func (o objectAPI) NewMultipartUpload(bucket, object string) (string, *probe.Error) {
-	// Verify if bucket is valid.
+	// Verify if bucket name is valid.
 	if !IsValidBucketName(bucket) {
 		return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
 	}
+	// Verify if object name is valid.
 	if !IsValidObjectName(object) {
 		return "", probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
 	}
+	// Verify whether the bucket exists.
+	isExist, err := o.isBucketExist(bucket)
+	if err != nil {
+		return "", probe.NewError(err)
+	}
+	if !isExist {
+		return "", probe.NewError(BucketNotFound{Bucket: bucket})
+	}
 	if _, e := o.storage.StatVol(minioMetaVolume); e != nil {
 		if e == errVolumeNotFound {
 			e = o.storage.MakeVol(minioMetaVolume)
@@ -305,6 +328,7 @@ func (o objectAPI) isUploadIDExists(bucket, object, uploadID string) (bool, erro
 	return st.Mode.IsRegular(), nil
 }
 
+// PutObjectPart - writes the multipart upload chunks.
 func (o objectAPI) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, *probe.Error) {
 	// Verify if bucket is valid.
 	if !IsValidBucketName(bucket) {
@@ -313,6 +337,15 @@ func (o objectAPI) PutObjectPart(bucket, object, uploadID string, partID int, si
 	if !IsValidObjectName(object) {
 		return "", probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
 	}
+	// Verify whether the bucket exists.
+	isExist, err := o.isBucketExist(bucket)
+	if err != nil {
+		return "", probe.NewError(err)
+	}
+	if !isExist {
+		return "", probe.NewError(BucketNotFound{Bucket: bucket})
+	}
 	if status, e := o.isUploadIDExists(bucket, object, uploadID); e != nil {
 		return "", probe.NewError(e)
 	} else if !status {
@@ -347,11 +380,18 @@ func (o objectAPI) PutObjectPart(bucket, object, uploadID string, partID int, si
 	if size > 0 {
 		if _, e = io.CopyN(multiWriter, data, size); e != nil {
 			safeCloseAndRemove(fileWriter)
-			if e == io.ErrUnexpectedEOF {
+			if e == io.ErrUnexpectedEOF || e == io.ErrShortWrite {
 				return "", probe.NewError(IncompleteBody{})
 			}
 			return "", probe.NewError(e)
 		}
+		// Reader shouldn't have more data than mentioned in the size argument.
+		// Read one more byte from the reader to validate this; the read is
+		// expected to fail, and success indicates the reader holds more data.
+		if _, e = io.CopyN(ioutil.Discard, data, 1); e == nil {
+			safeCloseAndRemove(fileWriter)
+			return "", probe.NewError(UnExpectedDataSize{Size: int(size)})
+		}
 	} else {
 		if _, e = io.Copy(multiWriter, data); e != nil {
 			safeCloseAndRemove(fileWriter)
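
For readers skimming the hunk above: the new excess-data check boils down to copying exactly the declared number of bytes and then probing the reader for one more byte, treating a successful probe as over-sized input. Below is a minimal, self-contained sketch of that pattern, assuming nothing from the Minio codebase; copyExactly and errExcessData are hypothetical names used only for this illustration.

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
)

// errExcessData is an illustrative error value, not the UnExpectedDataSize type from this commit.
var errExcessData = errors.New("reader contains more data than the declared size")

// copyExactly copies exactly size bytes from src to dst, then probes src for one
// extra byte; a successful probe means the caller under-declared the size.
func copyExactly(dst io.Writer, src io.Reader, size int64) error {
	if _, err := io.CopyN(dst, src, size); err != nil {
		// An error here (io.EOF for a short reader) means fewer bytes than declared.
		return err
	}
	// Attempt to read one more byte; an error (io.EOF) is the expected, good outcome.
	if _, err := io.CopyN(ioutil.Discard, src, 1); err == nil {
		return errExcessData
	}
	return nil
}

func main() {
	var buf bytes.Buffer
	fmt.Println(copyExactly(&buf, bytes.NewBufferString("abcd"), 4)) // <nil>: size matches data
	fmt.Println(copyExactly(&buf, bytes.NewBufferString("abcd"), 3)) // excess data detected
	fmt.Println(copyExactly(&buf, bytes.NewBufferString("abcd"), 5)) // EOF: data shorter than size
}

Doing the probe after the copy keeps the happy path to a single pass over the data, at the cost of reading and discarding at most one extra byte.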

View File

@ -0,0 +1,268 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path"
"testing"
)
// Tests validate creation of new multipart upload instance.
func TestObjectNewMultipartUpload(t *testing.T) {
directory, e := ioutil.TempDir("", "minio-multipart-1-test")
if e != nil {
t.Fatal(e)
}
defer os.RemoveAll(directory)
// Initialize fs layer.
fs, e := newFS(directory)
if e != nil {
t.Fatal(e)
}
// Initialize object layer.
obj := newObjectLayer(fs)
bucket := "minio-bucket"
object := "minio-object"
errMsg := "Bucket not found: minio-bucket"
// Operation expected to fail since the bucket on which NewMultipartUpload is being initiated doesn't exist.
uploadID, err := obj.NewMultipartUpload(bucket, object)
if err == nil {
t.Fatalf("Expected to fail since NewMultipartUpload is initiated on a non-existent bucket.")
}
}
if errMsg != err.ToGoError().Error() {
t.Errorf("Expected to fail with Error \"%s\", but instead found \"%s\".", errMsg, err.ToGoError().Error())
}
// Create bucket before initiating NewMultipartUpload.
err = obj.MakeBucket(bucket)
if err != nil {
// Failed to create new bucket, abort.
t.Fatal(err.ToGoError())
}
uploadID, err = obj.NewMultipartUpload(bucket, object)
if err != nil {
t.Fatal(err.ToGoError())
}
uploadIDPath := path.Join(bucket, object, uploadID)
_, e = obj.storage.StatFile(minioMetaVolume, uploadIDPath)
if e != nil {
if e == errFileNotFound {
t.Fatalf("New Multipart upload failed to create uuid file.")
}
t.Fatalf(e.Error())
}
}
// Tests validate the check for existence of an uploadID.
func TestObjectAPIIsUploadIDExists(t *testing.T) {
directory, e := ioutil.TempDir("", "minio-multipart-2-test")
if e != nil {
t.Fatal(e)
}
defer os.RemoveAll(directory)
// Initialize fs layer.
fs, e := newFS(directory)
if e != nil {
t.Fatal(e)
}
// Initialize object layer.
obj := newObjectLayer(fs)
bucket := "minio-bucket"
object := "minio-object"
// Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucket(bucket)
if err != nil {
// Failed to create new bucket, abort.
t.Fatal(err.ToGoError())
}
// UploadID file shouldn't exist.
isExists, e := obj.isUploadIDExists(bucket, object, "abc")
if e == nil {
t.Fatal("Expected an error for a non-existent uploadID, got nil.")
}
if isExists {
t.Fatal("Expected uploadIDPath to not to exist.")
}
uploadID, err := obj.NewMultipartUpload(bucket, object)
if err != nil {
t.Fatal(err.ToGoError())
}
// UploadID file should exist.
isExists, e = obj.isUploadIDExists(bucket, object, uploadID)
if e != nil {
t.Fatal(e.Error())
}
if !isExists {
t.Fatal("Expected uploadIDPath to exist.")
}
}
// Tests validate correctness of PutObjectPart.
func TestObjectAPIPutObjectPart(t *testing.T) {
// Generating cases for which the PutObjectPart fails.
directory, e := ioutil.TempDir("", "minio-multipart-3-test")
if e != nil {
t.Fatal(e)
}
defer os.RemoveAll(directory)
// Initializing fs layer.
fs, e := newFS(directory)
if e != nil {
t.Fatal(e)
}
bucket := "minio-bucket"
object := "minio-object"
// Initializing object layer.
obj := newObjectLayer(fs)
// Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucket(bucket)
if err != nil {
// Failed to create new bucket, abort.
t.Fatal(err.ToGoError())
}
// Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(bucket, object)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatal(err.ToGoError())
}
// Creating a dummy bucket for tests.
err = obj.MakeBucket("unused-bucket")
if err != nil {
// Failed to create new bucket, abort.
t.Fatal(err.ToGoError())
}
failCases := []struct {
bucketName string
objName string
uploadID string
PartID int
inputReaderData string
inputMd5 string
inputDataSize int64
// flag indicating whether the test should pass.
shouldPass bool
// expected md5 output.
expectedMd5 string
// expected error output.
expectedError error
}{
// Test case 1-4.
// Cases with invalid bucket name.
{".test", "obj", "", 1, "", "", 0, false, "", fmt.Errorf("%s", "Bucket name invalid: .test")},
{"------", "obj", "", 1, "", "", 0, false, "", fmt.Errorf("%s", "Bucket name invalid: ------")},
{"$this-is-not-valid-too", "obj", "", 1, "", "", 0, false, "",
fmt.Errorf("%s", "Bucket name invalid: $this-is-not-valid-too")},
{"a", "obj", "", 1, "", "", 0, false, "", fmt.Errorf("%s", "Bucket name invalid: a")},
// Test case - 5.
// Case with invalid object names.
{"abc", "", "", 1, "", "", 0, false, "", fmt.Errorf("%s", "Object name invalid: abc#")},
// Test case - 6.
// Valid object and bucket names but non-existent bucket.
{"abc", "def", "", 1, "", "", 0, false, "", fmt.Errorf("%s", "Bucket not found: abc")},
// Test Case - 7.
// Existing bucket, but NewMultipartUpload has not been initiated on it.
{"unused-bucket", "def", "xyz", 1, "", "", 0, false, "", fmt.Errorf("%s", "Invalid upload id xyz")},
// Test Case - 8.
// Existing bucket, but the object name differs from the one NewMultipartUpload was initiated with.
// Expecting "Invalid upload id".
{bucket, "def", "xyz", 1, "", "", 0, false, "", fmt.Errorf("%s", "Invalid upload id xyz")},
// Test Case - 9.
// Existing bucket; bucket and object names are the ones NewMultipartUpload was initiated with.
// But the uploadID is invalid.
// Expecting "Invalid upload id".
{bucket, object, "xyz", 1, "", "", 0, false, "", fmt.Errorf("%s", "Invalid upload id xyz")},
// Test case - 10.
// Case with valid UploadID and an existing bucket name,
// but not the bucket on which NewMultipartUpload was initiated.
{"unused-bucket", object, uploadID, 1, "", "", 0, false, "", fmt.Errorf("%s", "Invalid upload id "+uploadID)},
// Test case - 11.
// Case with valid UploadID and an existing bucket name,
// but not the object for which NewMultipartUpload was initiated.
{bucket, "none-object", uploadID, 1, "", "", 0, false, "", fmt.Errorf("%s", "Invalid upload id "+uploadID)},
// Test case - 12.
// Input to replicate Md5 mismatch.
{bucket, object, uploadID, 1, "", "a35", 0, false, "",
fmt.Errorf("%s", "Bad digest expected a35 is not valid with what we calculated "+"d41d8cd98f00b204e9800998ecf8427e")},
// Test case - 13.
// Input with size more than the size of actual data inside the reader.
{bucket, object, uploadID, 1, "abcd", "a35", int64(len("abcd") + 1), false, "", fmt.Errorf("%s", "EOF")},
// Test case - 14.
// Input with size less than the size of actual data inside the reader.
{bucket, object, uploadID, 1, "abcd", "a35", int64(len("abcd") - 1), false, "",
fmt.Errorf("%s", "Contains more data than specified size of 3 bytes.")},
// Test case - 15-18.
// Validating for success cases.
{bucket, object, uploadID, 1, "abcd", "e2fc714c4727ee9395f324cd2e7f331f", int64(len("abcd")), true, "", nil},
{bucket, object, uploadID, 2, "efgh", "1f7690ebdd9b4caf8fab49ca1757bf27", int64(len("efgh")), true, "", nil},
{bucket, object, uploadID, 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd")), true, "", nil},
{bucket, object, uploadID, 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd")), true, "", nil},
}
for i, testCase := range failCases {
actualMd5Hex, actualErr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.inputDataSize,
bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5)
// Cases with shouldPass set to false are expected to fail; the rest are expected to succeed.
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s.", i+1, actualErr.ToGoError().Error())
}
if actualErr == nil && !testCase.shouldPass {
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead.", i+1, testCase.expectedError.Error())
}
// Failed as expected, but does it fail for the expected reason.
if actualErr != nil && !testCase.shouldPass {
if testCase.expectedError.Error() != actualErr.ToGoError().Error() {
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1,
testCase.expectedError.Error(), actualErr.ToGoError().Error())
}
}
// Since there are cases for which PutObjectPart fails, this is necessary.
// Test passes as expected, but the output values are verified for correctness here.
if actualErr == nil && testCase.shouldPass {
// Asserting whether the md5 output is correct.
if testCase.inputMd5 != actualMd5Hex {
t.Errorf("Test %d: Calculated Md5 different from the actual one %s.", i+1, actualMd5Hex)
}
partSuffix := fmt.Sprintf("%s.%d.%s", uploadID, testCase.PartID, testCase.inputMd5)
// Verifying whether the part file is created.
_, e := obj.storage.StatFile(minioMetaVolume, path.Join(bucket, object, partSuffix))
if e != nil {
t.Errorf("Test %d: Failed to create the Part file.", i+1)
}
}
}
}

View File

@@ -130,6 +130,15 @@ func (e ObjectNameInvalid) Error() string {
 	return "Object name invalid: " + e.Bucket + "#" + e.Object
 }
 
+// UnExpectedDataSize - Reader contains more/less data than specified.
+type UnExpectedDataSize struct {
+	Size int
+}
+
+func (e UnExpectedDataSize) Error() string {
+	return fmt.Sprintf("Contains more data than specified size of %d bytes.", e.Size)
+}
+
 // IncompleteBody You did not provide the number of bytes specified by the Content-Length HTTP header
 type IncompleteBody GenericError
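
The typed errors above are meant to be distinguished by callers with a type switch, after unwrapping the *probe.Error via ToGoError() as the new tests do. Below is a hedged sketch of such a caller, assumed to live in the same package as these error types; handleUploadError and its message strings are illustrative only and are not part of this commit.

// handleUploadError is a hypothetical helper mapping typed upload errors to
// user-facing messages; the messages here are made up for illustration.
func handleUploadError(err error) string {
	switch e := err.(type) {
	case UnExpectedDataSize:
		return fmt.Sprintf("request body is longer than the declared size of %d bytes", e.Size)
	case IncompleteBody:
		return "request body is shorter than the declared Content-Length"
	case BucketNotFound:
		return "bucket " + e.Bucket + " does not exist"
	case ObjectNameInvalid:
		return "invalid object name: " + e.Object
	default:
		return err.Error()
	}
}

Callers that only need the text can fall back to Error(), but keeping concrete types lets each failure map to a distinct API response.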

View File

@@ -52,6 +52,7 @@ func testMakeBucket(c *check.C, create func() *objectAPI) {
 	c.Assert(err, check.IsNil)
 }
 
+// Tests verify the functionality of PutObjectPart.
 func testMultipartObjectCreation(c *check.C, create func() *objectAPI) {
 	obj := create()
 	err := obj.MakeBucket("bucket")
@@ -390,22 +391,22 @@ func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() *objectAPI)
 	_, err = obj.GetObject("bucket", "dir1", 0)
 	switch err := err.ToGoError().(type) {
-	case ObjectExistsAsPrefix:
+	case ObjectNotFound:
 		c.Assert(err.Bucket, check.Equals, "bucket")
 		c.Assert(err.Object, check.Equals, "dir1")
 	default:
 		// force a failure with a line number
-		c.Assert(err, check.Equals, "ObjectExistsAsPrefix")
+		c.Assert(err, check.Equals, "ObjectNotFound")
 	}
 
 	_, err = obj.GetObject("bucket", "dir1/", 0)
 	switch err := err.ToGoError().(type) {
-	case ObjectExistsAsPrefix:
+	case ObjectNotFound:
 		c.Assert(err.Bucket, check.Equals, "bucket")
 		c.Assert(err.Object, check.Equals, "dir1/")
 	default:
 		// force a failure with a line number
-		c.Assert(err, check.Equals, "ObjectExistsAsPrefix")
+		c.Assert(err, check.Equals, "ObjectNotFound")
 	}
 }