Mirror of https://github.com/minio/minio.git (synced 2024-12-24 06:05:55 -05:00)
XL: Handle quorum situations properly for write operations. (#1986)
Adds two test cases:
- PutObject when write quorum is not available.
- PutObjectPart when write quorum is not available.

Fixes #1951
This commit is contained in:
parent c88720ea2c
commit 2e1f66c37d
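
For context on the fix itself: the hunks below guard PutObject, PutObjectPart and CompleteMultipartUpload with a write-quorum check over the per-disk errors returned by readAllXLMetadata, bailing out with errXLWriteQuorum when too few disks answered. A minimal sketch of that guard pattern follows; the isQuorum body, the error values and the quorum numbers used here are illustrative assumptions for this sketch, not the actual minio implementation.

package main

import (
	"errors"
	"fmt"
)

// Assumed sentinel for this sketch; the diff uses minio's own errXLWriteQuorum.
var errXLWriteQuorum = errors.New("write quorum not available")

// isQuorum (sketch): count the disks that responded without error and
// compare against the required quorum. The real minio helper may weigh
// error types differently; this is only the shape of the check.
func isQuorum(errs []error, quorum int) bool {
	online := 0
	for _, err := range errs {
		if err == nil {
			online++
		}
	}
	return online >= quorum
}

// putObjectGuard mirrors the guard added in the diff: read metadata from
// all disks, then refuse to write if write quorum is not met.
func putObjectGuard(errs []error, writeQuorum int) error {
	if !isQuorum(errs, writeQuorum) {
		return errXLWriteQuorum
	}
	return nil // proceed with the erasure-coded write
}

func main() {
	// Hypothetical 16-disk setup: 9 disks errored, 7 online, quorum of 9 not met.
	errs := make([]error, 16)
	for i := 0; i < 9; i++ {
		errs[i] = errors.New("disk not found")
	}
	fmt.Println(putObjectGuard(errs, 9)) // prints: write quorum not available
}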
@@ -86,7 +86,7 @@ func generateBytesData(size int) []byte {
		b = letterBytes[rand.Intn(len(letterBytes))]
		return []byte{b}
	}
	// repeat the random character chosen size.
	// repeat the random character chosen size
	return bytes.Repeat(getRandomByte(), size)
}

@@ -1,264 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"bytes"
	"strings"
	"testing"
)

// Wrapper for calling TestListObjectPartsDiskNotFound tests for both XL multiple disks and single node setup.
func TestListObjectPartsDiskNotFound(t *testing.T) {
	ExecObjectLayerDiskNotFoundTest(t, testListObjectPartsDiskNotFound)
}

// testListObjectParts - Tests validate listing of object parts when disks go offline.
func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks []string, t *testing.T) {

	bucketNames := []string{"minio-bucket", "minio-2-bucket"}
	objectNames := []string{"minio-object-1.txt"}
	uploadIDs := []string{}

	// bucketnames[0].
	// objectNames[0].
	// uploadIds [0].
	// Create bucket before intiating NewMultipartUpload.
	err := obj.MakeBucket(bucketNames[0])
	if err != nil {
		// Failed to create newbucket, abort.
		t.Fatalf("%s : %s", instanceType, err.Error())
	}
	// Initiate Multipart Upload on the above created bucket.
	uploadID, err := obj.NewMultipartUpload(bucketNames[0], objectNames[0], nil)
	if err != nil {
		// Failed to create NewMultipartUpload, abort.
		t.Fatalf("%s : %s", instanceType, err.Error())
	}

	// Remove some random disk.
	removeRandomDisk(disks, 1)

	uploadIDs = append(uploadIDs, uploadID)

	// Create multipart parts.
	// Need parts to be uploaded before MultipartLists can be called and tested.
	createPartCases := []struct {
		bucketName string
		objName string
		uploadID string
		PartID int
		inputReaderData string
		inputMd5 string
		intputDataSize int64
		expectedMd5 string
	}{
		// Case 1-4.
		// Creating sequence of parts for same uploadID.
		// Used to ensure that the ListMultipartResult produces one output for the four parts uploaded below for the given upload ID.
		{bucketNames[0], objectNames[0], uploadIDs[0], 1, "abcd", "e2fc714c4727ee9395f324cd2e7f331f", int64(len("abcd")), "e2fc714c4727ee9395f324cd2e7f331f"},
		{bucketNames[0], objectNames[0], uploadIDs[0], 2, "efgh", "1f7690ebdd9b4caf8fab49ca1757bf27", int64(len("efgh")), "1f7690ebdd9b4caf8fab49ca1757bf27"},
		{bucketNames[0], objectNames[0], uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd")), "09a0877d04abf8759f99adec02baf579"},
		{bucketNames[0], objectNames[0], uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd")), "e132e96a5ddad6da8b07bba6f6131fef"},
	}
	// Iterating over creatPartCases to generate multipart chunks.
	for _, testCase := range createPartCases {
		_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize,
			bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5)
		if err != nil {
			t.Fatalf("%s : %s", instanceType, err.Error())
		}
	}

	// Remove one more random disk.
	removeRandomDisk(disks, 1)

	partInfos := []ListPartsInfo{
		// partinfos - 0.
		{
			Bucket: bucketNames[0],
			Object: objectNames[0],
			MaxParts: 10,
			UploadID: uploadIDs[0],
			Parts: []partInfo{
				{
					PartNumber: 1,
					Size: 4,
					ETag: "e2fc714c4727ee9395f324cd2e7f331f",
				},
				{
					PartNumber: 2,
					Size: 4,
					ETag: "1f7690ebdd9b4caf8fab49ca1757bf27",
				},
				{
					PartNumber: 3,
					Size: 4,
					ETag: "09a0877d04abf8759f99adec02baf579",
				},
				{
					PartNumber: 4,
					Size: 4,
					ETag: "e132e96a5ddad6da8b07bba6f6131fef",
				},
			},
		},
		// partinfos - 1.
		{
			Bucket: bucketNames[0],
			Object: objectNames[0],
			MaxParts: 3,
			NextPartNumberMarker: 3,
			IsTruncated: true,
			UploadID: uploadIDs[0],
			Parts: []partInfo{
				{
					PartNumber: 1,
					Size: 4,
					ETag: "e2fc714c4727ee9395f324cd2e7f331f",
				},
				{
					PartNumber: 2,
					Size: 4,
					ETag: "1f7690ebdd9b4caf8fab49ca1757bf27",
				},
				{
					PartNumber: 3,
					Size: 4,
					ETag: "09a0877d04abf8759f99adec02baf579",
				},
			},
		},
		// partinfos - 2.
		{
			Bucket: bucketNames[0],
			Object: objectNames[0],
			MaxParts: 2,
			IsTruncated: false,
			UploadID: uploadIDs[0],
			Parts: []partInfo{
				{
					PartNumber: 4,
					Size: 4,
					ETag: "e132e96a5ddad6da8b07bba6f6131fef",
				},
			},
		},
	}

	testCases := []struct {
		bucket string
		object string
		uploadID string
		partNumberMarker int
		maxParts int
		// Expected output of ListPartsInfo.
		expectedResult ListPartsInfo
		expectedErr error
		// Flag indicating whether the test is expected to pass or not.
		shouldPass bool
	}{
		// Test cases with invalid bucket names (Test number 1-4).
		{".test", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: ".test"}, false},
		{"Test", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: "Test"}, false},
		{"---", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: "---"}, false},
		{"ad", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: "ad"}, false},
		// Test cases for listing uploadID with single part.
		// Valid bucket names, but they donot exist (Test number 5-7).
		{"volatile-bucket-1", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
		{"volatile-bucket-2", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
		{"volatile-bucket-3", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
		// Test case for Asserting for invalid objectName (Test number 8).
		{bucketNames[0], "", "", 0, 0, ListPartsInfo{}, ObjectNameInvalid{Bucket: bucketNames[0]}, false},
		// Asserting for Invalid UploadID (Test number 9).
		{bucketNames[0], objectNames[0], "abc", 0, 0, ListPartsInfo{}, InvalidUploadID{UploadID: "abc"}, false},
		// Test case for uploadID with multiple parts (Test number 12).
		{bucketNames[0], objectNames[0], uploadIDs[0], 0, 10, partInfos[0], nil, true},
		// Test case with maxParts set to less than number of parts (Test number 13).
		{bucketNames[0], objectNames[0], uploadIDs[0], 0, 3, partInfos[1], nil, true},
		// Test case with partNumberMarker set (Test number 14)-.
		{bucketNames[0], objectNames[0], uploadIDs[0], 3, 2, partInfos[2], nil, true},
	}

	for i, testCase := range testCases {
		actualResult, actualErr := obj.ListObjectParts(testCase.bucket, testCase.object, testCase.uploadID, testCase.partNumberMarker, testCase.maxParts)
		if actualErr != nil && testCase.shouldPass {
			t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, actualErr.Error())
		}
		if actualErr == nil && !testCase.shouldPass {
			t.Errorf("Test %d: %s: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, instanceType, testCase.expectedErr.Error())
		}
		// Failed as expected, but does it fail for the expected reason.
		if actualErr != nil && !testCase.shouldPass {
			if !strings.Contains(actualErr.Error(), testCase.expectedErr.Error()) {
				t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, instanceType, testCase.expectedErr.Error(), actualErr.Error())
			}
		}
		// Passes as expected, but asserting the results.
		if actualErr == nil && testCase.shouldPass {
			expectedResult := testCase.expectedResult
			// Asserting the MaxParts.
			if actualResult.MaxParts != expectedResult.MaxParts {
				t.Errorf("Test %d: %s: Expected the MaxParts to be %d, but instead found it to be %d", i+1, instanceType, expectedResult.MaxParts, actualResult.MaxParts)
			}
			// Asserting Object Name.
			if actualResult.Object != expectedResult.Object {
				t.Errorf("Test %d: %s: Expected Object name to be \"%s\", but instead found it to be \"%s\"", i+1, instanceType, expectedResult.Object, actualResult.Object)
			}
			// Asserting UploadID.
			if actualResult.UploadID != expectedResult.UploadID {
				t.Errorf("Test %d: %s: Expected UploadID to be \"%s\", but instead found it to be \"%s\"", i+1, instanceType, expectedResult.UploadID, actualResult.UploadID)
			}
			// Asserting NextPartNumberMarker.
			if actualResult.NextPartNumberMarker != expectedResult.NextPartNumberMarker {
				t.Errorf("Test %d: %s: Expected NextPartNumberMarker to be \"%d\", but instead found it to be \"%d\"", i+1, instanceType, expectedResult.NextPartNumberMarker, actualResult.NextPartNumberMarker)
			}
			// Asserting PartNumberMarker.
			if actualResult.PartNumberMarker != expectedResult.PartNumberMarker {
				t.Errorf("Test %d: %s: Expected PartNumberMarker to be \"%d\", but instead found it to be \"%d\"", i+1, instanceType, expectedResult.PartNumberMarker, actualResult.PartNumberMarker)
			}
			// Asserting the BucketName.
			if actualResult.Bucket != expectedResult.Bucket {
				t.Errorf("Test %d: %s: Expected Bucket to be \"%s\", but instead found it to be \"%s\"", i+1, instanceType, expectedResult.Bucket, actualResult.Bucket)
			}
			// Asserting IsTruncated.
			if actualResult.IsTruncated != testCase.expectedResult.IsTruncated {
				t.Errorf("Test %d: %s: Expected IsTruncated to be \"%v\", but found it to \"%v\"", i+1, instanceType, expectedResult.IsTruncated, actualResult.IsTruncated)
			}
			// Asserting the number of Parts.
			if len(expectedResult.Parts) != len(actualResult.Parts) {
				t.Errorf("Test %d: %s: Expected the result to contain info of %d Parts, but found %d instead", i+1, instanceType, len(expectedResult.Parts), len(actualResult.Parts))
			} else {
				// Iterating over the partInfos and asserting the fields.
				for j, actualMetaData := range actualResult.Parts {
					// Asserting the PartNumber in the PartInfo.
					if actualMetaData.PartNumber != expectedResult.Parts[j].PartNumber {
						t.Errorf("Test %d: %s: Part %d: Expected PartNumber to be \"%d\", but instead found \"%d\"", i+1, instanceType, j+1, expectedResult.Parts[j].PartNumber, actualMetaData.PartNumber)
					}
					// Asserting the Size in the PartInfo.
					if actualMetaData.Size != expectedResult.Parts[j].Size {
						t.Errorf("Test %d: %s: Part %d: Expected Part Size to be \"%d\", but instead found \"%d\"", i+1, instanceType, j+1, expectedResult.Parts[j].Size, actualMetaData.Size)
					}
					// Asserting the ETag in the PartInfo.
					if actualMetaData.ETag != expectedResult.Parts[j].ETag {
						t.Errorf("Test %d: %s: Part %d: Expected Etag to be \"%s\", but instead found \"%s\"", i+1, instanceType, j+1, expectedResult.Parts[j].ETag, actualMetaData.ETag)
					}
				}
			}
		}
	}
}

@@ -99,6 +99,87 @@ func testObjectAPIIsUploadIDExists(obj ObjectLayer, instanceType string, t *test
	}
}

// Wrapper for calling TestPutObjectPartDiskNotFound tests for both XL
// write quorum.
func TestPutObjectPartDiskNotFound(t *testing.T) {
	ExecObjectLayerDiskNotFoundTest(t, testPutObjectPartDiskNotFound)
}

// testPutObjectPartDiskNotFound - Tests validate PutObjectPart behavior when disks go offline.
func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks []string, t *testing.T) {
	bucketNames := []string{"minio-bucket", "minio-2-bucket"}
	objectNames := []string{"minio-object-1.txt"}
	uploadIDs := []string{}

	// bucketnames[0].
	// objectNames[0].
	// uploadIds [0].
	// Create bucket before intiating NewMultipartUpload.
	err := obj.MakeBucket(bucketNames[0])
	if err != nil {
		// Failed to create newbucket, abort.
		t.Fatalf("%s : %s", instanceType, err.Error())
	}

	// Initiate Multipart Upload on the above created bucket.
	uploadID, err := obj.NewMultipartUpload(bucketNames[0], objectNames[0], nil)
	if err != nil {
		// Failed to create NewMultipartUpload, abort.
		t.Fatalf("%s : %s", instanceType, err.Error())
	}

	// Remove some random disk.
	for _, disk := range disks[:6] {
		removeAll(disk)
	}

	uploadIDs = append(uploadIDs, uploadID)

	// Create multipart parts.
	// Need parts to be uploaded before MultipartLists can be called and tested.
	createPartCases := []struct {
		bucketName string
		objName string
		uploadID string
		PartID int
		inputReaderData string
		inputMd5 string
		intputDataSize int64
		expectedMd5 string
	}{
		// Case 1-5.
		// Creating sequence of parts for same uploadID.
		// Used to ensure that the ListMultipartResult produces one output for the four parts uploaded below for the given upload ID.
		{bucketNames[0], objectNames[0], uploadIDs[0], 1, "abcd", "e2fc714c4727ee9395f324cd2e7f331f", int64(len("abcd")), "e2fc714c4727ee9395f324cd2e7f331f"},
		{bucketNames[0], objectNames[0], uploadIDs[0], 2, "efgh", "1f7690ebdd9b4caf8fab49ca1757bf27", int64(len("efgh")), "1f7690ebdd9b4caf8fab49ca1757bf27"},
		{bucketNames[0], objectNames[0], uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("ijkl")), "09a0877d04abf8759f99adec02baf579"},
		{bucketNames[0], objectNames[0], uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("mnop")), "e132e96a5ddad6da8b07bba6f6131fef"},
		{bucketNames[0], objectNames[0], uploadIDs[0], 5, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("mnop")), "e132e96a5ddad6da8b07bba6f6131fef"},
	}
	// Iterating over creatPartCases to generate multipart chunks.
	for _, testCase := range createPartCases {
		_, err = obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize,
			bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5)
		if err != nil {
			t.Fatalf("%s : %s", instanceType, err.Error())
		}
	}

	// This causes quorum failure verify.
	removeAll(disks[len(disks)-1])

	// Object part upload should fail with quorum not available.
	testCase := createPartCases[len(createPartCases)-1]
	_, err = obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5)
	if err == nil {
		t.Fatalf("Test %s: expected to fail but passed instead", instanceType)
	}
	expectedErr := InsufficientWriteQuorum{}
	if err.Error() != expectedErr.Error() {
		t.Fatalf("Test %s: expected error %s, got %s instead.", instanceType, expectedErr, err)
	}
}

// Wrapper for calling PutObjectPart tests for both XL multiple disks and single node setup.
func TestObjectAPIPutObjectPart(t *testing.T) {
	ExecObjectLayerTest(t, testObjectAPIPutObjectPart)
@@ -129,7 +210,9 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t *testing
		t.Fatalf("%s : %s", instanceType, err.Error())
	}

	failCases := []struct {
	// Collection of non-exhaustive PutObjectPart test cases, valid errors
	// and success responses.
	testCases := []struct {
		bucketName string
		objName string
		uploadID string
@@ -196,11 +279,10 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t *testing
		{bucket, object, uploadID, 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd")), true, "", nil},
	}

	for i, testCase := range failCases {
		actualMd5Hex, actualErr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize,
			bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5)
	// Validate all the test cases.
	for i, testCase := range testCases {
		actualMd5Hex, actualErr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5)
		// All are test cases above are expected to fail.

		if actualErr != nil && testCase.shouldPass {
			t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
		}
@@ -239,7 +321,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t *testing.T
	// bucketnames[0].
	// objectNames[0].
	// uploadIds [0].
	// Create bucket before intiating NewMultipartUpload.
	// Create bucket before initiating NewMultipartUpload.
	err := obj.MakeBucket(bucketNames[0])
	if err != nil {
		// Failed to create newbucket, abort.
@@ -287,7 +369,8 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t *testing.T
	// Initiate Multipart Upload on bucketNames[2].
	// Used to test the listing for the case of multiple objects for a given bucket.
	for i := 0; i < 6; i++ {
		uploadID, err := obj.NewMultipartUpload(bucketNames[2], objectNames[i], nil)
		var uploadID string
		uploadID, err = obj.NewMultipartUpload(bucketNames[2], objectNames[i], nil)
		if err != nil {
			// Failed to create NewMultipartUpload, abort.
			t.Fatalf("%s : %s", instanceType, err.Error())
@@ -933,15 +1016,17 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t *testing.T
		},
	}

	// Collection of non-exhaustive ListMultipartUploads test cases, valid errors
	// and success responses.
	testCases := []struct {
		// Inputs to ListObjects.
		// Inputs to ListMultipartUploads.
		bucket string
		prefix string
		keyMarker string
		uploadIDMarker string
		delimiter string
		maxUploads int
		// Expected output of ListObjects.
		// Expected output of ListMultipartUploads.
		expectedResult ListMultipartsInfo
		expectedErr error
		// Flag indicating whether the test is expected to pass or not.
@@ -1123,6 +1208,249 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t *testing.T
	}
}

// Wrapper for calling TestListObjectPartsDiskNotFound tests for both XL multiple disks and single node setup.
func TestListObjectPartsDiskNotFound(t *testing.T) {
	ExecObjectLayerDiskNotFoundTest(t, testListObjectPartsDiskNotFound)
}

// testListObjectParts - Tests validate listing of object parts when disks go offline.
func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks []string, t *testing.T) {

	bucketNames := []string{"minio-bucket", "minio-2-bucket"}
	objectNames := []string{"minio-object-1.txt"}
	uploadIDs := []string{}

	// bucketnames[0].
	// objectNames[0].
	// uploadIds [0].
	// Create bucket before intiating NewMultipartUpload.
	err := obj.MakeBucket(bucketNames[0])
	if err != nil {
		// Failed to create newbucket, abort.
		t.Fatalf("%s : %s", instanceType, err.Error())
	}
	// Initiate Multipart Upload on the above created bucket.
	uploadID, err := obj.NewMultipartUpload(bucketNames[0], objectNames[0], nil)
	if err != nil {
		// Failed to create NewMultipartUpload, abort.
		t.Fatalf("%s : %s", instanceType, err.Error())
	}

	// Remove some random disk.
	removeRandomDisk(disks, 1)

	uploadIDs = append(uploadIDs, uploadID)

	// Create multipart parts.
	// Need parts to be uploaded before MultipartLists can be called and tested.
	createPartCases := []struct {
		bucketName string
		objName string
		uploadID string
		PartID int
		inputReaderData string
		inputMd5 string
		intputDataSize int64
		expectedMd5 string
	}{
		// Case 1-4.
		// Creating sequence of parts for same uploadID.
		// Used to ensure that the ListMultipartResult produces one output for the four parts uploaded below for the given upload ID.
		{bucketNames[0], objectNames[0], uploadIDs[0], 1, "abcd", "e2fc714c4727ee9395f324cd2e7f331f", int64(len("abcd")), "e2fc714c4727ee9395f324cd2e7f331f"},
		{bucketNames[0], objectNames[0], uploadIDs[0], 2, "efgh", "1f7690ebdd9b4caf8fab49ca1757bf27", int64(len("efgh")), "1f7690ebdd9b4caf8fab49ca1757bf27"},
		{bucketNames[0], objectNames[0], uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd")), "09a0877d04abf8759f99adec02baf579"},
		{bucketNames[0], objectNames[0], uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd")), "e132e96a5ddad6da8b07bba6f6131fef"},
	}
	// Iterating over creatPartCases to generate multipart chunks.
	for _, testCase := range createPartCases {
		_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize,
			bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5)
		if err != nil {
			t.Fatalf("%s : %s", instanceType, err.Error())
		}
	}

	// Remove one more random disk.
	removeRandomDisk(disks, 1)

	partInfos := []ListPartsInfo{
		// partinfos - 0.
		{
			Bucket: bucketNames[0],
			Object: objectNames[0],
			MaxParts: 10,
			UploadID: uploadIDs[0],
			Parts: []partInfo{
				{
					PartNumber: 1,
					Size: 4,
					ETag: "e2fc714c4727ee9395f324cd2e7f331f",
				},
				{
					PartNumber: 2,
					Size: 4,
					ETag: "1f7690ebdd9b4caf8fab49ca1757bf27",
				},
				{
					PartNumber: 3,
					Size: 4,
					ETag: "09a0877d04abf8759f99adec02baf579",
				},
				{
					PartNumber: 4,
					Size: 4,
					ETag: "e132e96a5ddad6da8b07bba6f6131fef",
				},
			},
		},
		// partinfos - 1.
		{
			Bucket: bucketNames[0],
			Object: objectNames[0],
			MaxParts: 3,
			NextPartNumberMarker: 3,
			IsTruncated: true,
			UploadID: uploadIDs[0],
			Parts: []partInfo{
				{
					PartNumber: 1,
					Size: 4,
					ETag: "e2fc714c4727ee9395f324cd2e7f331f",
				},
				{
					PartNumber: 2,
					Size: 4,
					ETag: "1f7690ebdd9b4caf8fab49ca1757bf27",
				},
				{
					PartNumber: 3,
					Size: 4,
					ETag: "09a0877d04abf8759f99adec02baf579",
				},
			},
		},
		// partinfos - 2.
		{
			Bucket: bucketNames[0],
			Object: objectNames[0],
			MaxParts: 2,
			IsTruncated: false,
			UploadID: uploadIDs[0],
			Parts: []partInfo{
				{
					PartNumber: 4,
					Size: 4,
					ETag: "e132e96a5ddad6da8b07bba6f6131fef",
				},
			},
		},
	}

	// Collection of non-exhaustive ListObjectParts test cases, valid errors
	// and success responses.
	testCases := []struct {
		bucket string
		object string
		uploadID string
		partNumberMarker int
		maxParts int
		// Expected output of ListPartsInfo.
		expectedResult ListPartsInfo
		expectedErr error
		// Flag indicating whether the test is expected to pass or not.
		shouldPass bool
	}{
		// Test cases with invalid bucket names (Test number 1-4).
		{".test", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: ".test"}, false},
		{"Test", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: "Test"}, false},
		{"---", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: "---"}, false},
		{"ad", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: "ad"}, false},
		// Test cases for listing uploadID with single part.
		// Valid bucket names, but they donot exist (Test number 5-7).
		{"volatile-bucket-1", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
		{"volatile-bucket-2", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
		{"volatile-bucket-3", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
		// Test case for Asserting for invalid objectName (Test number 8).
		{bucketNames[0], "", "", 0, 0, ListPartsInfo{}, ObjectNameInvalid{Bucket: bucketNames[0]}, false},
		// Asserting for Invalid UploadID (Test number 9).
		{bucketNames[0], objectNames[0], "abc", 0, 0, ListPartsInfo{}, InvalidUploadID{UploadID: "abc"}, false},
		// Test case for uploadID with multiple parts (Test number 12).
		{bucketNames[0], objectNames[0], uploadIDs[0], 0, 10, partInfos[0], nil, true},
		// Test case with maxParts set to less than number of parts (Test number 13).
		{bucketNames[0], objectNames[0], uploadIDs[0], 0, 3, partInfos[1], nil, true},
		// Test case with partNumberMarker set (Test number 14)-.
		{bucketNames[0], objectNames[0], uploadIDs[0], 3, 2, partInfos[2], nil, true},
	}

	for i, testCase := range testCases {
		actualResult, actualErr := obj.ListObjectParts(testCase.bucket, testCase.object, testCase.uploadID, testCase.partNumberMarker, testCase.maxParts)
		if actualErr != nil && testCase.shouldPass {
			t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, actualErr.Error())
		}
		if actualErr == nil && !testCase.shouldPass {
			t.Errorf("Test %d: %s: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, instanceType, testCase.expectedErr.Error())
		}
		// Failed as expected, but does it fail for the expected reason.
		if actualErr != nil && !testCase.shouldPass {
			if !strings.Contains(actualErr.Error(), testCase.expectedErr.Error()) {
				t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, instanceType, testCase.expectedErr.Error(), actualErr.Error())
			}
		}
		// Passes as expected, but asserting the results.
		if actualErr == nil && testCase.shouldPass {
			expectedResult := testCase.expectedResult
			// Asserting the MaxParts.
			if actualResult.MaxParts != expectedResult.MaxParts {
				t.Errorf("Test %d: %s: Expected the MaxParts to be %d, but instead found it to be %d", i+1, instanceType, expectedResult.MaxParts, actualResult.MaxParts)
			}
			// Asserting Object Name.
			if actualResult.Object != expectedResult.Object {
				t.Errorf("Test %d: %s: Expected Object name to be \"%s\", but instead found it to be \"%s\"", i+1, instanceType, expectedResult.Object, actualResult.Object)
			}
			// Asserting UploadID.
			if actualResult.UploadID != expectedResult.UploadID {
				t.Errorf("Test %d: %s: Expected UploadID to be \"%s\", but instead found it to be \"%s\"", i+1, instanceType, expectedResult.UploadID, actualResult.UploadID)
			}
			// Asserting NextPartNumberMarker.
			if actualResult.NextPartNumberMarker != expectedResult.NextPartNumberMarker {
				t.Errorf("Test %d: %s: Expected NextPartNumberMarker to be \"%d\", but instead found it to be \"%d\"", i+1, instanceType, expectedResult.NextPartNumberMarker, actualResult.NextPartNumberMarker)
			}
			// Asserting PartNumberMarker.
			if actualResult.PartNumberMarker != expectedResult.PartNumberMarker {
				t.Errorf("Test %d: %s: Expected PartNumberMarker to be \"%d\", but instead found it to be \"%d\"", i+1, instanceType, expectedResult.PartNumberMarker, actualResult.PartNumberMarker)
			}
			// Asserting the BucketName.
			if actualResult.Bucket != expectedResult.Bucket {
				t.Errorf("Test %d: %s: Expected Bucket to be \"%s\", but instead found it to be \"%s\"", i+1, instanceType, expectedResult.Bucket, actualResult.Bucket)
			}
			// Asserting IsTruncated.
			if actualResult.IsTruncated != testCase.expectedResult.IsTruncated {
				t.Errorf("Test %d: %s: Expected IsTruncated to be \"%v\", but found it to \"%v\"", i+1, instanceType, expectedResult.IsTruncated, actualResult.IsTruncated)
			}
			// Asserting the number of Parts.
			if len(expectedResult.Parts) != len(actualResult.Parts) {
				t.Errorf("Test %d: %s: Expected the result to contain info of %d Parts, but found %d instead", i+1, instanceType, len(expectedResult.Parts), len(actualResult.Parts))
			} else {
				// Iterating over the partInfos and asserting the fields.
				for j, actualMetaData := range actualResult.Parts {
					// Asserting the PartNumber in the PartInfo.
					if actualMetaData.PartNumber != expectedResult.Parts[j].PartNumber {
						t.Errorf("Test %d: %s: Part %d: Expected PartNumber to be \"%d\", but instead found \"%d\"", i+1, instanceType, j+1, expectedResult.Parts[j].PartNumber, actualMetaData.PartNumber)
					}
					// Asserting the Size in the PartInfo.
					if actualMetaData.Size != expectedResult.Parts[j].Size {
						t.Errorf("Test %d: %s: Part %d: Expected Part Size to be \"%d\", but instead found \"%d\"", i+1, instanceType, j+1, expectedResult.Parts[j].Size, actualMetaData.Size)
					}
					// Asserting the ETag in the PartInfo.
					if actualMetaData.ETag != expectedResult.Parts[j].ETag {
						t.Errorf("Test %d: %s: Part %d: Expected Etag to be \"%s\", but instead found \"%s\"", i+1, instanceType, j+1, expectedResult.Parts[j].ETag, actualMetaData.ETag)
					}
				}
			}
		}
	}
}

// Wrapper for calling TestListObjectParts tests for both XL multiple disks and single node setup.
func TestListObjectParts(t *testing.T) {
	ExecObjectLayerTest(t, testListObjectParts)
@@ -1255,6 +1583,8 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t *testing.T) {
		},
	}

	// Collection of non-exhaustive ListObjectParts test cases, valid errors
	// and success responses.
	testCases := []struct {
		bucket string
		object string

object-api-putobject_test.go (new file, 231 lines)
@@ -0,0 +1,231 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"bytes"
	"fmt"
	"testing"
)

// Wrapper for calling PutObject tests for both XL multiple disks and single node setup.
func TestObjectAPIPutObject(t *testing.T) {
	ExecObjectLayerTest(t, testObjectAPIPutObject)
}

// Tests validate correctness of PutObject.
func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t *testing.T) {
	// Generating cases for which the PutObject fails.
	bucket := "minio-bucket"
	object := "minio-object"

	// Create bucket.
	err := obj.MakeBucket(bucket)
	if err != nil {
		// Failed to create newbucket, abort.
		t.Fatalf("%s : %s", instanceType, err.Error())
	}

	// Creating a dummy bucket for tests.
	err = obj.MakeBucket("unused-bucket")
	if err != nil {
		// Failed to create newbucket, abort.
		t.Fatalf("%s : %s", instanceType, err.Error())
	}

	failCases := []struct {
		bucketName string
		objName string
		inputReaderData string
		inputMeta map[string]string
		intputDataSize int64
		// flag indicating whether the test should pass.
		shouldPass bool
		// expected error output.
		expectedMd5 string
		expectedError error
	}{
		// Test case 1-4.
		// Cases with invalid bucket name.
		{".test", "obj", "", nil, 0, false, "", fmt.Errorf("%s", "Bucket name invalid: .test")},
		{"------", "obj", "", nil, 0, false, "", fmt.Errorf("%s", "Bucket name invalid: ------")},
		{"$this-is-not-valid-too", "obj", "", nil, 0, false, "",
			fmt.Errorf("%s", "Bucket name invalid: $this-is-not-valid-too")},
		{"a", "obj", "", nil, 0, false, "", fmt.Errorf("%s", "Bucket name invalid: a")},
		// Test case - 5.
		// Case with invalid object names.
		{bucket, "", "", nil, 0, false, "", fmt.Errorf("%s", "Object name invalid: minio-bucket#")},
		// Test case - 6.
		// Valid object and bucket names but non-existent bucket.
		{"abc", "def", "", nil, 0, false, "", fmt.Errorf("%s", "Bucket not found: abc")},
		// Test case - 7.
		// Input to replicate Md5 mismatch.
		{bucket, object, "", map[string]string{"md5Sum": "a35"}, 0, false, "",
			fmt.Errorf("%s", "Bad digest: Expected a35 is not valid with what we calculated "+"d41d8cd98f00b204e9800998ecf8427e")},
		// Test case - 8.
		// Input with size more than the size of actual data inside the reader.
		{bucket, object, "abcd", map[string]string{"md5Sum": "a35"}, int64(len("abcd") + 1), false, "",
			fmt.Errorf("%s", "Bad digest: Expected a35 is not valid with what we calculated e2fc714c4727ee9395f324cd2e7f331f")},
		// Test case - 9.
		// Input with size less than the size of actual data inside the reader.
		{bucket, object, "abcd", map[string]string{"md5Sum": "a35"}, int64(len("abcd") - 1), false, "",
			fmt.Errorf("%s", "Bad digest: Expected a35 is not valid with what we calculated e2fc714c4727ee9395f324cd2e7f331f")},
		// Test case - 10-13.
		// Validating for success cases.
		{bucket, object, "abcd", map[string]string{"md5Sum": "e2fc714c4727ee9395f324cd2e7f331f"}, int64(len("abcd")), true, "", nil},
		{bucket, object, "efgh", map[string]string{"md5Sum": "1f7690ebdd9b4caf8fab49ca1757bf27"}, int64(len("efgh")), true, "", nil},
		{bucket, object, "ijkl", map[string]string{"md5Sum": "09a0877d04abf8759f99adec02baf579"}, int64(len("ijkl")), true, "", nil},
		{bucket, object, "mnop", map[string]string{"md5Sum": "e132e96a5ddad6da8b07bba6f6131fef"}, int64(len("mnop")), true, "", nil},
	}

	for i, testCase := range failCases {
		actualMd5Hex, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMeta)
		// All are test cases above are expected to fail.

		if actualErr != nil && testCase.shouldPass {
			t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
		}
		if actualErr == nil && !testCase.shouldPass {
			t.Errorf("Test %d: %s: Expected to fail with <ERROR> \"%s\", but passed instead.", i+1, instanceType, testCase.expectedError.Error())
		}
		// Failed as expected, but does it fail for the expected reason.
		if actualErr != nil && !testCase.shouldPass {
			if testCase.expectedError.Error() != actualErr.Error() {
				t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1,
					instanceType, testCase.expectedError.Error(), actualErr.Error())
			}
		}
		// Test passes as expected, but the output values are verified for correctness here.
		if actualErr == nil && testCase.shouldPass {
			// Asserting whether the md5 output is correct.
			if testCase.inputMeta["md5Sum"] != actualMd5Hex {
				t.Errorf("Test %d: %s: Calculated Md5 different from the actual one %s.", i+1, instanceType, actualMd5Hex)
			}
		}
	}
}

// Wrapper for calling PutObject tests for both XL multiple disks case
// when quorum is not availabel.
func TestObjectAPIPutObjectDiskNotFound(t *testing.T) {
	ExecObjectLayerDiskNotFoundTest(t, testObjectAPIPutObjectDiskNotFOund)
}

// Tests validate correctness of PutObject.
func testObjectAPIPutObjectDiskNotFOund(obj ObjectLayer, instanceType string, disks []string, t *testing.T) {
	// Generating cases for which the PutObject fails.
	bucket := "minio-bucket"
	object := "minio-object"

	// Create bucket.
	err := obj.MakeBucket(bucket)
	if err != nil {
		// Failed to create newbucket, abort.
		t.Fatalf("%s : %s", instanceType, err.Error())
	}

	// Creating a dummy bucket for tests.
	err = obj.MakeBucket("unused-bucket")
	if err != nil {
		// Failed to create newbucket, abort.
		t.Fatalf("%s : %s", instanceType, err.Error())
	}

	// Take 6 disks down, one more we loose quorum on 16 disk node.
	for _, disk := range disks[:6] {
		removeAll(disk)
	}

	testCases := []struct {
		bucketName string
		objName string
		inputReaderData string
		inputMeta map[string]string
		intputDataSize int64
		// flag indicating whether the test should pass.
		shouldPass bool
		// expected error output.
		expectedMd5 string
		expectedError error
	}{
		// Validating for success cases.
		{bucket, object, "abcd", map[string]string{"md5Sum": "e2fc714c4727ee9395f324cd2e7f331f"}, int64(len("abcd")), true, "", nil},
		{bucket, object, "efgh", map[string]string{"md5Sum": "1f7690ebdd9b4caf8fab49ca1757bf27"}, int64(len("efgh")), true, "", nil},
		{bucket, object, "ijkl", map[string]string{"md5Sum": "09a0877d04abf8759f99adec02baf579"}, int64(len("ijkl")), true, "", nil},
		{bucket, object, "mnop", map[string]string{"md5Sum": "e132e96a5ddad6da8b07bba6f6131fef"}, int64(len("mnop")), true, "", nil},
	}

	for i, testCase := range testCases {
		actualMd5Hex, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMeta)
		if actualErr != nil && testCase.shouldPass {
			t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
		}
		if actualErr == nil && !testCase.shouldPass {
			t.Errorf("Test %d: %s: Expected to fail with <ERROR> \"%s\", but passed instead.", i+1, instanceType, testCase.expectedError.Error())
		}
		// Failed as expected, but does it fail for the expected reason.
		if actualErr != nil && !testCase.shouldPass {
			if testCase.expectedError.Error() != actualErr.Error() {
				t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1,
					instanceType, testCase.expectedError.Error(), actualErr.Error())
			}
		}
		// Test passes as expected, but the output values are verified for correctness here.
		if actualErr == nil && testCase.shouldPass {
			// Asserting whether the md5 output is correct.
			if testCase.inputMeta["md5Sum"] != actualMd5Hex {
				t.Errorf("Test %d: %s: Calculated Md5 different from the actual one %s.", i+1, instanceType, actualMd5Hex)
			}
		}
	}

	// This causes quorum failure verify.
	removeAll(disks[len(disks)-1])

	// Validate the last test.
	testCase := struct {
		bucketName string
		objName string
		inputReaderData string
		inputMeta map[string]string
		intputDataSize int64
		// flag indicating whether the test should pass.
		shouldPass bool
		// expected error output.
		expectedMd5 string
		expectedError error
	}{
		bucket,
		object,
		"mnop",
		map[string]string{"md5Sum": "e132e96a5ddad6da8b07bba6f6131fef"},
		int64(len("mnop")),
		false,
		"",
		InsufficientWriteQuorum{},
	}
	_, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMeta)
	if actualErr != nil && testCase.shouldPass {
		t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", len(testCases)+1, instanceType, actualErr.Error())
	}
	// Failed as expected, but does it fail for the expected reason.
	if actualErr != nil && !testCase.shouldPass {
		if testCase.expectedError.Error() != actualErr.Error() {
			t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", len(testCases)+1, instanceType, testCase.expectedError.Error(), actualErr.Error())
		}
	}
}

@@ -103,10 +103,6 @@ func (xl xlObjects) shouldHeal(onlineDisks []StorageAPI) (heal bool) {
// - error if any.
func (xl xlObjects) listOnlineDisks(partsMetadata []xlMetaV1, errs []error) (onlineDisks []StorageAPI, version int64, err error) {
	onlineDisks = make([]StorageAPI, len(xl.storageDisks))
	// Do we have read Quorum?.
	if !isQuorum(errs, xl.readQuorum) {
		return nil, 0, errXLReadQuorum
	}

	// List all the file versions from partsMetadata list.
	versions := listObjectVersions(partsMetadata, errs)

@@ -316,6 +316,9 @@ func (xl xlObjects) putObjectPart(bucket string, object string, uploadID string,

	// Read metadata associated with the object from all disks.
	partsMetadata, errs := xl.readAllXLMetadata(minioMetaBucket, uploadIDPath)
	if !isQuorum(errs, xl.writeQuorum) {
		return "", toObjectErr(errXLWriteQuorum, bucket, object)
	}

	// List all online disks.
	onlineDisks, higherVersion, err := xl.listOnlineDisks(partsMetadata, errs)
@@ -559,9 +562,9 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload

	// Read metadata associated with the object from all disks.
	partsMetadata, errs := xl.readAllXLMetadata(minioMetaBucket, uploadIDPath)
	// Do we have readQuorum?.
	if !isQuorum(errs, xl.readQuorum) {
		return "", toObjectErr(errXLReadQuorum, minioMetaBucket, uploadIDPath)
	// Do we have writeQuorum?.
	if !isQuorum(errs, xl.writeQuorum) {
		return "", toObjectErr(errXLWriteQuorum, bucket, object)
	}

	// Calculate full object size.

@@ -54,6 +54,10 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i

	// Read metadata associated with the object from all disks.
	metaArr, errs := xl.readAllXLMetadata(bucket, object)
	// Do we have read quorum?
	if !isQuorum(errs, xl.readQuorum) {
		return toObjectErr(errXLReadQuorum, bucket, object)
	}

	// List all online disks.
	onlineDisks, highestVersion, err := xl.listOnlineDisks(metaArr, errs)
@@ -308,6 +312,10 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.

	// Read metadata associated with the object from all disks.
	partsMetadata, errs := xl.readAllXLMetadata(bucket, object)
	// Do we have write quroum?.
	if !isQuorum(errs, xl.writeQuorum) {
		return "", toObjectErr(errXLWriteQuorum, bucket, object)
	}

	// List all online disks.
	onlineDisks, higherVersion, err := xl.listOnlineDisks(partsMetadata, errs)