mirror of https://github.com/minio/minio.git
api: CopyObjectPart was copying wrong offsets due to shadowing. (#3838)
startOffset was re-assigned to '0', so it would end up copying the wrong content, ignoring the requested startOffset. This also fixes the corruption issue we observed while using docker registry. Fixes https://github.com/docker/distribution/issues/2205. Also fixes #3842 (incorrect routing).
This commit is contained in:
parent
0c8c463a63
commit
05e53f1b34
|
@ -56,6 +56,8 @@ const (
|
||||||
ErrInvalidBucketName
|
ErrInvalidBucketName
|
||||||
ErrInvalidDigest
|
ErrInvalidDigest
|
||||||
ErrInvalidRange
|
ErrInvalidRange
|
||||||
|
ErrInvalidCopyPartRange
|
||||||
|
ErrInvalidCopyPartRangeSource
|
||||||
ErrInvalidMaxKeys
|
ErrInvalidMaxKeys
|
||||||
ErrInvalidMaxUploads
|
ErrInvalidMaxUploads
|
||||||
ErrInvalidMaxParts
|
ErrInvalidMaxParts
|
||||||
|
@ -540,6 +542,16 @@ var errorCodeResponse = map[APIErrorCode]APIError{
|
||||||
Description: "Configurations overlap. Configurations on the same bucket cannot share a common event type.",
|
Description: "Configurations overlap. Configurations on the same bucket cannot share a common event type.",
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
HTTPStatusCode: http.StatusBadRequest,
|
||||||
},
|
},
|
||||||
|
ErrInvalidCopyPartRange: {
|
||||||
|
Code: "InvalidArgument",
|
||||||
|
Description: "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy",
|
||||||
|
HTTPStatusCode: http.StatusBadRequest,
|
||||||
|
},
|
||||||
|
ErrInvalidCopyPartRangeSource: {
|
||||||
|
Code: "InvalidArgument",
|
||||||
|
Description: "Range specified is not valid for source object",
|
||||||
|
HTTPStatusCode: http.StatusBadRequest,
|
||||||
|
},
|
||||||
|
|
||||||
/// S3 extensions.
|
/// S3 extensions.
|
||||||
ErrContentSHA256Mismatch: {
|
ErrContentSHA256Mismatch: {
|
||||||
|
|
|
@ -0,0 +1,106 @@
|
||||||
|
/*
|
||||||
|
* Minio Cloud Storage, (C) 2017 Minio, Inc.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package cmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Writes S3 compatible copy part range error.
|
||||||
|
func writeCopyPartErr(w http.ResponseWriter, err error, url *url.URL) {
|
||||||
|
switch err {
|
||||||
|
case errInvalidRange:
|
||||||
|
writeErrorResponse(w, ErrInvalidCopyPartRange, url)
|
||||||
|
return
|
||||||
|
case errInvalidRangeSource:
|
||||||
|
writeErrorResponse(w, ErrInvalidCopyPartRangeSource, url)
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
writeErrorResponse(w, ErrInternalError, url)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parses x-amz-copy-source-range for CopyObjectPart API. Specifically written to
|
||||||
|
// differentiate the behavior between regular httpRange header v/s x-amz-copy-source-range.
|
||||||
|
// The range of bytes to copy from the source object. The range value must use the form
|
||||||
|
// bytes=first-last, where the first and last are the zero-based byte offsets to copy.
|
||||||
|
// For example, bytes=0-9 indicates that you want to copy the first ten bytes of the source.
|
||||||
|
// http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
|
||||||
|
func parseCopyPartRange(rangeString string, resourceSize int64) (hrange *httpRange, err error) {
|
||||||
|
// Return error if given range string doesn't start with byte range prefix.
|
||||||
|
if !strings.HasPrefix(rangeString, byteRangePrefix) {
|
||||||
|
return nil, fmt.Errorf("'%s' does not start with '%s'", rangeString, byteRangePrefix)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Trim byte range prefix.
|
||||||
|
byteRangeString := strings.TrimPrefix(rangeString, byteRangePrefix)
|
||||||
|
|
||||||
|
// Check if range string contains delimiter '-', else return error. eg. "bytes=8"
|
||||||
|
sepIndex := strings.Index(byteRangeString, "-")
|
||||||
|
if sepIndex == -1 {
|
||||||
|
return nil, errInvalidRange
|
||||||
|
}
|
||||||
|
|
||||||
|
offsetBeginString := byteRangeString[:sepIndex]
|
||||||
|
offsetBegin := int64(-1)
|
||||||
|
// Convert offsetBeginString only if its not empty.
|
||||||
|
if len(offsetBeginString) > 0 {
|
||||||
|
if !validBytePos.MatchString(offsetBeginString) {
|
||||||
|
return nil, errInvalidRange
|
||||||
|
}
|
||||||
|
if offsetBegin, err = strconv.ParseInt(offsetBeginString, 10, 64); err != nil {
|
||||||
|
return nil, errInvalidRange
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
offsetEndString := byteRangeString[sepIndex+1:]
|
||||||
|
offsetEnd := int64(-1)
|
||||||
|
// Convert offsetEndString only if its not empty.
|
||||||
|
if len(offsetEndString) > 0 {
|
||||||
|
if !validBytePos.MatchString(offsetEndString) {
|
||||||
|
return nil, errInvalidRange
|
||||||
|
}
|
||||||
|
if offsetEnd, err = strconv.ParseInt(offsetEndString, 10, 64); err != nil {
|
||||||
|
return nil, errInvalidRange
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// rangeString contains first byte positions. eg. "bytes=2-" or
|
||||||
|
// rangeString contains last bye positions. eg. "bytes=-2"
|
||||||
|
if offsetBegin == -1 || offsetEnd == -1 {
|
||||||
|
return nil, errInvalidRange
|
||||||
|
}
|
||||||
|
|
||||||
|
// Last byte position should not be greater than first byte
|
||||||
|
// position. eg. "bytes=5-2"
|
||||||
|
if offsetBegin > offsetEnd {
|
||||||
|
return nil, errInvalidRange
|
||||||
|
}
|
||||||
|
|
||||||
|
// First and last byte positions should not be >= resourceSize.
|
||||||
|
if offsetBegin >= resourceSize || offsetEnd >= resourceSize {
|
||||||
|
return nil, errInvalidRangeSource
|
||||||
|
}
|
||||||
|
|
||||||
|
// Success..
|
||||||
|
return &httpRange{offsetBegin, offsetEnd, resourceSize}, nil
|
||||||
|
}
|
|
@ -0,0 +1,85 @@
|
||||||
|
/*
|
||||||
|
* Minio Cloud Storage, (C) 2017 Minio, Inc.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package cmd
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
// Test parseCopyPartRange()
|
||||||
|
func TestParseCopyPartRange(t *testing.T) {
|
||||||
|
// Test success cases.
|
||||||
|
successCases := []struct {
|
||||||
|
rangeString string
|
||||||
|
offsetBegin int64
|
||||||
|
offsetEnd int64
|
||||||
|
length int64
|
||||||
|
}{
|
||||||
|
{"bytes=2-5", 2, 5, 4},
|
||||||
|
{"bytes=2-9", 2, 9, 8},
|
||||||
|
{"bytes=2-2", 2, 2, 1},
|
||||||
|
{"bytes=0000-0006", 0, 6, 7},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, successCase := range successCases {
|
||||||
|
hrange, err := parseCopyPartRange(successCase.rangeString, 10)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected: <nil>, got: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if hrange.offsetBegin != successCase.offsetBegin {
|
||||||
|
t.Fatalf("expected: %d, got: %d", successCase.offsetBegin, hrange.offsetBegin)
|
||||||
|
}
|
||||||
|
|
||||||
|
if hrange.offsetEnd != successCase.offsetEnd {
|
||||||
|
t.Fatalf("expected: %d, got: %d", successCase.offsetEnd, hrange.offsetEnd)
|
||||||
|
}
|
||||||
|
if hrange.getLength() != successCase.length {
|
||||||
|
t.Fatalf("expected: %d, got: %d", successCase.length, hrange.getLength())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test invalid range strings.
|
||||||
|
invalidRangeStrings := []string{
|
||||||
|
"bytes=8",
|
||||||
|
"bytes=5-2",
|
||||||
|
"bytes=+2-5",
|
||||||
|
"bytes=2-+5",
|
||||||
|
"bytes=2--5",
|
||||||
|
"bytes=-",
|
||||||
|
"",
|
||||||
|
"2-5",
|
||||||
|
"bytes = 2-5",
|
||||||
|
"bytes=2 - 5",
|
||||||
|
"bytes=0-0,-1",
|
||||||
|
"bytes=2-5 ",
|
||||||
|
}
|
||||||
|
for _, rangeString := range invalidRangeStrings {
|
||||||
|
if _, err := parseCopyPartRange(rangeString, 10); err == nil {
|
||||||
|
t.Fatalf("expected: an error, got: <nil> for range %s", rangeString)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test error range strings.
|
||||||
|
errorRangeString := []string{
|
||||||
|
"bytes=10-10",
|
||||||
|
"bytes=20-30",
|
||||||
|
}
|
||||||
|
for _, rangeString := range errorRangeString {
|
||||||
|
if _, err := parseCopyPartRange(rangeString, 10); err != errInvalidRangeSource {
|
||||||
|
t.Fatalf("expected: %s, got: %s", errInvalidRangeSource, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -463,7 +463,6 @@ func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
|
||||||
pipeReader, pipeWriter := io.Pipe()
|
pipeReader, pipeWriter := io.Pipe()
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
var startOffset int64 // Read the whole file.
|
|
||||||
if gerr := fs.GetObject(srcBucket, srcObject, startOffset, length, pipeWriter); gerr != nil {
|
if gerr := fs.GetObject(srcBucket, srcObject, startOffset, length, pipeWriter); gerr != nil {
|
||||||
errorIf(gerr, "Unable to read %s/%s.", srcBucket, srcObject)
|
errorIf(gerr, "Unable to read %s/%s.", srcBucket, srcObject)
|
||||||
pipeWriter.CloseWithError(gerr)
|
pipeWriter.CloseWithError(gerr)
|
||||||
|
|
|
@ -17,7 +17,6 @@
|
||||||
package cmd
|
package cmd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
)
|
)
|
||||||
|
@ -252,9 +251,6 @@ func (e IncompleteBody) Error() string {
|
||||||
return e.Bucket + "#" + e.Object + "has incomplete body"
|
return e.Bucket + "#" + e.Object + "has incomplete body"
|
||||||
}
|
}
|
||||||
|
|
||||||
// errInvalidRange - returned when given range value is not valid.
|
|
||||||
var errInvalidRange = errors.New("Invalid range")
|
|
||||||
|
|
||||||
// InvalidRange - invalid range typed error.
|
// InvalidRange - invalid range typed error.
|
||||||
type InvalidRange struct {
|
type InvalidRange struct {
|
||||||
offsetBegin int64
|
offsetBegin int64
|
||||||
|
|
|
@ -601,17 +601,13 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
|
||||||
var hrange *httpRange
|
var hrange *httpRange
|
||||||
rangeHeader := r.Header.Get("x-amz-copy-source-range")
|
rangeHeader := r.Header.Get("x-amz-copy-source-range")
|
||||||
if rangeHeader != "" {
|
if rangeHeader != "" {
|
||||||
if hrange, err = parseRequestRange(rangeHeader, objInfo.Size); err != nil {
|
if hrange, err = parseCopyPartRange(rangeHeader, objInfo.Size); err != nil {
|
||||||
// Handle only errInvalidRange
|
// Handle only errInvalidRange
|
||||||
// Ignore other parse error and treat it as regular Get request like Amazon S3.
|
// Ignore other parse error and treat it as regular Get request like Amazon S3.
|
||||||
if err == errInvalidRange {
|
errorIf(err, "Unable to extract range %s", rangeHeader)
|
||||||
writeErrorResponse(w, ErrInvalidRange, r.URL)
|
writeCopyPartErr(w, err, r.URL)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// log the error.
|
|
||||||
errorIf(err, "Invalid request range")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verify before x-amz-copy-source preconditions before continuing with CopyObject.
|
// Verify before x-amz-copy-source preconditions before continuing with CopyObject.
|
||||||
|
@ -623,8 +619,8 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
|
||||||
var startOffset int64
|
var startOffset int64
|
||||||
length := objInfo.Size
|
length := objInfo.Size
|
||||||
if hrange != nil {
|
if hrange != nil {
|
||||||
startOffset = hrange.offsetBegin
|
|
||||||
length = hrange.getLength()
|
length = hrange.getLength()
|
||||||
|
startOffset = hrange.offsetBegin
|
||||||
}
|
}
|
||||||
|
|
||||||
/// maximum copy size for multipart objects in a single operation
|
/// maximum copy size for multipart objects in a single operation
|
||||||
|
@ -661,6 +657,12 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// X-Amz-Copy-Source shouldn't be set for this call.
|
||||||
|
if _, ok := r.Header["X-Amz-Copy-Source"]; ok {
|
||||||
|
writeErrorResponse(w, ErrInvalidCopySource, r.URL)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// get Content-Md5 sent by client and verify if valid
|
// get Content-Md5 sent by client and verify if valid
|
||||||
md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5"))
|
md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -26,6 +26,7 @@ import (
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
@ -939,6 +940,124 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Tests sanity of attempting to copying each parts at offsets from an existing
|
||||||
|
// file and create a new object. Also validates if the written is same as what we
|
||||||
|
// expected.
|
||||||
|
func TestAPICopyObjectPartHandlerSanity(t *testing.T) {
|
||||||
|
defer DetectTestLeak(t)()
|
||||||
|
ExecObjectLayerAPITest(t, testAPICopyObjectPartHandlerSanity, []string{"CopyObjectPart"})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
|
||||||
|
credentials credential, t *testing.T) {
|
||||||
|
|
||||||
|
objectName := "test-object"
|
||||||
|
// register event notifier.
|
||||||
|
err := initEventNotifier(obj)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Initializing event notifiers failed")
|
||||||
|
}
|
||||||
|
|
||||||
|
// set of byte data for PutObject.
|
||||||
|
// object has to be created before running tests for Copy Object.
|
||||||
|
// this is required even to assert the copied object,
|
||||||
|
bytesData := []struct {
|
||||||
|
byteData []byte
|
||||||
|
}{
|
||||||
|
{generateBytesData(6 * humanize.MiByte)},
|
||||||
|
}
|
||||||
|
|
||||||
|
// set of inputs for uploading the objects before tests for downloading is done.
|
||||||
|
putObjectInputs := []struct {
|
||||||
|
bucketName string
|
||||||
|
objectName string
|
||||||
|
contentLength int64
|
||||||
|
textData []byte
|
||||||
|
metaData map[string]string
|
||||||
|
}{
|
||||||
|
// case - 1.
|
||||||
|
{bucketName, objectName, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
|
||||||
|
}
|
||||||
|
sha256sum := ""
|
||||||
|
// iterate through the above set of inputs and upload the object.
|
||||||
|
for i, input := range putObjectInputs {
|
||||||
|
// uploading the object.
|
||||||
|
_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength,
|
||||||
|
bytes.NewBuffer(input.textData), input.metaData, sha256sum)
|
||||||
|
// if object upload fails stop the test.
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initiate Multipart upload for testing PutObjectPartHandler.
|
||||||
|
testObject := "testobject"
|
||||||
|
|
||||||
|
// PutObjectPart API HTTP Handler has to be tested in isolation,
|
||||||
|
// that is without any other handler being registered,
|
||||||
|
// That's why NewMultipartUpload is initiated using ObjectLayer.
|
||||||
|
uploadID, err := obj.NewMultipartUpload(bucketName, testObject, nil)
|
||||||
|
if err != nil {
|
||||||
|
// Failed to create NewMultipartUpload, abort.
|
||||||
|
t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
a := 0
|
||||||
|
b := globalMinPartSize - 1
|
||||||
|
var parts []completePart
|
||||||
|
for partNumber := 1; partNumber <= 2; partNumber++ {
|
||||||
|
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
|
||||||
|
rec := httptest.NewRecorder()
|
||||||
|
cpPartURL := getCopyObjectPartURL("", bucketName, testObject, uploadID, fmt.Sprintf("%d", partNumber))
|
||||||
|
|
||||||
|
// construct HTTP request for copy object.
|
||||||
|
var req *http.Request
|
||||||
|
req, err = newTestSignedRequestV4("PUT", cpPartURL, 0, nil, credentials.AccessKey, credentials.SecretKey)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Test failed to create HTTP request for copy object part: <ERROR> %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// "X-Amz-Copy-Source" header contains the information about the source bucket and the object to copied.
|
||||||
|
req.Header.Set("X-Amz-Copy-Source", url.QueryEscape(pathJoin(bucketName, objectName)))
|
||||||
|
req.Header.Set("X-Amz-Copy-Source-Range", fmt.Sprintf("bytes=%d-%d", a, b))
|
||||||
|
|
||||||
|
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
|
||||||
|
// Call the ServeHTTP to execute the handler, `func (api objectAPIHandlers) CopyObjectHandler` handles the request.
|
||||||
|
a = globalMinPartSize
|
||||||
|
b = len(bytesData[0].byteData) - 1
|
||||||
|
apiRouter.ServeHTTP(rec, req)
|
||||||
|
if rec.Code != http.StatusOK {
|
||||||
|
t.Fatalf("Test failed to create HTTP request for copy %d", rec.Code)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp := &CopyObjectPartResponse{}
|
||||||
|
if err = xmlDecoder(rec.Body, resp, rec.Result().ContentLength); err != nil {
|
||||||
|
t.Fatalf("Test failed to decode XML response: <ERROR> %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
parts = append(parts, completePart{
|
||||||
|
PartNumber: partNumber,
|
||||||
|
ETag: strings.Trim(resp.ETag, "\""),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
result, err := obj.CompleteMultipartUpload(bucketName, testObject, uploadID, parts)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Test: %s complete multipart upload failed: <ERROR> %v", instanceType, err)
|
||||||
|
}
|
||||||
|
if result.Size != int64(len(bytesData[0].byteData)) {
|
||||||
|
t.Fatalf("Test: %s expected size not written: expected %d, got %d", instanceType, len(bytesData[0].byteData), result.Size)
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err = obj.GetObject(bucketName, testObject, 0, int64(len(bytesData[0].byteData)), &buf); err != nil {
|
||||||
|
t.Fatalf("Test: %s reading completed file failed: <ERROR> %v", instanceType, err)
|
||||||
|
}
|
||||||
|
if !bytes.Equal(buf.Bytes(), bytesData[0].byteData) {
|
||||||
|
t.Fatalf("Test: %s returned data is not expected corruption detected:", instanceType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Wrapper for calling Copy Object Part API handler tests for both XL multiple disks and single node setup.
|
// Wrapper for calling Copy Object Part API handler tests for both XL multiple disks and single node setup.
|
||||||
func TestAPICopyObjectPartHandler(t *testing.T) {
|
func TestAPICopyObjectPartHandler(t *testing.T) {
|
||||||
defer DetectTestLeak(t)()
|
defer DetectTestLeak(t)()
|
||||||
|
@ -1069,10 +1188,23 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
|
||||||
accessKey: credentials.AccessKey,
|
accessKey: credentials.AccessKey,
|
||||||
secretKey: credentials.SecretKey,
|
secretKey: credentials.SecretKey,
|
||||||
|
|
||||||
expectedRespStatus: http.StatusRequestedRangeNotSatisfiable,
|
expectedRespStatus: http.StatusBadRequest,
|
||||||
},
|
},
|
||||||
|
|
||||||
// Test case - 6.
|
// Test case - 6.
|
||||||
|
// Test case with ivalid byte range for exceeding source size boundaries.
|
||||||
|
{
|
||||||
|
bucketName: bucketName,
|
||||||
|
uploadID: uploadID,
|
||||||
|
copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
|
||||||
|
copySourceRange: "bytes=0-6144",
|
||||||
|
accessKey: credentials.AccessKey,
|
||||||
|
secretKey: credentials.SecretKey,
|
||||||
|
|
||||||
|
expectedRespStatus: http.StatusBadRequest,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Test case - 7.
|
||||||
// Test case with object name missing from source.
|
// Test case with object name missing from source.
|
||||||
// fail with BadRequest.
|
// fail with BadRequest.
|
||||||
{
|
{
|
||||||
|
@ -1085,7 +1217,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
|
||||||
expectedRespStatus: http.StatusBadRequest,
|
expectedRespStatus: http.StatusBadRequest,
|
||||||
},
|
},
|
||||||
|
|
||||||
// Test case - 7.
|
// Test case - 8.
|
||||||
// Test case with non-existent source file.
|
// Test case with non-existent source file.
|
||||||
// Case for the purpose of failing `api.ObjectAPI.GetObjectInfo`.
|
// Case for the purpose of failing `api.ObjectAPI.GetObjectInfo`.
|
||||||
// Expecting the response status code to http.StatusNotFound (404).
|
// Expecting the response status code to http.StatusNotFound (404).
|
||||||
|
@ -1099,7 +1231,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
|
||||||
expectedRespStatus: http.StatusNotFound,
|
expectedRespStatus: http.StatusNotFound,
|
||||||
},
|
},
|
||||||
|
|
||||||
// Test case - 8.
|
// Test case - 9.
|
||||||
// Test case with non-existent source file.
|
// Test case with non-existent source file.
|
||||||
// Case for the purpose of failing `api.ObjectAPI.PutObjectPart`.
|
// Case for the purpose of failing `api.ObjectAPI.PutObjectPart`.
|
||||||
// Expecting the response status code to http.StatusNotFound (404).
|
// Expecting the response status code to http.StatusNotFound (404).
|
||||||
|
@ -1113,7 +1245,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
|
||||||
expectedRespStatus: http.StatusNotFound,
|
expectedRespStatus: http.StatusNotFound,
|
||||||
},
|
},
|
||||||
|
|
||||||
// Test case - 9.
|
// Test case - 10.
|
||||||
// Case with invalid AccessKey.
|
// Case with invalid AccessKey.
|
||||||
{
|
{
|
||||||
bucketName: bucketName,
|
bucketName: bucketName,
|
||||||
|
@ -1125,7 +1257,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
|
||||||
expectedRespStatus: http.StatusForbidden,
|
expectedRespStatus: http.StatusForbidden,
|
||||||
},
|
},
|
||||||
|
|
||||||
// Test case - 10.
|
// Test case - 11.
|
||||||
// Case with non-existent upload id.
|
// Case with non-existent upload id.
|
||||||
{
|
{
|
||||||
bucketName: bucketName,
|
bucketName: bucketName,
|
||||||
|
@ -1136,7 +1268,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
|
||||||
|
|
||||||
expectedRespStatus: http.StatusNotFound,
|
expectedRespStatus: http.StatusNotFound,
|
||||||
},
|
},
|
||||||
// Test case - 11.
|
// Test case - 12.
|
||||||
// invalid part number.
|
// invalid part number.
|
||||||
{
|
{
|
||||||
bucketName: bucketName,
|
bucketName: bucketName,
|
||||||
|
@ -1147,7 +1279,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
|
||||||
secretKey: credentials.SecretKey,
|
secretKey: credentials.SecretKey,
|
||||||
expectedRespStatus: http.StatusOK,
|
expectedRespStatus: http.StatusOK,
|
||||||
},
|
},
|
||||||
// Test case - 12.
|
// Test case - 13.
|
||||||
// maximum part number.
|
// maximum part number.
|
||||||
{
|
{
|
||||||
bucketName: bucketName,
|
bucketName: bucketName,
|
||||||
|
|
|
@ -54,3 +54,10 @@ var errServerTimeMismatch = errors.New("Server times are too far apart")
|
||||||
// errReservedBucket - bucket name is reserved for Minio, usually
|
// errReservedBucket - bucket name is reserved for Minio, usually
|
||||||
// returned for 'minio', '.minio.sys'
|
// returned for 'minio', '.minio.sys'
|
||||||
var errReservedBucket = errors.New("All access to this bucket is disabled")
|
var errReservedBucket = errors.New("All access to this bucket is disabled")
|
||||||
|
|
||||||
|
// errInvalidRange - returned when given range value is not valid.
|
||||||
|
var errInvalidRange = errors.New("Invalid range")
|
||||||
|
|
||||||
|
// errInvalidRangeSource - returned when given range value exceeds
|
||||||
|
// the source object size.
|
||||||
|
var errInvalidRangeSource = errors.New("Range specified exceeds source object size")
|
||||||
|
|
|
@ -546,7 +546,6 @@ func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
|
||||||
pipeReader, pipeWriter := io.Pipe()
|
pipeReader, pipeWriter := io.Pipe()
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
var startOffset int64 // Read the whole file.
|
|
||||||
if gerr := xl.GetObject(srcBucket, srcObject, startOffset, length, pipeWriter); gerr != nil {
|
if gerr := xl.GetObject(srcBucket, srcObject, startOffset, length, pipeWriter); gerr != nil {
|
||||||
errorIf(gerr, "Unable to read %s of the object `%s/%s`.", srcBucket, srcObject)
|
errorIf(gerr, "Unable to read %s of the object `%s/%s`.", srcBucket, srcObject)
|
||||||
pipeWriter.CloseWithError(toObjectErr(gerr, srcBucket, srcObject))
|
pipeWriter.CloseWithError(toObjectErr(gerr, srcBucket, srcObject))
|
||||||
|
|
Loading…
Reference in New Issue