Add GetObjectNInfo to object layer (#6449)

The new call combines GetObjectInfo and GetObject, and returns an
object with a ReadCloser interface.

Also adds a number of end-to-end encryption tests at the handler
level.
This commit is contained in:
Aditya Manthramurthy 2018-09-20 19:22:09 -07:00 committed by Harshavardhana
parent 7d0645fb3a
commit 36e51d0cee
30 changed files with 2335 additions and 439 deletions

View File

@ -24,6 +24,8 @@ import (
"net/http" "net/http"
"strconv" "strconv"
"time" "time"
"github.com/minio/minio/cmd/crypto"
) )
// Returns a hexadecimal representation of time at the // Returns a hexadecimal representation of time at the
@ -61,13 +63,10 @@ func encodeResponseJSON(response interface{}) []byte {
} }
// Write object header // Write object header
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, contentRange *httpRange) { func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec) (err error) {
// set common headers // set common headers
setCommonHeaders(w) setCommonHeaders(w)
// Set content length.
w.Header().Set("Content-Length", strconv.FormatInt(objInfo.Size, 10))
// Set last modified time. // Set last modified time.
lastModified := objInfo.ModTime.UTC().Format(http.TimeFormat) lastModified := objInfo.ModTime.UTC().Format(http.TimeFormat)
w.Header().Set("Last-Modified", lastModified) w.Header().Set("Last-Modified", lastModified)
@ -95,10 +94,30 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, contentRange *h
w.Header().Set(k, v) w.Header().Set(k, v)
} }
// for providing ranged content var totalObjectSize int64
if contentRange != nil && contentRange.offsetBegin > -1 { switch {
// Override content-length case crypto.IsEncrypted(objInfo.UserDefined):
w.Header().Set("Content-Length", strconv.FormatInt(contentRange.getLength(), 10)) totalObjectSize, err = objInfo.DecryptedSize()
w.Header().Set("Content-Range", contentRange.String()) if err != nil {
return err
}
default:
totalObjectSize = objInfo.Size
} }
// for providing ranged content
start, rangeLen, err := rs.GetOffsetLength(totalObjectSize)
if err != nil {
return err
}
// Set content length.
w.Header().Set("Content-Length", strconv.FormatInt(rangeLen, 10))
if rs != nil {
contentRange := fmt.Sprintf("bytes %d-%d/%d", start, start+rangeLen-1, totalObjectSize)
w.Header().Set("Content-Range", contentRange)
}
return nil
} }

View File

@ -84,7 +84,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
// initialize httptest Recorder, this records any mutations to response writer inside the handler. // initialize httptest Recorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
// construct HTTP request for Get bucket location. // construct HTTP request for Get bucket location.
req, err := newTestSignedRequestV4("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey) req, err := newTestSignedRequestV4("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for GetBucketLocationHandler: <ERROR> %v", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to create HTTP request for GetBucketLocationHandler: <ERROR> %v", i+1, instanceType, err)
} }
@ -116,7 +116,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler. // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder() recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint. // construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey) reqV2, err := newTestSignedRequestV2("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@ -220,7 +220,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler. // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
// construct HTTP request for HEAD bucket. // construct HTTP request for HEAD bucket.
req, err := newTestSignedRequestV4("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey) req, err := newTestSignedRequestV4("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for HeadBucketHandler: <ERROR> %v", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to create HTTP request for HeadBucketHandler: <ERROR> %v", i+1, instanceType, err)
} }
@ -235,7 +235,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler. // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder() recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint. // construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey) reqV2, err := newTestSignedRequestV2("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@ -437,7 +437,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
// construct HTTP request for List multipart uploads endpoint. // construct HTTP request for List multipart uploads endpoint.
u := getListMultipartUploadsURLWithParams("", testCase.bucket, testCase.prefix, testCase.keyMarker, testCase.uploadIDMarker, testCase.delimiter, testCase.maxUploads) u := getListMultipartUploadsURLWithParams("", testCase.bucket, testCase.prefix, testCase.keyMarker, testCase.uploadIDMarker, testCase.delimiter, testCase.maxUploads)
req, gerr := newTestSignedRequestV4("GET", u, 0, nil, testCase.accessKey, testCase.secretKey) req, gerr := newTestSignedRequestV4("GET", u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
if gerr != nil { if gerr != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", i+1, instanceType, gerr) t.Fatalf("Test %d: %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", i+1, instanceType, gerr)
} }
@ -454,7 +454,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
// construct HTTP request for PUT bucket policy endpoint. // construct HTTP request for PUT bucket policy endpoint.
// verify response for V2 signed HTTP request. // verify response for V2 signed HTTP request.
reqV2, err := newTestSignedRequestV2("GET", u, 0, nil, testCase.accessKey, testCase.secretKey) reqV2, err := newTestSignedRequestV2("GET", u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
} }
@ -471,7 +471,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
// construct HTTP request for List multipart uploads endpoint. // construct HTTP request for List multipart uploads endpoint.
u := getListMultipartUploadsURLWithParams("", bucketName, "", "", "", "", "") u := getListMultipartUploadsURLWithParams("", bucketName, "", "", "", "", "")
req, err := newTestSignedRequestV4("GET", u, 0, nil, "", "") // Generate an anonymous request. req, err := newTestSignedRequestV4("GET", u, 0, nil, "", "", nil) // Generate an anonymous request.
if err != nil { if err != nil {
t.Fatalf("Test %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", instanceType, err) t.Fatalf("Test %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", instanceType, err)
} }
@ -551,7 +551,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
for i, testCase := range testCases { for i, testCase := range testCases {
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler. // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
req, lerr := newTestSignedRequestV4("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey) req, lerr := newTestSignedRequestV4("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if lerr != nil { if lerr != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for ListBucketsHandler: <ERROR> %v", i+1, instanceType, lerr) t.Fatalf("Test %d: %s: Failed to create HTTP request for ListBucketsHandler: <ERROR> %v", i+1, instanceType, lerr)
} }
@ -568,7 +568,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
// construct HTTP request for PUT bucket policy endpoint. // construct HTTP request for PUT bucket policy endpoint.
// verify response for V2 signed HTTP request. // verify response for V2 signed HTTP request.
reqV2, err := newTestSignedRequestV2("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey) reqV2, err := newTestSignedRequestV2("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
@ -745,7 +745,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
// Generate a signed or anonymous request based on the testCase // Generate a signed or anonymous request based on the testCase
if testCase.accessKey != "" { if testCase.accessKey != "" {
req, err = newTestSignedRequestV4("POST", getDeleteMultipleObjectsURL("", bucketName), req, err = newTestSignedRequestV4("POST", getDeleteMultipleObjectsURL("", bucketName),
int64(len(testCase.objects)), bytes.NewReader(testCase.objects), testCase.accessKey, testCase.secretKey) int64(len(testCase.objects)), bytes.NewReader(testCase.objects), testCase.accessKey, testCase.secretKey, nil)
} else { } else {
req, err = newTestRequest("POST", getDeleteMultipleObjectsURL("", bucketName), req, err = newTestRequest("POST", getDeleteMultipleObjectsURL("", bucketName),
int64(len(testCase.objects)), bytes.NewReader(testCase.objects)) int64(len(testCase.objects)), bytes.NewReader(testCase.objects))
@ -785,7 +785,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
nilBucket := "dummy-bucket" nilBucket := "dummy-bucket"
nilObject := "" nilObject := ""
nilReq, err := newTestSignedRequestV4("POST", getDeleteMultipleObjectsURL("", nilBucket), 0, nil, "", "") nilReq, err := newTestSignedRequestV4("POST", getDeleteMultipleObjectsURL("", nilBucket), 0, nil, "", "", nil)
if err != nil { if err != nil {
t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
} }

View File

@ -254,7 +254,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
recV4 := httptest.NewRecorder() recV4 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint. // construct HTTP request for PUT bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testCase.bucketName), reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testCase.bucketName),
int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey) int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
} }
@ -268,7 +268,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
recV2 := httptest.NewRecorder() recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint. // construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testCase.bucketName), reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testCase.bucketName),
int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey) int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
} }
@ -304,7 +304,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
nilBucket := "dummy-bucket" nilBucket := "dummy-bucket"
nilReq, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", nilBucket), nilReq, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", nilBucket),
0, nil, "", "") 0, nil, "", "", nil)
if err != nil { if err != nil {
t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@ -346,7 +346,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
recV4 := httptest.NewRecorder() recV4 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint. // construct HTTP request for PUT bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testPolicy.bucketName), reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testPolicy.bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey) int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err) t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
} }
@ -360,7 +360,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
recV2 := httptest.NewRecorder() recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint. // construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testPolicy.bucketName), reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testPolicy.bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey) int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err) t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
} }
@ -417,7 +417,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
recV4 := httptest.NewRecorder() recV4 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint. // construct HTTP request for PUT bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("GET", getGetPolicyURL("", testCase.bucketName), reqV4, err := newTestSignedRequestV4("GET", getGetPolicyURL("", testCase.bucketName),
0, nil, testCase.accessKey, testCase.secretKey) 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err) t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
@ -456,7 +456,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
recV2 := httptest.NewRecorder() recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint. // construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("GET", getGetPolicyURL("", testCase.bucketName), reqV2, err := newTestSignedRequestV2("GET", getGetPolicyURL("", testCase.bucketName),
0, nil, testCase.accessKey, testCase.secretKey) 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err) t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
} }
@ -511,7 +511,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
nilBucket := "dummy-bucket" nilBucket := "dummy-bucket"
nilReq, err := newTestSignedRequestV4("GET", getGetPolicyURL("", nilBucket), nilReq, err := newTestSignedRequestV4("GET", getGetPolicyURL("", nilBucket),
0, nil, "", "") 0, nil, "", "", nil)
if err != nil { if err != nil {
t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@ -591,7 +591,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
recV4 := httptest.NewRecorder() recV4 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint. // construct HTTP request for PUT bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testPolicy.bucketName), reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testPolicy.bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey) int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err) t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
} }
@ -641,7 +641,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
recV4 := httptest.NewRecorder() recV4 := httptest.NewRecorder()
// construct HTTP request for Delete bucket policy endpoint. // construct HTTP request for Delete bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("DELETE", getDeletePolicyURL("", testCase.bucketName), reqV4, err := newTestSignedRequestV4("DELETE", getDeletePolicyURL("", testCase.bucketName),
0, nil, testCase.accessKey, testCase.secretKey) 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err) t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
} }
@ -663,7 +663,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
recV2 := httptest.NewRecorder() recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint. // construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testPolicy.bucketName), reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testPolicy.bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey) int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err) t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
} }
@ -680,7 +680,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
recV2 := httptest.NewRecorder() recV2 := httptest.NewRecorder()
// construct HTTP request for Delete bucket policy endpoint. // construct HTTP request for Delete bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("DELETE", getDeletePolicyURL("", testCase.bucketName), reqV2, err := newTestSignedRequestV2("DELETE", getDeletePolicyURL("", testCase.bucketName),
0, nil, testCase.accessKey, testCase.secretKey) 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err) t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
} }
@ -714,7 +714,7 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
nilBucket := "dummy-bucket" nilBucket := "dummy-bucket"
nilReq, err := newTestSignedRequestV4("DELETE", getDeletePolicyURL("", nilBucket), nilReq, err := newTestSignedRequestV4("DELETE", getDeletePolicyURL("", nilBucket),
0, nil, "", "") 0, nil, "", "", nil)
if err != nil { if err != nil {
t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)

View File

@ -17,11 +17,8 @@
package cmd package cmd
import ( import (
"fmt"
"net/http" "net/http"
"net/url" "net/url"
"strconv"
"strings"
) )
// Writes S3 compatible copy part range error. // Writes S3 compatible copy part range error.
@ -39,68 +36,35 @@ func writeCopyPartErr(w http.ResponseWriter, err error, url *url.URL) {
} }
} }
// Parses x-amz-copy-source-range for CopyObjectPart API. Specifically written to // Parses x-amz-copy-source-range for CopyObjectPart API. Its behavior
// differentiate the behavior between regular httpRange header v/s x-amz-copy-source-range. // is different from regular HTTP range header. It only supports the
// The range of bytes to copy from the source object. The range value must use the form // form `bytes=first-last` where first and last are zero-based byte
// bytes=first-last, where the first and last are the zero-based byte offsets to copy. // offsets. See
// For example, bytes=0-9 indicates that you want to copy the first ten bytes of the source.
// http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html // http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
func parseCopyPartRange(rangeString string, resourceSize int64) (hrange *httpRange, err error) { // for full details. This function treats an empty rangeString as
// Return error if given range string doesn't start with byte range prefix. // referring to the whole resource.
if !strings.HasPrefix(rangeString, byteRangePrefix) { //
return nil, fmt.Errorf("'%s' does not start with '%s'", rangeString, byteRangePrefix) // In addition to parsing the range string, it also validates the
} // specified range against the given object size, so that Copy API
// specific error can be returned.
// Trim byte range prefix. func parseCopyPartRange(rangeString string, resourceSize int64) (offset, length int64, err error) {
byteRangeString := strings.TrimPrefix(rangeString, byteRangePrefix) var hrange *HTTPRangeSpec
if rangeString != "" {
// Check if range string contains delimiter '-', else return error. eg. "bytes=8" hrange, err = parseRequestRangeSpec(rangeString)
sepIndex := strings.Index(byteRangeString, "-") if err != nil {
if sepIndex == -1 { return -1, -1, err
return nil, errInvalidRange
}
offsetBeginString := byteRangeString[:sepIndex]
offsetBegin := int64(-1)
// Convert offsetBeginString only if it's not empty.
if len(offsetBeginString) > 0 {
if !validBytePos.MatchString(offsetBeginString) {
return nil, errInvalidRange
} }
if offsetBegin, err = strconv.ParseInt(offsetBeginString, 10, 64); err != nil {
return nil, errInvalidRange // Require that both start and end are specified.
if hrange.IsSuffixLength || hrange.Start == -1 || hrange.End == -1 {
return -1, -1, errInvalidRange
}
// Validate specified range against object size.
if hrange.Start >= resourceSize || hrange.End >= resourceSize {
return -1, -1, errInvalidRangeSource
} }
} }
offsetEndString := byteRangeString[sepIndex+1:] return hrange.GetOffsetLength(resourceSize)
offsetEnd := int64(-1)
// Convert offsetEndString only if it's not empty.
if len(offsetEndString) > 0 {
if !validBytePos.MatchString(offsetEndString) {
return nil, errInvalidRange
}
if offsetEnd, err = strconv.ParseInt(offsetEndString, 10, 64); err != nil {
return nil, errInvalidRange
}
}
// rangeString contains first byte positions. eg. "bytes=2-" or
// rangeString contains last byte positions. eg. "bytes=-2"
if offsetBegin == -1 || offsetEnd == -1 {
return nil, errInvalidRange
}
// Last byte position should not be greater than first byte
// position. eg. "bytes=5-2"
if offsetBegin > offsetEnd {
return nil, errInvalidRange
}
// First and last byte positions should not be >= resourceSize.
if offsetBegin >= resourceSize || offsetEnd >= resourceSize {
return nil, errInvalidRangeSource
}
// Success..
return &httpRange{offsetBegin, offsetEnd, resourceSize}, nil
} }

View File

@ -25,29 +25,26 @@ func TestParseCopyPartRange(t *testing.T) {
rangeString string rangeString string
offsetBegin int64 offsetBegin int64
offsetEnd int64 offsetEnd int64
length int64
}{ }{
{"bytes=2-5", 2, 5, 4}, {"bytes=2-5", 2, 5},
{"bytes=2-9", 2, 9, 8}, {"bytes=2-9", 2, 9},
{"bytes=2-2", 2, 2, 1}, {"bytes=2-2", 2, 2},
{"bytes=0000-0006", 0, 6, 7}, {"", 0, 9},
{"bytes=0000-0006", 0, 6},
} }
for _, successCase := range successCases { for _, successCase := range successCases {
hrange, err := parseCopyPartRange(successCase.rangeString, 10) start, length, err := parseCopyPartRange(successCase.rangeString, 10)
if err != nil { if err != nil {
t.Fatalf("expected: <nil>, got: %s", err) t.Fatalf("expected: <nil>, got: %s", err)
} }
if hrange.offsetBegin != successCase.offsetBegin { if start != successCase.offsetBegin {
t.Fatalf("expected: %d, got: %d", successCase.offsetBegin, hrange.offsetBegin) t.Fatalf("expected: %d, got: %d", successCase.offsetBegin, start)
} }
if hrange.offsetEnd != successCase.offsetEnd { if start+length-1 != successCase.offsetEnd {
t.Fatalf("expected: %d, got: %d", successCase.offsetEnd, hrange.offsetEnd) t.Fatalf("expected: %d, got: %d", successCase.offsetEnd, start+length-1)
}
if hrange.getLength() != successCase.length {
t.Fatalf("expected: %d, got: %d", successCase.length, hrange.getLength())
} }
} }
@ -59,7 +56,6 @@ func TestParseCopyPartRange(t *testing.T) {
"bytes=2-+5", "bytes=2-+5",
"bytes=2--5", "bytes=2--5",
"bytes=-", "bytes=-",
"",
"2-5", "2-5",
"bytes = 2-5", "bytes = 2-5",
"bytes=2 - 5", "bytes=2 - 5",
@ -67,7 +63,7 @@ func TestParseCopyPartRange(t *testing.T) {
"bytes=2-5 ", "bytes=2-5 ",
} }
for _, rangeString := range invalidRangeStrings { for _, rangeString := range invalidRangeStrings {
if _, err := parseCopyPartRange(rangeString, 10); err == nil { if _, _, err := parseCopyPartRange(rangeString, 10); err == nil {
t.Fatalf("expected: an error, got: <nil> for range %s", rangeString) t.Fatalf("expected: an error, got: <nil> for range %s", rangeString)
} }
} }
@ -78,7 +74,7 @@ func TestParseCopyPartRange(t *testing.T) {
"bytes=20-30", "bytes=20-30",
} }
for _, rangeString := range errorRangeString { for _, rangeString := range errorRangeString {
if _, err := parseCopyPartRange(rangeString, 10); err != errInvalidRangeSource { if _, _, err := parseCopyPartRange(rangeString, 10); err != errInvalidRangeSource {
t.Fatalf("expected: %s, got: %s", errInvalidRangeSource, err) t.Fatalf("expected: %s, got: %s", errInvalidRangeSource, err)
} }
} }

View File

@ -31,6 +31,7 @@ import (
"github.com/djherbis/atime" "github.com/djherbis/atime"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger" "github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/wildcard" "github.com/minio/minio/pkg/wildcard"
@ -57,6 +58,7 @@ type cacheObjects struct {
// file path patterns to exclude from cache // file path patterns to exclude from cache
exclude []string exclude []string
// Object functions pointing to the corresponding functions of backend implementation. // Object functions pointing to the corresponding functions of backend implementation.
GetObjectNInfoFn func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header) (gr *GetObjectReader, err error)
GetObjectFn func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) GetObjectFn func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
PutObjectFn func(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) PutObjectFn func(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
@ -88,6 +90,7 @@ type CacheObjectLayer interface {
ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error)
DeleteBucket(ctx context.Context, bucket string) error DeleteBucket(ctx context.Context, bucket string) error
// Object operations. // Object operations.
GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header) (gr *GetObjectReader, err error)
GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
@ -103,6 +106,11 @@ type CacheObjectLayer interface {
StorageInfo(ctx context.Context) CacheStorageInfo StorageInfo(ctx context.Context) CacheStorageInfo
} }
// IsCacheable returns if the object should be saved in the cache.
func (o ObjectInfo) IsCacheable() bool {
	// Encrypted objects are never cached: the cache stores plaintext
	// copies and must not hold material for SSE objects.
	return !crypto.IsEncrypted(o.UserDefined)
}
// backendDownError returns true if err is due to backend failure or faulty disk if in server mode // backendDownError returns true if err is due to backend failure or faulty disk if in server mode
func backendDownError(err error) bool { func backendDownError(err error) bool {
_, backendDown := err.(BackendDown) _, backendDown := err.(BackendDown)
@ -175,6 +183,86 @@ func (c cacheObjects) getMetadata(objInfo ObjectInfo) map[string]string {
return metadata return metadata
} }
// GetObjectNInfo - combined streaming get for the caching layer:
//
//   - excluded or non-cacheable objects go straight to the backend;
//   - when the backend is down, a cached copy (if any) is served;
//   - a fresh (ETag-matching, non-stale) cached copy is preferred;
//   - otherwise the backend stream is returned and, for full-object
//     reads that fit the cache drive, tee'd into the cache.
func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header) (gr *GetObjectReader, err error) {
	bkReader, bkErr := c.GetObjectNInfoFn(ctx, bucket, object, rs, h)

	// BUG FIX: bkReader must not be dereferenced unless bkErr is nil —
	// the backend returns a nil reader on error.
	if c.isCacheExclude(bucket, object) || (bkErr == nil && !bkReader.ObjInfo.IsCacheable()) {
		return bkReader, bkErr
	}

	// fetch cacheFSObjects if object is currently cached or nearest available cache drive
	dcache, cerr := c.cache.getCachedFSLoc(ctx, bucket, object)
	if cerr != nil {
		return bkReader, bkErr
	}

	backendDown := backendDownError(bkErr)
	if bkErr != nil && !backendDown {
		// BUG FIX: inspect bkErr (the backend error), not the cache
		// lookup error, which is necessarily nil at this point.
		if _, ok := bkErr.(ObjectNotFound); ok {
			// Delete the cached entry if backend object was deleted.
			dcache.Delete(ctx, bucket, object)
		}
		return nil, bkErr
	}

	// bkErr is nil here unless the backend is down, so the short-circuit
	// below keeps the dereference safe.
	if !backendDown && filterFromCache(bkReader.ObjInfo.UserDefined) {
		return bkReader, bkErr
	}

	if cacheReader, cacheErr := dcache.GetObjectNInfo(ctx, bucket, object, rs, h); cacheErr == nil {
		if backendDown {
			// If the backend is down, serve the request from cache.
			return cacheReader, nil
		}
		if cacheReader.ObjInfo.ETag == bkReader.ObjInfo.ETag && !isStaleCache(bkReader.ObjInfo) {
			// Object is not stale, so serve from cache
			return cacheReader, nil
		}
		// Object is stale, so delete from cache
		dcache.Delete(ctx, bucket, object)
	}

	// Since we got here, we are serving the request from backend,
	// and also adding the object to the cache.
	if bkErr != nil {
		// BUG FIX: backend is down and there was no usable cached
		// copy; bail out BEFORE dereferencing the nil bkReader below.
		return nil, bkErr
	}
	if rs != nil {
		// We don't cache partial objects.
		return bkReader, bkErr
	}
	if !dcache.diskAvailable(bkReader.ObjInfo.Size * cacheSizeMultiplier) {
		// cache only objects < 1/100th of disk capacity
		return bkReader, bkErr
	}

	// Initialize pipe: the tee writes backend bytes into the cache
	// while the caller consumes the stream.
	pipeReader, pipeWriter := io.Pipe()
	teeReader := io.TeeReader(bkReader, pipeWriter)
	hashReader, herr := hash.NewReader(pipeReader, bkReader.ObjInfo.Size, "", "")
	if herr != nil {
		bkReader.Close()
		return nil, herr
	}

	go func() {
		opts := ObjectOptions{}
		putErr := dcache.Put(ctx, bucket, object, hashReader, c.getMetadata(bkReader.ObjInfo), opts)
		// close the write end of the pipe, so the error gets
		// propagated to getObjReader
		pipeWriter.CloseWithError(putErr)
	}()

	cleanupBackend := func() { bkReader.Close() }
	gr = NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, cleanupBackend)
	return gr, nil
}
// Uses cached-object to serve the request. If object is not cached it serves the request from the backend and also // Uses cached-object to serve the request. If object is not cached it serves the request from the backend and also
// stores it in the cache for serving subsequent requests. // stores it in the cache for serving subsequent requests.
func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) { func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {

View File

@ -0,0 +1,182 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"testing"
)
var alphabets = []byte("abcdefghijklmnopqrstuvwxyz0123456789")
// DummyDataGen returns a reader that repeats the bytes in `alphabets`
// upto the desired length.
type DummyDataGen struct {
b []byte
idx, length int64
}
// NewDummyDataGen returns a ReadSeeker over the first `totalLength`
// bytes from the infinite stream consisting of repeated
// concatenations of `alphabets`.
//
// The skipOffset (generally = 0) can be used to skip a given number
// of bytes from the beginning of the infinite stream. This is useful
// to compare such streams of bytes that may be split up, because:
//
// Given the function:
//
// f := func(r io.Reader) string {
// b, _ := ioutil.ReadAll(r)
// return string(b)
// }
//
// for example, the following is true:
//
// f(NewDummyDataGen(100, 0)) == f(NewDummyDataGen(50, 0)) + f(NewDummyDataGen(50, 50))
func NewDummyDataGen(totalLength, skipOffset int64) io.ReadSeeker {
if totalLength < 0 {
panic("Negative length passed to DummyDataGen!")
}
if skipOffset < 0 {
panic("Negative rotations are not allowed")
}
skipOffset = skipOffset % int64(len(alphabets))
as := make([]byte, 2*len(alphabets))
copy(as, alphabets)
copy(as[len(alphabets):], alphabets)
b := as[skipOffset : skipOffset+int64(len(alphabets))]
return &DummyDataGen{
length: totalLength,
b: b,
}
}
func (d *DummyDataGen) Read(b []byte) (n int, err error) {
k := len(b)
numLetters := int64(len(d.b))
for k > 0 && d.idx < d.length {
w := copy(b[len(b)-k:], d.b[d.idx%numLetters:])
k -= w
d.idx += int64(w)
n += w
}
if d.idx >= d.length {
extraBytes := d.idx - d.length
n -= int(extraBytes)
if n < 0 {
n = 0
}
err = io.EOF
}
return
}
func (d *DummyDataGen) Seek(offset int64, whence int) (int64, error) {
switch whence {
case io.SeekStart:
if offset < 0 {
return 0, errors.New("Invalid offset")
}
d.idx = offset
case io.SeekCurrent:
if d.idx+offset < 0 {
return 0, errors.New("Invalid offset")
}
d.idx += offset
case io.SeekEnd:
if d.length+offset < 0 {
return 0, errors.New("Invalid offset")
}
d.idx = d.length + offset
}
return d.idx, nil
}
// TestDummyDataGenerator verifies the stream-splitting and rotation
// properties promised by NewDummyDataGen's documentation.
func TestDummyDataGenerator(t *testing.T) {
	drain := func(r io.Reader) string {
		buf, _ := ioutil.ReadAll(r)
		return string(buf)
	}
	expectEq := func(x, y string) {
		if x != y {
			t.Fatalf("Unexpected equality failure")
		}
	}

	// Zero-length stream is empty.
	expectEq(drain(NewDummyDataGen(0, 0)), "")
	// Rotating by a full alphabet is a no-op.
	expectEq(drain(NewDummyDataGen(10, 0)), drain(NewDummyDataGen(10, int64(len(alphabets)))))
	// A stream equals the concatenation of its two halves.
	expectEq(drain(NewDummyDataGen(100, 0)), drain(NewDummyDataGen(50, 0))+drain(NewDummyDataGen(50, 50)))

	// Seeking forward is equivalent to a shorter un-rotated stream.
	g := NewDummyDataGen(100, 0)
	g.Seek(int64(len(alphabets)), 0)
	expectEq(drain(g), drain(NewDummyDataGen(100-int64(len(alphabets)), 0)))
}
// Compares all the bytes returned by the given readers. Any Read
// errors cause a `false` result. A string describing the error is
// also returned.
func cmpReaders(r1, r2 io.Reader) (bool, string) {
bufLen := 32 * 1024
b1, b2 := make([]byte, bufLen), make([]byte, bufLen)
for i := 0; true; i++ {
n1, e1 := io.ReadFull(r1, b1)
n2, e2 := io.ReadFull(r2, b2)
if n1 != n2 {
return false, fmt.Sprintf("Read %d != %d bytes from the readers", n1, n2)
}
if !bytes.Equal(b1[:n1], b2[:n2]) {
return false, fmt.Sprintf("After reading %d equal buffers (32Kib each), we got the following two strings:\n%v\n%v\n",
i, b1, b2)
}
// Check if stream has ended
if (e1 == io.ErrUnexpectedEOF && e2 == io.ErrUnexpectedEOF) || (e1 == io.EOF && e2 == io.EOF) {
break
}
if e1 != nil || e2 != nil {
return false, fmt.Sprintf("Got unexpected error values: %v == %v", e1, e2)
}
}
return true, ""
}
// TestCmpReaders exercises cmpReaders on equal and unequal streams.
func TestCmpReaders(t *testing.T) {
	// Identical content: comparison succeeds with an empty message.
	ok, msg := cmpReaders(bytes.NewBuffer([]byte("abc")), bytes.NewBuffer([]byte("abc")))
	if !(ok && msg == "") {
		t.Fatalf("unexpected")
	}

	// One stream longer than the other: comparison fails.
	ok, _ = cmpReaders(bytes.NewBuffer([]byte("abc")), bytes.NewBuffer([]byte("abcd")))
	if ok {
		t.Fatalf("unexpected")
	}
}

View File

@ -19,6 +19,7 @@ package cmd
import ( import (
"context" "context"
"io" "io"
"net/http"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/madmin" "github.com/minio/minio/pkg/madmin"
@ -59,6 +60,10 @@ func (api *DummyObjectLayer) ListObjectsV2(ctx context.Context, bucket, prefix,
return return
} }
// GetObjectNInfo - no-op stub satisfying the ObjectLayer interface;
// returns a nil reader and nil error (zero values of the named returns).
func (api *DummyObjectLayer) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header) (gr *GetObjectReader, err error) {
	return
}
func (api *DummyObjectLayer) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) { func (api *DummyObjectLayer) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
return return
} }

View File

@ -82,7 +82,7 @@ func hasServerSideEncryptionHeader(header http.Header) bool {
// ParseSSECopyCustomerRequest parses the SSE-C header fields of the provided request. // ParseSSECopyCustomerRequest parses the SSE-C header fields of the provided request.
// It returns the client provided key on success. // It returns the client provided key on success.
func ParseSSECopyCustomerRequest(r *http.Request, metadata map[string]string) (key []byte, err error) { func ParseSSECopyCustomerRequest(h http.Header, metadata map[string]string) (key []byte, err error) {
if !globalIsSSL { // minio only supports HTTP or HTTPS requests not both at the same time if !globalIsSSL { // minio only supports HTTP or HTTPS requests not both at the same time
// we cannot use r.TLS == nil here because Go's http implementation reflects on // we cannot use r.TLS == nil here because Go's http implementation reflects on
// the net.Conn and sets the TLS field of http.Request only if it's an tls.Conn. // the net.Conn and sets the TLS field of http.Request only if it's an tls.Conn.
@ -90,10 +90,10 @@ func ParseSSECopyCustomerRequest(r *http.Request, metadata map[string]string) (k
// will always fail -> r.TLS is always nil even for TLS requests. // will always fail -> r.TLS is always nil even for TLS requests.
return nil, errInsecureSSERequest return nil, errInsecureSSERequest
} }
if crypto.S3.IsEncrypted(metadata) && crypto.SSECopy.IsRequested(r.Header) { if crypto.S3.IsEncrypted(metadata) && crypto.SSECopy.IsRequested(h) {
return nil, crypto.ErrIncompatibleEncryptionMethod return nil, crypto.ErrIncompatibleEncryptionMethod
} }
k, err := crypto.SSECopy.ParseHTTP(r.Header) k, err := crypto.SSECopy.ParseHTTP(h)
return k[:], err return k[:], err
} }
@ -240,7 +240,7 @@ func DecryptCopyRequest(client io.Writer, r *http.Request, bucket, object string
err error err error
) )
if crypto.SSECopy.IsRequested(r.Header) { if crypto.SSECopy.IsRequested(r.Header) {
key, err = ParseSSECopyCustomerRequest(r, metadata) key, err = ParseSSECopyCustomerRequest(r.Header, metadata)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -312,6 +312,127 @@ func newDecryptWriterWithObjectKey(client io.Writer, objectEncryptionKey []byte,
return writer, nil return writer, nil
} }
// Adding support for reader based interface

// DecryptRequestWithSequenceNumberR - same as
// DecryptRequestWithSequenceNumber but with a reader
func DecryptRequestWithSequenceNumberR(client io.Reader, h http.Header, bucket, object string, seqNumber uint32, metadata map[string]string) (io.Reader, error) {
	var clientKey []byte
	// SSE-S3 objects need no client key; otherwise parse the SSE-C
	// key from the request headers.
	if !crypto.S3.IsEncrypted(metadata) {
		k, err := ParseSSECustomerHeader(h)
		if err != nil {
			return nil, err
		}
		clientKey = k
		delete(metadata, crypto.SSECKey) // make sure we do not save the key by accident
	}
	return newDecryptReader(client, clientKey, bucket, object, seqNumber, metadata)
}
// DecryptCopyRequestR - same as DecryptCopyRequest, but with a
// Reader
func DecryptCopyRequestR(client io.Reader, h http.Header, bucket, object string, metadata map[string]string) (io.Reader, error) {
	var key []byte
	// The copy-source key is only present when SSE-C copy is requested.
	if crypto.SSECopy.IsRequested(h) {
		k, err := ParseSSECopyCustomerRequest(h, metadata)
		if err != nil {
			return nil, err
		}
		key = k
	}
	delete(metadata, crypto.SSECopyKey) // make sure we do not save the key by accident
	return newDecryptReader(client, key, bucket, object, 0, metadata)
}
// newDecryptReader derives the object encryption key from the (possibly
// nil) client key and the object metadata, then wraps `client` in a
// decrypting reader.
func newDecryptReader(client io.Reader, key []byte, bucket, object string, seqNumber uint32, metadata map[string]string) (io.Reader, error) {
	oek, derr := decryptObjectInfo(key, bucket, object, metadata)
	if derr != nil {
		return nil, derr
	}
	return newDecryptReaderWithObjectKey(client, oek, seqNumber, metadata)
}
// newDecryptReaderWithObjectKey wraps `client` in a DARE decryption
// reader keyed by the already-derived object encryption key, starting
// at the given package sequence number.
func newDecryptReaderWithObjectKey(client io.Reader, objectEncryptionKey []byte, seqNumber uint32, metadata map[string]string) (io.Reader, error) {
	cfg := sio.Config{
		Key:            objectEncryptionKey,
		SequenceNumber: seqNumber,
	}
	r, err := sio.DecryptReader(client, cfg)
	if err != nil {
		// Setup failure is reported uniformly as a bad customer key.
		return nil, crypto.ErrInvalidCustomerKey
	}
	return r, nil
}
// GetEncryptedOffsetLength - returns encrypted offset and length
// along with sequence number
func GetEncryptedOffsetLength(startOffset, length int64, objInfo ObjectInfo) (seqNumber uint32, encStartOffset, encLength int64) {
	// Multipart math applies only when the object both carries the
	// multipart marker and actually has recorded parts.
	multipart := crypto.IsMultiPart(objInfo.UserDefined) && len(objInfo.Parts) > 0
	if multipart {
		seqNumber, encStartOffset, encLength = getEncryptedMultipartsOffsetLength(startOffset, length, objInfo)
	} else {
		seqNumber, encStartOffset, encLength = getEncryptedSinglePartOffsetLength(startOffset, length, objInfo)
	}
	return
}
// DecryptBlocksRequestR - same as DecryptBlocksRequest but with a
// reader
func DecryptBlocksRequestR(inputReader io.Reader, h http.Header, offset,
	length int64, seqNumber uint32, partStart int, oi ObjectInfo, copySource bool) (
	io.Reader, error) {

	bucket, object := oi.Bucket, oi.Name

	// Single part case: delegate directly to the single-stream helpers.
	if len(oi.Parts) == 0 || !crypto.IsMultiPart(oi.UserDefined) {
		if copySource {
			return DecryptCopyRequestR(inputReader, h, bucket, object, oi.UserDefined)
		}
		return DecryptRequestWithSequenceNumberR(inputReader, h, bucket, object, seqNumber, oi.UserDefined)
	}

	// Copy encryption metadata for internal use.
	metadata := make(map[string]string, len(oi.UserDefined))
	for k, v := range oi.UserDefined {
		metadata[k] = v
	}

	// The customer key header to stash depends on whether we decrypt
	// a copy source or a regular request.
	keyHeader := crypto.SSECKey
	if copySource {
		keyHeader = crypto.SSECopyKey
	}

	r := &DecryptBlocksReader{
		reader:            inputReader,
		startSeqNum:       seqNumber,
		partDecRelOffset:  int64(seqNumber) * sseDAREPackageBlockSize,
		partEncRelOffset:  int64(seqNumber) * (sseDAREPackageBlockSize + sseDAREPackageMetaSize),
		parts:             oi.Parts,
		partIndex:         partStart,
		header:            h,
		bucket:            bucket,
		object:            object,
		metadata:          metadata,
		customerKeyHeader: h.Get(keyHeader),
		copySource:        copySource,
	}

	// Prime the decrypter for the first part to be read.
	if err := r.buildDecrypter(r.parts[r.partIndex].Number); err != nil {
		return nil, err
	}
	return r, nil
}
// DecryptRequestWithSequenceNumber decrypts the object with the client provided key. It also removes // DecryptRequestWithSequenceNumber decrypts the object with the client provided key. It also removes
// the client-side-encryption metadata from the object and sets the correct headers. // the client-side-encryption metadata from the object and sets the correct headers.
func DecryptRequestWithSequenceNumber(client io.Writer, r *http.Request, bucket, object string, seqNumber uint32, metadata map[string]string) (io.WriteCloser, error) { func DecryptRequestWithSequenceNumber(client io.Writer, r *http.Request, bucket, object string, seqNumber uint32, metadata map[string]string) (io.WriteCloser, error) {
@ -333,7 +454,131 @@ func DecryptRequest(client io.Writer, r *http.Request, bucket, object string, me
return DecryptRequestWithSequenceNumber(client, r, bucket, object, 0, metadata) return DecryptRequestWithSequenceNumber(client, r, bucket, object, 0, metadata)
} }
// DecryptBlocksWriter - decrypts multipart parts, while implementing a io.Writer compatible interface. // DecryptBlocksReader - decrypts multipart parts, while implementing
// a io.Reader compatible interface.
type DecryptBlocksReader struct {
	// Source of the encrypted content that will be decrypted
	reader io.Reader
	// Current decrypter for the current encrypted data block
	decrypter io.Reader
	// Start sequence number
	startSeqNum uint32
	// Current part index
	partIndex int
	// Parts information
	parts []objectPartInfo
	// Stashed request headers; re-populated with the customer key in
	// buildDecrypter so the standard parsing helpers can be reused.
	header         http.Header
	bucket, object string
	// Copy of the object's encryption metadata.
	metadata map[string]string
	// Decrypted/encrypted read offsets relative to the start of the
	// current part.
	partDecRelOffset, partEncRelOffset int64
	// True when decrypting the source object of a server-side copy.
	copySource bool
	// Customer Key
	customerKeyHeader string
}
// buildDecrypter sets up d.decrypter for the given part number: it
// derives the part encryption key (HMAC of the part ID under the
// object key) and limits the underlying reader to the part's
// encrypted size, since each part is an independent DARE stream.
func (d *DecryptBlocksReader) buildDecrypter(partID int) error {
	// Work on a copy of the metadata so key material deleted below
	// never touches d.metadata.
	m := make(map[string]string)
	for k, v := range d.metadata {
		m[k] = v
	}
	// Initialize the first decrypter; new decrypters will be
	// initialized in Read() operation as needed.
	var key []byte
	var err error
	if d.copySource {
		if crypto.SSEC.IsEncrypted(d.metadata) {
			// Re-inject the stashed customer key header so the
			// standard request parsers can validate it.
			d.header.Set(crypto.SSECopyKey, d.customerKeyHeader)
			key, err = ParseSSECopyCustomerRequest(d.header, d.metadata)
		}
	} else {
		if crypto.SSEC.IsEncrypted(d.metadata) {
			d.header.Set(crypto.SSECKey, d.customerKeyHeader)
			key, err = ParseSSECustomerHeader(d.header)
		}
	}
	if err != nil {
		return err
	}

	objectEncryptionKey, err := decryptObjectInfo(key, d.bucket, d.object, m)
	if err != nil {
		return err
	}

	var partIDbin [4]byte
	binary.LittleEndian.PutUint32(partIDbin[:], uint32(partID)) // marshal part ID

	mac := hmac.New(sha256.New, objectEncryptionKey) // derive part encryption key from part ID and object key
	mac.Write(partIDbin[:])
	partEncryptionKey := mac.Sum(nil)

	// make sure we do not save the key by accident
	if d.copySource {
		delete(m, crypto.SSECopyKey)
	} else {
		delete(m, crypto.SSECKey)
	}

	// Limit the reader, so the decryptor doesnt receive bytes
	// from the next part (different DARE stream)
	encLenToRead := d.parts[d.partIndex].Size - d.partEncRelOffset
	decrypter, err := newDecryptReaderWithObjectKey(io.LimitReader(d.reader, encLenToRead), partEncryptionKey, d.startSeqNum, m)
	if err != nil {
		return err
	}

	d.decrypter = decrypter
	return nil
}
// Read decrypts data from the underlying stream, switching to a new
// decrypter whenever the read crosses a part boundary. It returns the
// number of decrypted bytes actually written into p, per the io.Reader
// contract. (BUG FIX: the previous version always returned len(p),
// overstating short reads from the decrypter and from part crossings.)
func (d *DecryptBlocksReader) Read(p []byte) (int, error) {
	var err error
	var n1 int
	decPartSize, _ := sio.DecryptedSize(uint64(d.parts[d.partIndex].Size))
	unreadPartLen := int64(decPartSize) - d.partDecRelOffset
	if int64(len(p)) < unreadPartLen {
		// Request is satisfied entirely within the current part;
		// the decrypter may legitimately return fewer bytes.
		n1, err = d.decrypter.Read(p)
		if err != nil {
			return 0, err
		}
		d.partDecRelOffset += int64(n1)
		return n1, nil
	}

	// Read up to the end of the current part.
	n1, err = io.ReadFull(d.decrypter, p[:unreadPartLen])
	if err != nil {
		return 0, err
	}

	// We should now proceed to next part, reset all
	// values appropriately.
	d.partEncRelOffset = 0
	d.partDecRelOffset = 0
	d.startSeqNum = 0

	d.partIndex++
	if d.partIndex == len(d.parts) {
		return n1, io.EOF
	}

	if err = d.buildDecrypter(d.parts[d.partIndex].Number); err != nil {
		return 0, err
	}

	// Fill the remainder of p from the next part; keep its count
	// separate so the total returned is accurate.
	n2, err := d.decrypter.Read(p[n1:])
	if err != nil {
		return 0, err
	}
	d.partDecRelOffset += int64(n2)
	return n1 + n2, nil
}
// DecryptBlocksWriter - decrypts multipart parts, while implementing
// a io.Writer compatible interface.
type DecryptBlocksWriter struct { type DecryptBlocksWriter struct {
// Original writer where the plain data will be written // Original writer where the plain data will be written
writer io.Writer writer io.Writer
@ -367,7 +612,7 @@ func (w *DecryptBlocksWriter) buildDecrypter(partID int) error {
if w.copySource { if w.copySource {
if crypto.SSEC.IsEncrypted(w.metadata) { if crypto.SSEC.IsEncrypted(w.metadata) {
w.req.Header.Set(crypto.SSECopyKey, w.customerKeyHeader) w.req.Header.Set(crypto.SSECopyKey, w.customerKeyHeader)
key, err = ParseSSECopyCustomerRequest(w.req, w.metadata) key, err = ParseSSECopyCustomerRequest(w.req.Header, w.metadata)
} }
} else { } else {
if crypto.SSEC.IsEncrypted(w.metadata) { if crypto.SSEC.IsEncrypted(w.metadata) {
@ -666,6 +911,119 @@ func (o *ObjectInfo) DecryptedSize() (int64, error) {
return size, nil return size, nil
} }
// GetDecryptedRange - To decrypt the range (off, length) of the
// decrypted object stream, we need to read the range (encOff,
// encLength) of the encrypted object stream to decrypt it, and
// compute skipLen, the number of bytes to skip in the beginning of
// the encrypted range.
//
// In addition we also compute the object part number for where the
// requested range starts, along with the DARE sequence number within
// that part. For single part objects, the partStart will be 0.
func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, skipLen int64, seqNumber uint32, partStart int, err error) {
	if !crypto.IsEncrypted(o.UserDefined) {
		err = errors.New("Object is not encrypted")
		return
	}

	if rs == nil {
		// No range, so offsets refer to the whole object.
		return 0, int64(o.Size), 0, 0, 0, nil
	}

	// Assemble slice of (decrypted) part sizes in `sizes`
	var decObjSize int64 // decrypted total object size
	var partSize uint64
	// Single-part default: the whole object is one DARE stream.
	partSize, err = sio.DecryptedSize(uint64(o.Size))
	if err != nil {
		return
	}
	sizes := []int64{int64(partSize)}
	decObjSize = sizes[0]
	if crypto.IsMultiPart(o.UserDefined) {
		// Multipart: one decrypted size per recorded part.
		sizes = make([]int64, len(o.Parts))
		decObjSize = 0
		for i, part := range o.Parts {
			partSize, err = sio.DecryptedSize(uint64(part.Size))
			if err != nil {
				return
			}
			t := int64(partSize)
			sizes[i] = t
			decObjSize += t
		}
	}

	var off, length int64
	// Resolve the range spec against the decrypted size; this also
	// validates that the requested range is satisfiable.
	off, length, err = rs.GetOffsetLength(decObjSize)
	if err != nil {
		return
	}

	// At this point, we have:
	//
	// 1. the decrypted part sizes in `sizes` (single element for
	//    single part object) and total decrypted object size `decObjSize`
	//
	// 2. the (decrypted) start offset `off` and (decrypted)
	//    length to read `length`
	//
	// These are the inputs to the rest of the algorithm below.

	// Locate the part containing the start of the required range
	var partEnd int
	var cumulativeSum, encCumulativeSum int64
	for i, size := range sizes {
		if off < cumulativeSum+size {
			partStart = i
			break
		}
		cumulativeSum += size
		encPartSize, _ := sio.EncryptedSize(uint64(size))
		encCumulativeSum += int64(encPartSize)
	}
	// partStart is always found in the loop above,
	// because off is validated.

	sseDAREEncPackageBlockSize := int64(sseDAREPackageBlockSize + sseDAREPackageMetaSize)
	// DARE package index (within partStart) containing `off`.
	startPkgNum := (off - cumulativeSum) / sseDAREPackageBlockSize

	// Now we can calculate the number of bytes to skip
	skipLen = (off - cumulativeSum) % sseDAREPackageBlockSize

	encOff = encCumulativeSum + startPkgNum*sseDAREEncPackageBlockSize

	// Locate the part containing the end of the required range.
	// NOTE: cumulativeSum/encCumulativeSum carry over from the loop
	// above (totals for parts before partStart) and keep accumulating.
	endOffset := off + length - 1
	for i1, size := range sizes[partStart:] {
		i := partStart + i1
		if endOffset < cumulativeSum+size {
			partEnd = i
			break
		}
		cumulativeSum += size
		encPartSize, _ := sio.EncryptedSize(uint64(size))
		encCumulativeSum += int64(encPartSize)
	}
	// partEnd is always found in the loop above, because off and
	// length are validated.

	endPkgNum := (endOffset - cumulativeSum) / sseDAREPackageBlockSize
	// Compute endEncOffset with one additional DARE package (so
	// we read the package containing the last desired byte).
	endEncOffset := encCumulativeSum + (endPkgNum+1)*sseDAREEncPackageBlockSize
	// Check if the DARE package containing the end offset is a
	// full sized package (as the last package in the part may be
	// smaller)
	lastPartSize, _ := sio.EncryptedSize(uint64(sizes[partEnd]))
	if endEncOffset > encCumulativeSum+int64(lastPartSize) {
		endEncOffset = encCumulativeSum + int64(lastPartSize)
	}
	encLength = endEncOffset - encOff
	// Set the sequence number as the starting package number of
	// the requested block
	seqNumber = uint32(startPkgNum)
	return encOff, encLength, skipLen, seqNumber, partStart, nil
}
// EncryptedSize returns the size of the object after encryption. // EncryptedSize returns the size of the object after encryption.
// An encrypted object is always larger than a plain object // An encrypted object is always larger than a plain object
// except for zero size objects. // except for zero size objects.
@ -716,7 +1074,7 @@ func DecryptCopyObjectInfo(info *ObjectInfo, headers http.Header) (apiErr APIErr
// decryption succeeded. // decryption succeeded.
// //
// DecryptObjectInfo also returns whether the object is encrypted or not. // DecryptObjectInfo also returns whether the object is encrypted or not.
func DecryptObjectInfo(info *ObjectInfo, headers http.Header) (encrypted bool, err error) { func DecryptObjectInfo(info ObjectInfo, headers http.Header) (encrypted bool, err error) {
// Directories are never encrypted. // Directories are never encrypted.
if info.IsDir { if info.IsDir {
return false, nil return false, nil
@ -734,7 +1092,7 @@ func DecryptObjectInfo(info *ObjectInfo, headers http.Header) (encrypted bool, e
err = errEncryptedObject err = errEncryptedObject
return return
} }
info.Size, err = info.DecryptedSize() _, err = info.DecryptedSize()
} }
return return
} }

View File

@ -22,7 +22,9 @@ import (
"net/http" "net/http"
"testing" "testing"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/cmd/crypto" "github.com/minio/minio/cmd/crypto"
"github.com/minio/sio"
) )
var hasServerSideEncryptionHeaderTests = []struct { var hasServerSideEncryptionHeaderTests = []struct {
@ -325,7 +327,7 @@ func TestParseSSECopyCustomerRequest(t *testing.T) {
request.Header = headers request.Header = headers
globalIsSSL = test.useTLS globalIsSSL = test.useTLS
_, err := ParseSSECopyCustomerRequest(request, test.metadata) _, err := ParseSSECopyCustomerRequest(request.Header, test.metadata)
if err != test.err { if err != test.err {
t.Errorf("Test %d: Parse returned: %v want: %v", i, err, test.err) t.Errorf("Test %d: Parse returned: %v want: %v", i, err, test.err)
} }
@ -557,7 +559,7 @@ var decryptObjectInfoTests = []struct {
func TestDecryptObjectInfo(t *testing.T) { func TestDecryptObjectInfo(t *testing.T) {
for i, test := range decryptObjectInfoTests { for i, test := range decryptObjectInfoTests {
if encrypted, err := DecryptObjectInfo(&test.info, test.headers); err != test.expErr { if encrypted, err := DecryptObjectInfo(test.info, test.headers); err != test.expErr {
t.Errorf("Test %d: Decryption returned wrong error code: got %d , want %d", i, err, test.expErr) t.Errorf("Test %d: Decryption returned wrong error code: got %d , want %d", i, err, test.expErr)
} else if enc := crypto.IsEncrypted(test.info.UserDefined); encrypted && enc != encrypted { } else if enc := crypto.IsEncrypted(test.info.UserDefined); encrypted && enc != encrypted {
t.Errorf("Test %d: Decryption thinks object is encrypted but it is not", i) t.Errorf("Test %d: Decryption thinks object is encrypted but it is not", i)
@ -566,3 +568,285 @@ func TestDecryptObjectInfo(t *testing.T) {
} }
} }
} }
func TestGetDecryptedRange(t *testing.T) {
var (
pkgSz = int64(64) * humanize.KiByte
minPartSz = int64(5) * humanize.MiByte
maxPartSz = int64(5) * humanize.GiByte
getEncSize = func(s int64) int64 {
v, _ := sio.EncryptedSize(uint64(s))
return int64(v)
}
udMap = func(isMulti bool) map[string]string {
m := map[string]string{
crypto.SSESealAlgorithm: SSESealAlgorithmDareSha256,
crypto.SSEMultipart: "1",
}
if !isMulti {
delete(m, crypto.SSEMultipart)
}
return m
}
)
// Single part object tests
var (
mkSPObj = func(s int64) ObjectInfo {
return ObjectInfo{
Size: getEncSize(s),
UserDefined: udMap(false),
}
}
)
testSP := []struct {
decSz int64
oi ObjectInfo
}{
{0, mkSPObj(0)},
{1, mkSPObj(1)},
{pkgSz - 1, mkSPObj(pkgSz - 1)},
{pkgSz, mkSPObj(pkgSz)},
{2*pkgSz - 1, mkSPObj(2*pkgSz - 1)},
{minPartSz, mkSPObj(minPartSz)},
{maxPartSz, mkSPObj(maxPartSz)},
}
for i, test := range testSP {
{
// nil range
o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(nil)
if err != nil {
t.Errorf("Case %d: unexpected err: %v", i, err)
}
if skip != 0 || sn != 0 || ps != 0 || o != 0 || l != getEncSize(test.decSz) {
t.Errorf("Case %d: test failed: %d %d %d %d %d", i, o, l, skip, sn, ps)
}
}
if test.decSz >= 10 {
// first 10 bytes
o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(&HTTPRangeSpec{false, 0, 9})
if err != nil {
t.Errorf("Case %d: unexpected err: %v", i, err)
}
var rLen int64 = pkgSz + 32
if test.decSz < pkgSz {
rLen = test.decSz + 32
}
if skip != 0 || sn != 0 || ps != 0 || o != 0 || l != rLen {
t.Errorf("Case %d: test failed: %d %d %d %d %d", i, o, l, skip, sn, ps)
}
}
kb32 := int64(32) * humanize.KiByte
if test.decSz >= (64+32)*humanize.KiByte {
// Skip the first 32Kib, and read the next 64Kib
o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(&HTTPRangeSpec{false, kb32, 3*kb32 - 1})
if err != nil {
t.Errorf("Case %d: unexpected err: %v", i, err)
}
var rLen int64 = (pkgSz + 32) * 2
if test.decSz < 2*pkgSz {
rLen = (pkgSz + 32) + (test.decSz - pkgSz + 32)
}
if skip != kb32 || sn != 0 || ps != 0 || o != 0 || l != rLen {
t.Errorf("Case %d: test failed: %d %d %d %d %d", i, o, l, skip, sn, ps)
}
}
if test.decSz >= (64*2+32)*humanize.KiByte {
// Skip the first 96Kib and read the next 64Kib
o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(&HTTPRangeSpec{false, 3 * kb32, 5*kb32 - 1})
if err != nil {
t.Errorf("Case %d: unexpected err: %v", i, err)
}
var rLen int64 = (pkgSz + 32) * 2
if test.decSz-pkgSz < 2*pkgSz {
rLen = (pkgSz + 32) + (test.decSz - pkgSz + 32*2)
}
if skip != kb32 || sn != 1 || ps != 0 || o != pkgSz+32 || l != rLen {
t.Errorf("Case %d: test failed: %d %d %d %d %d", i, o, l, skip, sn, ps)
}
}
}
// Multipart object tests
var (
// make a multipart object-info given part sizes
mkMPObj = func(sizes []int64) ObjectInfo {
r := make([]objectPartInfo, len(sizes))
sum := int64(0)
for i, s := range sizes {
r[i].Number = i
r[i].Size = int64(getEncSize(s))
sum += r[i].Size
}
return ObjectInfo{
Size: sum,
UserDefined: udMap(true),
Parts: r,
}
}
// Simple useful utilities
repeat = func(k int64, n int) []int64 {
a := []int64{}
for i := 0; i < n; i++ {
a = append(a, k)
}
return a
}
lsum = func(s []int64) int64 {
sum := int64(0)
for _, i := range s {
if i < 0 {
return -1
}
sum += i
}
return sum
}
esum = func(oi ObjectInfo) int64 {
sum := int64(0)
for _, i := range oi.Parts {
sum += i.Size
}
return sum
}
)
s1 := []int64{5487701, 5487799, 3}
s2 := repeat(5487701, 5)
s3 := repeat(maxPartSz, 10000)
testMPs := []struct {
decSizes []int64
oi ObjectInfo
}{
{s1, mkMPObj(s1)},
{s2, mkMPObj(s2)},
{s3, mkMPObj(s3)},
}
// This function is a reference (re-)implementation of
// decrypted range computation, written solely for the purpose
// of the unit tests.
//
// `s` gives the decrypted part sizes, and the other
// parameters describe the desired read segment. When
// `isFromEnd` is true, `skipLen` argument is ignored.
// Returns: o = offset into the encrypted object, l = number of
// encrypted bytes to read, skip = decrypted bytes to drop from the
// first DARE package, sn = starting DARE package sequence number
// within the part, ps = index of the part where the read starts.
decryptedRangeRef := func(s []int64, skipLen, readLen int64, isFromEnd bool) (o, l, skip int64, sn uint32, ps int) {
	// Total decrypted object size.
	oSize := lsum(s)
	if isFromEnd {
		// Suffix-length request: read exactly the last readLen bytes.
		skipLen = oSize - readLen
	}
	if skipLen < 0 || readLen < 0 || oSize < 0 || skipLen+readLen > oSize {
		t.Fatalf("Impossible read specified: %d %d %d", skipLen, readLen, oSize)
	}

	var cumulativeSum, cumulativeEncSum int64
	toRead := readLen
	readStart := false
	for i, v := range s {
		partOffset := int64(0)
		partDarePkgOffset := int64(0)
		if !readStart && cumulativeSum+v > skipLen {
			// Read starts at the current part
			readStart = true
			partOffset = skipLen - cumulativeSum

			// All return values except `l` are
			// calculated here.
			sn = uint32(partOffset / pkgSz)
			skip = partOffset % pkgSz
			ps = i
			// Each DARE package carries a 32-byte overhead on top of
			// pkgSz plaintext bytes.
			o = cumulativeEncSum + int64(sn)*(pkgSz+32)

			// Offset of the DARE package boundary containing the
			// first requested byte.
			partDarePkgOffset = partOffset - skip
		}
		if readStart {
			currentPartBytes := v - partOffset
			currentPartDareBytes := v - partDarePkgOffset
			if currentPartBytes < toRead {
				// Read spans past this part: consume it whole.
				toRead -= currentPartBytes
				l += getEncSize(currentPartDareBytes)
			} else {
				// current part has the last
				// byte required
				lbPartOffset := partOffset + toRead - 1

				// round up the lbPartOffset
				// to the end of the
				// corresponding DARE package
				lbPkgEndOffset := lbPartOffset - (lbPartOffset % pkgSz) + pkgSz
				if lbPkgEndOffset > v {
					lbPkgEndOffset = v
				}
				bytesToDrop := v - lbPkgEndOffset

				// Last segment to update `l`
				l += getEncSize(currentPartDareBytes - bytesToDrop)
				break
			}
		}
		cumulativeSum += v
		cumulativeEncSum += getEncSize(v)
	}
	return
}
// Compare GetDecryptedRange against the reference implementation for
// each fixture, using three kinds of range specs.
for i, test := range testMPs {
	{
		// nil range: expect the whole encrypted object (offset 0,
		// length = sum of encrypted part sizes, nothing skipped).
		o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(nil)
		if err != nil {
			t.Errorf("Case %d: unexpected err: %v", i, err)
		}
		if o != 0 || l != esum(test.oi) || skip != 0 || sn != 0 || ps != 0 {
			t.Errorf("Case %d: test failed: %d %d %d %d %d", i, o, l, skip, sn, ps)
		}
	}

	// Skip 1Mib and read 1Mib (in the decrypted object)
	//
	// The check below ensures the object is large enough
	// for the read.
	if lsum(test.decSizes) >= 2*humanize.MiByte {
		skipLen, readLen := int64(1)*humanize.MiByte, int64(1)*humanize.MiByte
		o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(&HTTPRangeSpec{false, skipLen, skipLen + readLen - 1})
		if err != nil {
			t.Errorf("Case %d: unexpected err: %v", i, err)
		}

		oRef, lRef, skipRef, snRef, psRef := decryptedRangeRef(test.decSizes, skipLen, readLen, false)
		if o != oRef || l != lRef || skip != skipRef || sn != snRef || ps != psRef {
			t.Errorf("Case %d: test failed: %d %d %d %d %d (Ref: %d %d %d %d %d)",
				i, o, l, skip, sn, ps, oRef, lRef, skipRef, snRef, psRef)
		}
	}

	// Read the last 6Mib+1 bytes of the (decrypted)
	// object
	//
	// The check below ensures the object is large enough
	// for the read.
	readLen := int64(6)*humanize.MiByte + 1
	if lsum(test.decSizes) >= readLen {
		// Suffix-length spec: {IsSuffixLength: true, Start: -readLen}.
		o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(&HTTPRangeSpec{true, -readLen, -1})
		if err != nil {
			t.Errorf("Case %d: unexpected err: %v", i, err)
		}

		oRef, lRef, skipRef, snRef, psRef := decryptedRangeRef(test.decSizes, 0, readLen, true)
		if o != oRef || l != lRef || skip != skipRef || sn != snRef || ps != psRef {
			t.Errorf("Case %d: test failed: %d %d %d %d %d (Ref: %d %d %d %d %d)",
				i, o, l, skip, sn, ps, oRef, lRef, skipRef, snRef, psRef)
		}
	}
}

View File

@ -17,10 +17,12 @@
package cmd package cmd
import ( import (
"bytes"
"context" "context"
"encoding/hex" "encoding/hex"
"io" "io"
"io/ioutil" "io/ioutil"
"net/http"
"os" "os"
"path" "path"
"sort" "sort"
@ -498,6 +500,86 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
return objInfo, nil return objInfo, nil
} }
// GetObjectNInfo - returns object info and a reader for object
// content. The returned GetObjectReader must be closed by the caller;
// Close releases the namespace read lock, the rwPool lock (if taken)
// and the open file handle, in that registration's reverse order.
func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header) (gr *GetObjectReader, err error) {
	if err = checkGetObjArgs(ctx, bucket, object); err != nil {
		return nil, err
	}

	if _, err = fs.statBucketDir(ctx, bucket); err != nil {
		return nil, toObjectErr(err, bucket)
	}

	// Lock the object before reading.
	lock := fs.nsMutex.NewNSLock(bucket, object)
	if err = lock.GetRLock(globalObjectTimeout); err != nil {
		logger.LogIf(ctx, err)
		return nil, err
	}
	nsUnlocker := lock.RUnlock

	// For a directory, we need to send an reader that returns no bytes.
	if hasSuffix(object, slashSeparator) {
		// The lock taken above is released when
		// objReader.Close() is called by the caller.
		return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), ObjectInfo{}, nsUnlocker), nil
	}

	// Otherwise we get the object info
	var objInfo ObjectInfo
	if objInfo, err = fs.getObjectInfo(ctx, bucket, object); err != nil {
		nsUnlocker()
		return nil, toObjectErr(err, bucket, object)
	}

	// Take a rwPool lock for NFS gateway type deployment
	rwPoolUnlocker := func() {}
	if bucket != minioMetaBucket {
		fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile)
		_, err = fs.rwPool.Open(fsMetaPath)
		if err != nil && err != errFileNotFound {
			logger.LogIf(ctx, err)
			nsUnlocker()
			return nil, toObjectErr(err, bucket, object)
		}
		// Need to clean up lock after getObject is
		// completed.
		rwPoolUnlocker = func() { fs.rwPool.Close(fsMetaPath) }
	}

	// NOTE: no explicit unlock is needed on this failure path —
	// NewGetObjectReader invokes the passed cleanup functions
	// (nsUnlocker, rwPoolUnlocker) itself when it returns an error.
	objReaderFn, off, length, rErr := NewGetObjectReader(rs, objInfo, nsUnlocker, rwPoolUnlocker)
	if rErr != nil {
		return nil, rErr
	}

	// Read the object, doesn't exist returns an s3 compatible error.
	// `off` may be an offset into the *encrypted* on-disk object when
	// the object is encrypted (computed by NewGetObjectReader).
	fsObjPath := pathJoin(fs.fsPath, bucket, object)
	readCloser, size, err := fsOpenFile(ctx, fsObjPath, off)
	if err != nil {
		rwPoolUnlocker()
		nsUnlocker()
		return nil, toObjectErr(err, bucket, object)
	}
	var reader io.Reader
	reader = io.LimitReader(readCloser, length)
	closeFn := func() {
		readCloser.Close()
	}

	// Check if range is valid
	if off > size || off+length > size {
		err = InvalidRange{off, length, size}
		logger.LogIf(ctx, err)
		// All three cleanups must run here since objReaderFn was
		// never handed ownership of them.
		closeFn()
		rwPoolUnlocker()
		nsUnlocker()
		return nil, err
	}

	return objReaderFn(reader, h, closeFn)
}
// GetObject - reads an object from the disk. // GetObject - reads an object from the disk.
// Supports additional parameters like offset and length // Supports additional parameters like offset and length
// which are synonymous with HTTP Range requests. // which are synonymous with HTTP Range requests.

View File

@ -615,6 +615,28 @@ func (a *azureObjects) ListObjectsV2(ctx context.Context, bucket, prefix, contin
return result, nil return result, nil
} }
// GetObjectNInfo - returns object info and locked object ReadCloser
func (a *azureObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header) (gr *minio.GetObjectReader, err error) {
	var objInfo minio.ObjectInfo
	objInfo, err = a.GetObjectInfo(ctx, bucket, object, minio.ObjectOptions{})
	if err != nil {
		return nil, err
	}

	// Resolve the requested range against the object size (a nil
	// range spec selects the whole object).
	var startOffset, length int64
	startOffset, length, err = rs.GetOffsetLength(objInfo.Size)
	if err != nil {
		return nil, err
	}

	pr, pw := io.Pipe()
	go func() {
		err := a.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, minio.ObjectOptions{})
		pw.CloseWithError(err)
	}()

	// Close the pipe reader when the returned GetObjectReader is
	// closed: if the caller stops reading before EOF, the writer
	// goroutine's next pw.Write fails with ErrClosedPipe instead of
	// blocking forever, preventing a goroutine leak.
	pipeCloser := func() { pr.Close() }
	return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
}
// GetObject - reads an object from azure. Supports additional // GetObject - reads an object from azure. Supports additional
// parameters like offset and length which are synonymous with // parameters like offset and length which are synonymous with
// HTTP Range requests. // HTTP Range requests.

View File

@ -23,6 +23,7 @@ import (
"hash" "hash"
"io" "io"
"io/ioutil" "io/ioutil"
"net/http"
"strings" "strings"
"sync" "sync"
@ -394,6 +395,28 @@ func (l *b2Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuat
return loi, nil return loi, nil
} }
// GetObjectNInfo - returns object info and locked object ReadCloser
func (l *b2Objects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header) (gr *minio.GetObjectReader, err error) {
	var objInfo minio.ObjectInfo
	objInfo, err = l.GetObjectInfo(ctx, bucket, object, minio.ObjectOptions{})
	if err != nil {
		return nil, err
	}

	// Resolve the requested range against the object size (a nil
	// range spec selects the whole object).
	var startOffset, length int64
	startOffset, length, err = rs.GetOffsetLength(objInfo.Size)
	if err != nil {
		return nil, err
	}

	pr, pw := io.Pipe()
	go func() {
		err := l.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, minio.ObjectOptions{})
		pw.CloseWithError(err)
	}()

	// Close the pipe reader when the returned GetObjectReader is
	// closed: if the caller stops reading before EOF, the writer
	// goroutine's next pw.Write fails with ErrClosedPipe instead of
	// blocking forever, preventing a goroutine leak.
	pipeCloser := func() { pr.Close() }
	return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
}
// GetObject reads an object from B2. Supports additional // GetObject reads an object from B2. Supports additional
// parameters like offset and length which are synonymous with // parameters like offset and length which are synonymous with
// HTTP Range requests. // HTTP Range requests.

View File

@ -736,6 +736,28 @@ func (l *gcsGateway) ListObjectsV2(ctx context.Context, bucket, prefix, continua
}, nil }, nil
} }
// GetObjectNInfo - returns object info and locked object ReadCloser
func (l *gcsGateway) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header) (gr *minio.GetObjectReader, err error) {
	var objInfo minio.ObjectInfo
	objInfo, err = l.GetObjectInfo(ctx, bucket, object, minio.ObjectOptions{})
	if err != nil {
		return nil, err
	}

	// Resolve the requested range against the object size (a nil
	// range spec selects the whole object).
	var startOffset, length int64
	startOffset, length, err = rs.GetOffsetLength(objInfo.Size)
	if err != nil {
		return nil, err
	}

	pr, pw := io.Pipe()
	go func() {
		err := l.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, minio.ObjectOptions{})
		pw.CloseWithError(err)
	}()

	// Close the pipe reader when the returned GetObjectReader is
	// closed: if the caller stops reading before EOF, the writer
	// goroutine's next pw.Write fails with ErrClosedPipe instead of
	// blocking forever, preventing a goroutine leak.
	pipeCloser := func() { pr.Close() }
	return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
}
// GetObject - reads an object from GCS. Supports additional // GetObject - reads an object from GCS. Supports additional
// parameters like offset and length which are synonymous with // parameters like offset and length which are synonymous with
// HTTP Range requests. // HTTP Range requests.

View File

@ -506,6 +506,28 @@ func (t *tritonObjects) ListObjectsV2(ctx context.Context, bucket, prefix, conti
return result, nil return result, nil
} }
// GetObjectNInfo - returns object info and locked object ReadCloser
func (t *tritonObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header) (gr *minio.GetObjectReader, err error) {
	var objInfo minio.ObjectInfo
	objInfo, err = t.GetObjectInfo(ctx, bucket, object, minio.ObjectOptions{})
	if err != nil {
		return nil, err
	}

	// Resolve the requested range against the object size (a nil
	// range spec selects the whole object).
	var startOffset, length int64
	startOffset, length, err = rs.GetOffsetLength(objInfo.Size)
	if err != nil {
		return nil, err
	}

	pr, pw := io.Pipe()
	go func() {
		err := t.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, minio.ObjectOptions{})
		pw.CloseWithError(err)
	}()

	// Close the pipe reader when the returned GetObjectReader is
	// closed: if the caller stops reading before EOF, the writer
	// goroutine's next pw.Write fails with ErrClosedPipe instead of
	// blocking forever, preventing a goroutine leak.
	pipeCloser := func() { pr.Close() }
	return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
}
// GetObject - Reads an object from Manta. Supports additional parameters like // GetObject - Reads an object from Manta. Supports additional parameters like
// offset and length which are synonymous with HTTP Range requests. // offset and length which are synonymous with HTTP Range requests.
// //

View File

@ -68,7 +68,7 @@ ENVIRONMENT VARIABLES:
DOMAIN: DOMAIN:
MINIO_DOMAIN: To enable virtual-host-style requests, set this value to Minio host domain name. MINIO_DOMAIN: To enable virtual-host-style requests, set this value to Minio host domain name.
CACHE: CACHE:
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ";". MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ";".
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";". MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";".
@ -546,6 +546,28 @@ func ossGetObject(ctx context.Context, client *oss.Client, bucket, key string, s
return nil return nil
} }
// GetObjectNInfo - returns object info and locked object ReadCloser
func (l *ossObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header) (gr *minio.GetObjectReader, err error) {
	var objInfo minio.ObjectInfo
	objInfo, err = l.GetObjectInfo(ctx, bucket, object, minio.ObjectOptions{})
	if err != nil {
		return nil, err
	}

	// Resolve the requested range against the object size (a nil
	// range spec selects the whole object).
	var startOffset, length int64
	startOffset, length, err = rs.GetOffsetLength(objInfo.Size)
	if err != nil {
		return nil, err
	}

	pr, pw := io.Pipe()
	go func() {
		err := l.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, minio.ObjectOptions{})
		pw.CloseWithError(err)
	}()

	// Close the pipe reader when the returned GetObjectReader is
	// closed: if the caller stops reading before EOF, the writer
	// goroutine's next pw.Write fails with ErrClosedPipe instead of
	// blocking forever, preventing a goroutine leak.
	pipeCloser := func() { pr.Close() }
	return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
}
// GetObject reads an object on OSS. Supports additional // GetObject reads an object on OSS. Supports additional
// parameters like offset and length which are synonymous with // parameters like offset and length which are synonymous with
// HTTP Range requests. // HTTP Range requests.

View File

@ -327,6 +327,28 @@ func (l *s3Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuat
return minio.FromMinioClientListBucketV2Result(bucket, result), nil return minio.FromMinioClientListBucketV2Result(bucket, result), nil
} }
// GetObjectNInfo - returns object info and locked object ReadCloser
func (l *s3Objects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header) (gr *minio.GetObjectReader, err error) {
	var objInfo minio.ObjectInfo
	objInfo, err = l.GetObjectInfo(ctx, bucket, object, minio.ObjectOptions{})
	if err != nil {
		return nil, err
	}

	// Resolve the requested range against the object size (a nil
	// range spec selects the whole object).
	var startOffset, length int64
	startOffset, length, err = rs.GetOffsetLength(objInfo.Size)
	if err != nil {
		return nil, err
	}

	pr, pw := io.Pipe()
	go func() {
		err := l.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, minio.ObjectOptions{})
		pw.CloseWithError(err)
	}()

	// Close the pipe reader when the returned GetObjectReader is
	// closed: if the caller stops reading before EOF, the writer
	// goroutine's next pw.Write fails with ErrClosedPipe instead of
	// blocking forever, preventing a goroutine leak.
	pipeCloser := func() { pr.Close() }
	return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
}
// GetObject reads an object from S3. Supports additional // GetObject reads an object from S3. Supports additional
// parameters like offset and length which are synonymous with // parameters like offset and length which are synonymous with
// HTTP Range requests. // HTTP Range requests.

View File

@ -431,6 +431,28 @@ func (s *siaObjects) ListObjects(ctx context.Context, bucket string, prefix stri
return loi, nil return loi, nil
} }
// GetObjectNInfo - returns object info and locked object ReadCloser
func (s *siaObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header) (gr *minio.GetObjectReader, err error) {
	var objInfo minio.ObjectInfo
	objInfo, err = s.GetObjectInfo(ctx, bucket, object, minio.ObjectOptions{})
	if err != nil {
		return nil, err
	}

	// Resolve the requested range against the object size (a nil
	// range spec selects the whole object).
	var startOffset, length int64
	startOffset, length, err = rs.GetOffsetLength(objInfo.Size)
	if err != nil {
		return nil, err
	}

	pr, pw := io.Pipe()
	go func() {
		err := s.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, minio.ObjectOptions{})
		pw.CloseWithError(err)
	}()

	// Close the pipe reader when the returned GetObjectReader is
	// closed: if the caller stops reading before EOF, the writer
	// goroutine's next pw.Write fails with ErrClosedPipe instead of
	// blocking forever, preventing a goroutine leak.
	pipeCloser := func() { pr.Close() }
	return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
}
func (s *siaObjects) GetObject(ctx context.Context, bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error { func (s *siaObjects) GetObject(ctx context.Context, bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
dstFile := path.Join(s.TempDir, minio.MustGetUUID()) dstFile := path.Join(s.TempDir, minio.MustGetUUID())
defer os.Remove(dstFile) defer os.Remove(dstFile)

View File

@ -17,8 +17,8 @@
package cmd package cmd
import ( import (
"errors"
"fmt" "fmt"
"regexp"
"strconv" "strconv"
"strings" "strings"
) )
@ -27,27 +27,82 @@ const (
byteRangePrefix = "bytes=" byteRangePrefix = "bytes="
) )
// Valid byte position regexp // HTTPRangeSpec represents a range specification as supported by S3 GET
var validBytePos = regexp.MustCompile(`^[0-9]+$`) // object request.
//
// Case 1: Not present -> represented by a nil RangeSpec
// Case 2: bytes=1-10 (absolute start and end offsets) -> RangeSpec{false, 1, 10}
// Case 3: bytes=10- (absolute start offset with end offset unspecified) -> RangeSpec{false, 10, -1}
// Case 4: bytes=-30 (suffix length specification) -> RangeSpec{true, -30, -1}
type HTTPRangeSpec struct {
// Does the range spec refer to a suffix of the object?
IsSuffixLength bool
// HttpRange specifies the byte range to be sent to the client. // Start and end offset specified in range spec
type httpRange struct { Start, End int64
offsetBegin int64
offsetEnd int64
resourceSize int64
} }
// String populate range stringer interface // GetLength - get length of range
func (hrange httpRange) String() string { func (h *HTTPRangeSpec) GetLength(resourceSize int64) (rangeLength int64, err error) {
return fmt.Sprintf("bytes %d-%d/%d", hrange.offsetBegin, hrange.offsetEnd, hrange.resourceSize) switch {
case resourceSize < 0:
return 0, errors.New("Resource size cannot be negative")
case h == nil:
rangeLength = resourceSize
case h.IsSuffixLength:
specifiedLen := -h.Start
rangeLength = specifiedLen
if specifiedLen > resourceSize {
rangeLength = resourceSize
}
case h.Start >= resourceSize:
return 0, errInvalidRange
case h.End > -1:
end := h.End
if resourceSize <= end {
end = resourceSize - 1
}
rangeLength = end - h.Start + 1
case h.End == -1:
rangeLength = resourceSize - h.Start
default:
return 0, errors.New("Unexpected range specification case")
}
return rangeLength, nil
} }
// getlength - get length from the range. // GetOffsetLength computes the start offset and length of the range
func (hrange httpRange) getLength() int64 { // given the size of the resource
return 1 + hrange.offsetEnd - hrange.offsetBegin func (h *HTTPRangeSpec) GetOffsetLength(resourceSize int64) (start, length int64, err error) {
if h == nil {
// No range specified, implies whole object.
return 0, resourceSize, nil
}
length, err = h.GetLength(resourceSize)
if err != nil {
return 0, 0, err
}
start = h.Start
if h.IsSuffixLength {
start = resourceSize + h.Start
if start < 0 {
start = 0
}
}
return start, length, nil
} }
func parseRequestRange(rangeString string, resourceSize int64) (hrange *httpRange, err error) { // Parse a HTTP range header value into a HTTPRangeSpec
func parseRequestRangeSpec(rangeString string) (hrange *HTTPRangeSpec, err error) {
// Return error if given range string doesn't start with byte range prefix. // Return error if given range string doesn't start with byte range prefix.
if !strings.HasPrefix(rangeString, byteRangePrefix) { if !strings.HasPrefix(rangeString, byteRangePrefix) {
return nil, fmt.Errorf("'%s' does not start with '%s'", rangeString, byteRangePrefix) return nil, fmt.Errorf("'%s' does not start with '%s'", rangeString, byteRangePrefix)
@ -66,12 +121,12 @@ func parseRequestRange(rangeString string, resourceSize int64) (hrange *httpRang
offsetBegin := int64(-1) offsetBegin := int64(-1)
// Convert offsetBeginString only if its not empty. // Convert offsetBeginString only if its not empty.
if len(offsetBeginString) > 0 { if len(offsetBeginString) > 0 {
if !validBytePos.MatchString(offsetBeginString) { if offsetBeginString[0] == '+' {
return nil, fmt.Errorf("'%s' does not have a valid first byte position value", rangeString) return nil, fmt.Errorf("Byte position ('%s') must not have a sign", offsetBeginString)
} } else if offsetBegin, err = strconv.ParseInt(offsetBeginString, 10, 64); err != nil {
if offsetBegin, err = strconv.ParseInt(offsetBeginString, 10, 64); err != nil {
return nil, fmt.Errorf("'%s' does not have a valid first byte position value", rangeString) return nil, fmt.Errorf("'%s' does not have a valid first byte position value", rangeString)
} else if offsetBegin < 0 {
return nil, fmt.Errorf("First byte position is negative ('%d')", offsetBegin)
} }
} }
@ -79,57 +134,30 @@ func parseRequestRange(rangeString string, resourceSize int64) (hrange *httpRang
offsetEnd := int64(-1) offsetEnd := int64(-1)
// Convert offsetEndString only if its not empty. // Convert offsetEndString only if its not empty.
if len(offsetEndString) > 0 { if len(offsetEndString) > 0 {
if !validBytePos.MatchString(offsetEndString) { if offsetEndString[0] == '+' {
return nil, fmt.Errorf("'%s' does not have a valid last byte position value", rangeString) return nil, fmt.Errorf("Byte position ('%s') must not have a sign", offsetEndString)
} } else if offsetEnd, err = strconv.ParseInt(offsetEndString, 10, 64); err != nil {
if offsetEnd, err = strconv.ParseInt(offsetEndString, 10, 64); err != nil {
return nil, fmt.Errorf("'%s' does not have a valid last byte position value", rangeString) return nil, fmt.Errorf("'%s' does not have a valid last byte position value", rangeString)
} else if offsetEnd < 0 {
return nil, fmt.Errorf("Last byte position is negative ('%d')", offsetEnd)
} }
} }
// rangeString contains first and last byte positions. eg. "bytes=2-5"
switch { switch {
case offsetBegin > -1 && offsetEnd > -1: case offsetBegin > -1 && offsetEnd > -1:
if offsetBegin > offsetEnd { if offsetBegin > offsetEnd {
// Last byte position is not greater than first byte position. eg. "bytes=5-2"
return nil, fmt.Errorf("'%s' does not have valid range value", rangeString)
}
// First and last byte positions should not be >= resourceSize.
if offsetBegin >= resourceSize {
return nil, errInvalidRange return nil, errInvalidRange
} }
return &HTTPRangeSpec{false, offsetBegin, offsetEnd}, nil
if offsetEnd >= resourceSize {
offsetEnd = resourceSize - 1
}
case offsetBegin > -1: case offsetBegin > -1:
// rangeString contains only first byte position. eg. "bytes=8-" return &HTTPRangeSpec{false, offsetBegin, -1}, nil
if offsetBegin >= resourceSize {
// First byte position should not be >= resourceSize.
return nil, errInvalidRange
}
offsetEnd = resourceSize - 1
case offsetEnd > -1: case offsetEnd > -1:
// rangeString contains only last byte position. eg. "bytes=-3"
if offsetEnd == 0 { if offsetEnd == 0 {
// Last byte position should not be zero eg. "bytes=-0"
return nil, errInvalidRange return nil, errInvalidRange
} }
return &HTTPRangeSpec{true, -offsetEnd, -1}, nil
if offsetEnd >= resourceSize {
offsetBegin = 0
} else {
offsetBegin = resourceSize - offsetEnd
}
offsetEnd = resourceSize - 1
default: default:
// rangeString contains first and last byte positions missing. eg. "bytes=-" // rangeString contains first and last byte positions missing. eg. "bytes=-"
return nil, fmt.Errorf("'%s' does not have valid range value", rangeString) return nil, fmt.Errorf("'%s' does not have valid range value", rangeString)
} }
return &httpRange{offsetBegin, offsetEnd, resourceSize}, nil
} }

View File

@ -16,75 +16,87 @@
package cmd package cmd
import "testing" import (
"testing"
)
// Test parseRequestRange() func TestHTTPRequestRangeSpec(t *testing.T) {
func TestParseRequestRange(t *testing.T) { resourceSize := int64(10)
// Test success cases. validRangeSpecs := []struct {
successCases := []struct { spec string
rangeString string expOffset, expLength int64
offsetBegin int64
offsetEnd int64
length int64
}{ }{
{"bytes=2-5", 2, 5, 4}, {"bytes=0-", 0, 10},
{"bytes=2-20", 2, 9, 8}, {"bytes=1-", 1, 9},
{"bytes=2-2", 2, 2, 1}, {"bytes=0-9", 0, 10},
{"bytes=0000-0006", 0, 6, 7}, {"bytes=1-10", 1, 9},
{"bytes=2-", 2, 9, 8}, {"bytes=1-1", 1, 1},
{"bytes=-4", 6, 9, 4}, {"bytes=2-5", 2, 4},
{"bytes=-20", 0, 9, 10}, {"bytes=-5", 5, 5},
{"bytes=-1", 9, 1},
{"bytes=-1000", 0, 10},
} }
for i, testCase := range validRangeSpecs {
for _, successCase := range successCases { rs, err := parseRequestRangeSpec(testCase.spec)
hrange, err := parseRequestRange(successCase.rangeString, 10)
if err != nil { if err != nil {
t.Fatalf("expected: <nil>, got: %s", err) t.Errorf("unexpected err: %v", err)
} }
o, l, err := rs.GetOffsetLength(resourceSize)
if hrange.offsetBegin != successCase.offsetBegin { if err != nil {
t.Fatalf("expected: %d, got: %d", successCase.offsetBegin, hrange.offsetBegin) t.Errorf("unexpected err: %v", err)
} }
if o != testCase.expOffset || l != testCase.expLength {
if hrange.offsetEnd != successCase.offsetEnd { t.Errorf("Case %d: got bad offset/length: %d,%d expected: %d,%d",
t.Fatalf("expected: %d, got: %d", successCase.offsetEnd, hrange.offsetEnd) i, o, l, testCase.expOffset, testCase.expLength)
}
if hrange.getLength() != successCase.length {
t.Fatalf("expected: %d, got: %d", successCase.length, hrange.getLength())
} }
} }
// Test invalid range strings. unparsableRangeSpecs := []string{
invalidRangeStrings := []string{
"bytes=8",
"bytes=5-2",
"bytes=+2-5",
"bytes=2-+5",
"bytes=2--5",
"bytes=-", "bytes=-",
"bytes==",
"bytes==1-10",
"bytes=",
"bytes=aa",
"aa",
"", "",
"2-5", "bytes=1-10-",
"bytes = 2-5", "bytes=1--10",
"bytes=2 - 5", "bytes=-1-10",
"bytes=0-0,-1", "bytes=0-+3",
"bytes=2-5 ", "bytes=+3-+5",
"bytes=10-11,12-10", // Unsupported by S3/Minio (valid in RFC)
} }
for _, rangeString := range invalidRangeStrings { for i, urs := range unparsableRangeSpecs {
if _, err := parseRequestRange(rangeString, 10); err == nil { rs, err := parseRequestRangeSpec(urs)
t.Fatalf("expected: an error, got: <nil>") if err == nil {
t.Errorf("Case %d: Did not get an expected error - got %v", i, rs)
}
if err == errInvalidRange {
t.Errorf("Case %d: Got invalid range error instead of a parse error", i)
}
if rs != nil {
t.Errorf("Case %d: Got non-nil rs though err != nil: %v", i, rs)
} }
} }
// Test error range strings. invalidRangeSpecs := []string{
errorRangeString := []string{ "bytes=5-3",
"bytes=10-10", "bytes=10-10",
"bytes=20-30", "bytes=10-",
"bytes=20-", "bytes=100-",
"bytes=-0", "bytes=-0",
} }
for _, rangeString := range errorRangeString { for i, irs := range invalidRangeSpecs {
if _, err := parseRequestRange(rangeString, 10); err != errInvalidRange { var err1, err2 error
t.Fatalf("expected: %s, got: %s", errInvalidRange, err) var rs *HTTPRangeSpec
var o, l int64
rs, err1 = parseRequestRangeSpec(irs)
if err1 == nil {
o, l, err2 = rs.GetOffsetLength(resourceSize)
} }
if err1 == errInvalidRange || (err1 == nil && err2 == errInvalidRange) {
continue
}
t.Errorf("Case %d: Expected errInvalidRange but: %v %v %d %d %v", i, rs, err1, o, l, err2)
} }
} }

View File

@ -19,6 +19,7 @@ package cmd
import ( import (
"context" "context"
"io" "io"
"net/http"
"github.com/minio/minio-go/pkg/encrypt" "github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
@ -47,6 +48,13 @@ type ObjectLayer interface {
// Object operations. // Object operations.
// GetObjectNInfo returns a GetObjectReader that satisfies the
// ReadCloser interface. The Close method unlocks the object
// after reading, so it must always be called after usage.
//
// IMPORTANTLY, when implementations return err != nil, this
// function MUST NOT return a non-nil ReadCloser.
GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header) (reader *GetObjectReader, err error)
GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)

View File

@ -20,15 +20,20 @@ import (
"context" "context"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"io"
"math/rand" "math/rand"
"net/http"
"path" "path"
"runtime" "runtime"
"strings" "strings"
"sync"
"time" "time"
"unicode/utf8" "unicode/utf8"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger" "github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/dns" "github.com/minio/minio/pkg/dns"
"github.com/minio/minio/pkg/ioutil"
"github.com/skyrings/skyring-common/tools/uuid" "github.com/skyrings/skyring-common/tools/uuid"
) )
@ -302,3 +307,141 @@ type byBucketName []BucketInfo
func (d byBucketName) Len() int { return len(d) } func (d byBucketName) Len() int { return len(d) }
func (d byBucketName) Swap(i, j int) { d[i], d[j] = d[j], d[i] } func (d byBucketName) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d byBucketName) Less(i, j int) bool { return d[i].Name < d[j].Name } func (d byBucketName) Less(i, j int) bool { return d[i].Name < d[j].Name }
// GetObjectReader is a type that wraps a reader with a lock to
// provide a ReadCloser interface that unlocks on Close()
type GetObjectReader struct {
	ObjInfo ObjectInfo // metadata of the object being read
	pReader io.Reader  // underlying reader (may decrypt/limit the raw stream)

	cleanUpFns []func() // run in reverse order on Close()
	once       sync.Once // makes Close() idempotent
}
// NewGetObjectReaderFromReader sets up a GetObjectReader with a given
// reader. This ignores any object properties. The cleanupFns are run
// in reverse order when the returned reader is closed.
func NewGetObjectReaderFromReader(r io.Reader, oi ObjectInfo, cleanupFns ...func()) *GetObjectReader {
	gr := new(GetObjectReader)
	gr.ObjInfo = oi
	gr.pReader = r
	gr.cleanUpFns = cleanupFns
	return gr
}
// ObjReaderFn is a function type that takes a reader and returns
// GetObjectReader and an error. Request headers are passed to provide
// encryption parameters. cleanupFns allow cleanup funcs to be
// registered for calling after usage of the reader.
type ObjReaderFn func(inputReader io.Reader, h http.Header, cleanupFns ...func()) (r *GetObjectReader, err error)
// NewGetObjectReader creates a new GetObjectReader. The cleanUpFns
// are called on Close() in reverse order as passed here. NOTE: It is
// assumed that clean up functions do not panic (otherwise, they may
// not all run!).
//
// It returns a constructor function `fn` plus the (off, length) span
// the caller must fetch from storage and feed to `fn`. For encrypted
// objects that span is in the *encrypted* byte domain; for plain
// objects it is the requested range itself.
func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, cleanUpFns ...func()) (
	fn ObjReaderFn, off, length int64, err error) {

	// Call the clean-up functions immediately in case of exit
	// with error
	defer func() {
		if err != nil {
			for i := len(cleanUpFns) - 1; i >= 0; i-- {
				cleanUpFns[i]()
			}
		}
	}()

	isEncrypted := crypto.IsEncrypted(oi.UserDefined)
	var skipLen int64

	// Calculate range to read (different for
	// e.g. encrypted/compressed objects)
	switch {
	case isEncrypted:
		var seqNumber uint32
		var partStart int
		// off/length locate the needed DARE packages in the
		// encrypted object; skipLen is how many decrypted bytes to
		// drop to reach the requested start within the first package.
		off, length, skipLen, seqNumber, partStart, err = oi.GetDecryptedRange(rs)
		if err != nil {
			return nil, 0, 0, err
		}
		var decSize int64
		decSize, err = oi.DecryptedSize()
		if err != nil {
			return nil, 0, 0, err
		}
		var decRangeLength int64
		decRangeLength, err = rs.GetLength(decSize)
		if err != nil {
			return nil, 0, 0, err
		}

		// We define a closure that performs decryption given
		// a reader that returns the desired range of
		// encrypted bytes. The header parameter is used to
		// provide encryption parameters.
		fn = func(inputReader io.Reader, h http.Header, cFns ...func()) (r *GetObjectReader, err error) {
			// Cleanups registered at construction time run after
			// (i.e. are closed before) the ones passed here.
			cFns = append(cleanUpFns, cFns...)
			// Attach decrypter on inputReader
			var decReader io.Reader
			decReader, err = DecryptBlocksRequestR(inputReader, h,
				off, length, seqNumber, partStart, oi, false)
			if err != nil {
				// Call the cleanup funcs
				for i := len(cFns) - 1; i >= 0; i-- {
					cFns[i]()
				}
				return nil, err
			}

			// Apply the skipLen and limit on the
			// decrypted stream
			decReader = io.LimitReader(ioutil.NewSkipReader(decReader, skipLen), decRangeLength)

			// Assemble the GetObjectReader
			r = &GetObjectReader{
				ObjInfo:    oi,
				pReader:    decReader,
				cleanUpFns: cFns,
			}
			return r, nil
		}
	default:
		// Plain object: the storage range equals the requested range.
		off, length, err = rs.GetOffsetLength(oi.Size)
		if err != nil {
			return nil, 0, 0, err
		}
		fn = func(inputReader io.Reader, _ http.Header, cFns ...func()) (r *GetObjectReader, err error) {
			r = &GetObjectReader{
				ObjInfo:    oi,
				pReader:    inputReader,
				cleanUpFns: append(cleanUpFns, cFns...),
			}
			return r, nil
		}
	}
	return fn, off, length, nil
}
// Close - calls the cleanup actions in reverse order
func (g *GetObjectReader) Close() error {
	// sync.Once is used here to ensure that Close() is
	// idempotent.
	g.once.Do(func() {
		for i := len(g.cleanUpFns); i > 0; i-- {
			g.cleanUpFns[i-1]()
		}
	})
	return nil
}
// Read - to implement Reader interface.
func (g *GetObjectReader) Read(p []byte) (n int, err error) {
	if n, err = g.pReader.Read(p); err != nil {
		// Calling code may not Close() in case of error, so
		// we ensure it (this also fires on io.EOF).
		g.Close()
	}
	return n, err
}

View File

@ -74,10 +74,6 @@ func setHeadGetRespHeaders(w http.ResponseWriter, reqParams url.Values) {
func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r *http.Request) { func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SelectObject") ctx := newContext(r, w, "SelectObject")
var object, bucket string
vars := mux.Vars(r)
bucket = vars["bucket"]
object = vars["object"]
// Fetch object stat info. // Fetch object stat info.
objectAPI := api.ObjectAPI() objectAPI := api.ObjectAPI()
@ -86,28 +82,39 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
return return
} }
getObjectInfo := objectAPI.GetObjectInfo vars := mux.Vars(r)
if api.CacheAPI() != nil { bucket := vars["bucket"]
getObjectInfo = api.CacheAPI().GetObjectInfo object := vars["object"]
}
opts := ObjectOptions{} // Check for auth type to return S3 compatible error.
// type to return the correct error (NoSuchKey vs AccessDenied)
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone { if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
if getRequestAuthType(r) == authTypeAnonymous { if getRequestAuthType(r) == authTypeAnonymous {
// As per "Permission" section in // As per "Permission" section in
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html If // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
// the object you request does not exist, the error Amazon S3 returns // If the object you request does not exist,
// depends on whether you also have the s3:ListBucket permission. * If you // the error Amazon S3 returns depends on
// have the s3:ListBucket permission on the bucket, Amazon S3 will return // whether you also have the s3:ListBucket
// an HTTP status code 404 ("no such key") error. * if you dont have the // permission.
// s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 // * If you have the s3:ListBucket permission
// ("access denied") error.` // on the bucket, Amazon S3 will return an
// HTTP status code 404 ("no such key")
// error.
// * if you dont have the s3:ListBucket
// permission, Amazon S3 will return an HTTP
// status code 403 ("access denied") error.`
if globalPolicySys.IsAllowed(policy.Args{ if globalPolicySys.IsAllowed(policy.Args{
Action: policy.ListBucketAction, Action: policy.ListBucketAction,
BucketName: bucket, BucketName: bucket,
ConditionValues: getConditionValues(r, ""), ConditionValues: getConditionValues(r, ""),
IsOwner: false, IsOwner: false,
}) { }) {
_, err := getObjectInfo(ctx, bucket, object, opts) getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}
_, err := getObjectInfo(ctx, bucket, object, ObjectOptions{})
if toAPIErrorCode(err) == ErrNoSuchKey { if toAPIErrorCode(err) == ErrNoSuchKey {
s3Error = ErrNoSuchKey s3Error = ErrNoSuchKey
} }
@ -116,21 +123,7 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
writeErrorResponse(w, s3Error, r.URL) writeErrorResponse(w, s3Error, r.URL)
return return
} }
if r.ContentLength <= 0 {
writeErrorResponse(w, ErrEmptyRequestBody, r.URL)
return
}
var selectReq ObjectSelectRequest
if err := xmlDecoder(r.Body, &selectReq, r.ContentLength); err != nil {
writeErrorResponse(w, ErrMalformedXML, r.URL)
return
}
objInfo, err := getObjectInfo(ctx, bucket, object, opts)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
// Get request range. // Get request range.
rangeHeader := r.Header.Get("Range") rangeHeader := r.Header.Get("Range")
if rangeHeader != "" { if rangeHeader != "" {
@ -138,6 +131,40 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
return return
} }
if r.ContentLength <= 0 {
writeErrorResponse(w, ErrEmptyRequestBody, r.URL)
return
}
var selectReq ObjectSelectRequest
if err := xmlDecoder(r.Body, &selectReq, r.ContentLength); err != nil {
writeErrorResponse(w, ErrMalformedXML, r.URL)
return
}
if !strings.EqualFold(string(selectReq.ExpressionType), "SQL") {
writeErrorResponse(w, ErrInvalidExpressionType, r.URL)
return
}
if len(selectReq.Expression) >= s3select.MaxExpressionLength {
writeErrorResponse(w, ErrExpressionTooLong, r.URL)
return
}
getObjectNInfo := objectAPI.GetObjectNInfo
if api.CacheAPI() != nil {
getObjectNInfo = api.CacheAPI().GetObjectNInfo
}
gr, err := getObjectNInfo(ctx, bucket, object, nil, r.Header)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
defer gr.Close()
objInfo := gr.ObjInfo
if selectReq.InputSerialization.CompressionType == SelectCompressionGZIP { if selectReq.InputSerialization.CompressionType == SelectCompressionGZIP {
if !strings.Contains(objInfo.ContentType, "gzip") { if !strings.Contains(objInfo.ContentType, "gzip") {
writeErrorResponse(w, ErrInvalidDataSource, r.URL) writeErrorResponse(w, ErrInvalidDataSource, r.URL)
@ -188,40 +215,16 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
return return
} }
getObject := objectAPI.GetObject // Set encryption response headers
if api.CacheAPI() != nil && !crypto.IsEncrypted(objInfo.UserDefined) {
getObject = api.CacheAPI().GetObject
}
reader, pipewriter := io.Pipe()
// Get the object.
var startOffset int64
length := objInfo.Size
var writer io.Writer
writer = pipewriter
if objectAPI.IsEncryptionSupported() { if objectAPI.IsEncryptionSupported() {
if crypto.IsEncrypted(objInfo.UserDefined) { switch {
// Response writer should be limited early on for decryption upto required length, case crypto.S3.IsEncrypted(objInfo.UserDefined):
// additionally also skipping mod(offset)64KiB boundaries. w.Header().Set(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
writer = ioutil.LimitedWriter(writer, startOffset%(64*1024), length) case crypto.IsEncrypted(objInfo.UserDefined):
w.Header().Set(crypto.SSECAlgorithm, r.Header.Get(crypto.SSECAlgorithm))
writer, startOffset, length, err = DecryptBlocksRequest(writer, r, bucket, w.Header().Set(crypto.SSECKeyMD5, r.Header.Get(crypto.SSECKeyMD5))
object, startOffset, length, objInfo, false)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
} }
} }
go func() {
defer reader.Close()
if gerr := getObject(ctx, bucket, object, 0, objInfo.Size, writer, objInfo.ETag, opts); gerr != nil {
pipewriter.CloseWithError(gerr)
return
}
pipewriter.Close() // Close writer explicitly signaling we wrote all data.
}()
//s3select //Options //s3select //Options
if selectReq.OutputSerialization.CSV.FieldDelimiter == "" { if selectReq.OutputSerialization.CSV.FieldDelimiter == "" {
@ -240,7 +243,7 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
FieldDelimiter: selectReq.InputSerialization.CSV.FieldDelimiter, FieldDelimiter: selectReq.InputSerialization.CSV.FieldDelimiter,
Comments: selectReq.InputSerialization.CSV.Comments, Comments: selectReq.InputSerialization.CSV.Comments,
Name: "S3Object", // Default table name for all objects Name: "S3Object", // Default table name for all objects
ReadFrom: reader, ReadFrom: gr,
Compressed: string(selectReq.InputSerialization.CompressionType), Compressed: string(selectReq.InputSerialization.CompressionType),
Expression: selectReq.Expression, Expression: selectReq.Expression,
OutputFieldDelimiter: selectReq.OutputSerialization.CSV.FieldDelimiter, OutputFieldDelimiter: selectReq.OutputSerialization.CSV.FieldDelimiter,
@ -284,26 +287,36 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
vars := mux.Vars(r) vars := mux.Vars(r)
bucket := vars["bucket"] bucket := vars["bucket"]
object := vars["object"] object := vars["object"]
opts := ObjectOptions{}
getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}
// Check for auth type to return S3 compatible error.
// type to return the correct error (NoSuchKey vs AccessDenied)
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone { if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
if getRequestAuthType(r) == authTypeAnonymous { if getRequestAuthType(r) == authTypeAnonymous {
// As per "Permission" section in https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html // As per "Permission" section in
// If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission. // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
// * If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code 404 ("no such key") error. // If the object you request does not exist,
// * if you dont have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 ("access denied") error.` // the error Amazon S3 returns depends on
// whether you also have the s3:ListBucket
// permission.
// * If you have the s3:ListBucket permission
// on the bucket, Amazon S3 will return an
// HTTP status code 404 ("no such key")
// error.
// * if you dont have the s3:ListBucket
// permission, Amazon S3 will return an HTTP
// status code 403 ("access denied") error.`
if globalPolicySys.IsAllowed(policy.Args{ if globalPolicySys.IsAllowed(policy.Args{
Action: policy.ListBucketAction, Action: policy.ListBucketAction,
BucketName: bucket, BucketName: bucket,
ConditionValues: getConditionValues(r, ""), ConditionValues: getConditionValues(r, ""),
IsOwner: false, IsOwner: false,
}) { }) {
_, err := getObjectInfo(ctx, bucket, object, opts) getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}
_, err := getObjectInfo(ctx, bucket, object, ObjectOptions{})
if toAPIErrorCode(err) == ErrNoSuchKey { if toAPIErrorCode(err) == ErrNoSuchKey {
s3Error = ErrNoSuchKey s3Error = ErrNoSuchKey
} }
@ -313,26 +326,20 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
return return
} }
objInfo, err := getObjectInfo(ctx, bucket, object, opts) getObjectNInfo := objectAPI.GetObjectNInfo
if err != nil { if api.CacheAPI() != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL) getObjectNInfo = api.CacheAPI().GetObjectNInfo
return
}
if objectAPI.IsEncryptionSupported() {
if _, err = DecryptObjectInfo(&objInfo, r.Header); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
} }
// Get request range. // Get request range.
var hrange *httpRange var rs *HTTPRangeSpec
rangeHeader := r.Header.Get("Range") rangeHeader := r.Header.Get("Range")
if rangeHeader != "" { if rangeHeader != "" {
if hrange, err = parseRequestRange(rangeHeader, objInfo.Size); err != nil { var err error
// Handle only errInvalidRange if rs, err = parseRequestRangeSpec(rangeHeader); err != nil {
// Ignore other parse error and treat it as regular Get request like Amazon S3. // Handle only errInvalidRange. Ignore other
// parse error and treat it as regular Get
// request like Amazon S3.
if err == errInvalidRange { if err == errInvalidRange {
writeErrorResponse(w, ErrInvalidRange, r.URL) writeErrorResponse(w, ErrInvalidRange, r.URL)
return return
@ -343,60 +350,53 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
} }
} }
gr, err := getObjectNInfo(ctx, bucket, object, rs, r.Header)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
defer gr.Close()
objInfo := gr.ObjInfo
if objectAPI.IsEncryptionSupported() {
if _, err = DecryptObjectInfo(objInfo, r.Header); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
}
// Validate pre-conditions if any. // Validate pre-conditions if any.
if checkPreconditions(w, r, objInfo) { if checkPreconditions(w, r, objInfo) {
return return
} }
// Get the object. // Set encryption response headers
var startOffset int64
length := objInfo.Size
if hrange != nil {
startOffset = hrange.offsetBegin
length = hrange.getLength()
}
var writer io.Writer
writer = w
if objectAPI.IsEncryptionSupported() { if objectAPI.IsEncryptionSupported() {
s3Encrypted := crypto.S3.IsEncrypted(objInfo.UserDefined) switch {
if crypto.IsEncrypted(objInfo.UserDefined) { case crypto.S3.IsEncrypted(objInfo.UserDefined):
// Response writer should be limited early on for decryption upto required length, w.Header().Set(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
// additionally also skipping mod(offset)64KiB boundaries. case crypto.IsEncrypted(objInfo.UserDefined):
writer = ioutil.LimitedWriter(writer, startOffset%(64*1024), length) w.Header().Set(crypto.SSECAlgorithm, r.Header.Get(crypto.SSECAlgorithm))
w.Header().Set(crypto.SSECKeyMD5, r.Header.Get(crypto.SSECKeyMD5))
writer, startOffset, length, err = DecryptBlocksRequest(writer, r, bucket, object, startOffset, length, objInfo, false)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
if s3Encrypted {
w.Header().Set(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
} else {
w.Header().Set(crypto.SSECAlgorithm, r.Header.Get(crypto.SSECAlgorithm))
w.Header().Set(crypto.SSECKeyMD5, r.Header.Get(crypto.SSECKeyMD5))
}
} }
} }
setObjectHeaders(w, objInfo, hrange) if hErr := setObjectHeaders(w, objInfo, rs); hErr != nil {
setHeadGetRespHeaders(w, r.URL.Query()) writeErrorResponse(w, toAPIErrorCode(hErr), r.URL)
return
getObject := objectAPI.GetObject
if api.CacheAPI() != nil && !crypto.IsEncrypted(objInfo.UserDefined) {
getObject = api.CacheAPI().GetObject
} }
statusCodeWritten := false setHeadGetRespHeaders(w, r.URL.Query())
httpWriter := ioutil.WriteOnClose(writer)
if hrange != nil && hrange.offsetBegin > -1 { statusCodeWritten := false
httpWriter := ioutil.WriteOnClose(w)
if rs != nil {
statusCodeWritten = true statusCodeWritten = true
w.WriteHeader(http.StatusPartialContent) w.WriteHeader(http.StatusPartialContent)
} }
// Reads the object at startOffset and writes to mw. // Write object content to response body
if err = getObject(ctx, bucket, object, startOffset, length, httpWriter, objInfo.ETag, opts); err != nil { if _, err = io.Copy(httpWriter, gr); err != nil {
if !httpWriter.HasWritten() && !statusCodeWritten { // write error response only if no data or headers has been written to client yet if !httpWriter.HasWritten() && !statusCodeWritten { // write error response only if no data or headers has been written to client yet
writeErrorResponse(w, toAPIErrorCode(err), r.URL) writeErrorResponse(w, toAPIErrorCode(err), r.URL)
} }
@ -450,24 +450,38 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
bucket := vars["bucket"] bucket := vars["bucket"]
object := vars["object"] object := vars["object"]
getObjectInfo := objectAPI.GetObjectInfo getObjectNInfo := objectAPI.GetObjectNInfo
if api.CacheAPI() != nil { if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo getObjectNInfo = api.CacheAPI().GetObjectNInfo
} }
opts := ObjectOptions{} opts := ObjectOptions{}
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone { if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
if getRequestAuthType(r) == authTypeAnonymous { if getRequestAuthType(r) == authTypeAnonymous {
// As per "Permission" section in https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html // As per "Permission" section in
// If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission. // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html
// * If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code 404 ("no such key") error. // If the object you request does not exist,
// * if you dont have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 ("access denied") error.` // the error Amazon S3 returns depends on
// whether you also have the s3:ListBucket
// permission.
// * If you have the s3:ListBucket permission
// on the bucket, Amazon S3 will return an
// HTTP status code 404 ("no such key")
// error.
// * if you dont have the s3:ListBucket
// permission, Amazon S3 will return an HTTP
// status code 403 ("access denied") error.`
if globalPolicySys.IsAllowed(policy.Args{ if globalPolicySys.IsAllowed(policy.Args{
Action: policy.ListBucketAction, Action: policy.ListBucketAction,
BucketName: bucket, BucketName: bucket,
ConditionValues: getConditionValues(r, ""), ConditionValues: getConditionValues(r, ""),
IsOwner: false, IsOwner: false,
}) { }) {
getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}
_, err := getObjectInfo(ctx, bucket, object, opts) _, err := getObjectInfo(ctx, bucket, object, opts)
if toAPIErrorCode(err) == ErrNoSuchKey { if toAPIErrorCode(err) == ErrNoSuchKey {
s3Error = ErrNoSuchKey s3Error = ErrNoSuchKey
@ -478,22 +492,21 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
return return
} }
objInfo, err := getObjectInfo(ctx, bucket, object, opts) gr, err := getObjectNInfo(ctx, bucket, object, nil, r.Header)
if err != nil { if err != nil {
writeErrorResponseHeadersOnly(w, toAPIErrorCode(err)) writeErrorResponseHeadersOnly(w, toAPIErrorCode(err))
return return
} }
defer gr.Close()
objInfo := gr.ObjInfo
var encrypted bool var encrypted bool
if objectAPI.IsEncryptionSupported() { if objectAPI.IsEncryptionSupported() {
if encrypted, err = DecryptObjectInfo(&objInfo, r.Header); err != nil { if encrypted, err = DecryptObjectInfo(objInfo, r.Header); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL) writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return return
} else if encrypted { } else if encrypted {
s3Encrypted := crypto.S3.IsEncrypted(objInfo.UserDefined) s3Encrypted := crypto.S3.IsEncrypted(objInfo.UserDefined)
if _, err = DecryptRequest(w, r, bucket, object, objInfo.UserDefined); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
if s3Encrypted { if s3Encrypted {
w.Header().Set(crypto.SSEHeader, crypto.SSEAlgorithmAES256) w.Header().Set(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
} else { } else {
@ -509,7 +522,10 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
} }
// Set standard object headers. // Set standard object headers.
setObjectHeaders(w, objInfo, nil) if hErr := setObjectHeaders(w, objInfo, nil); hErr != nil {
writeErrorResponse(w, toAPIErrorCode(hErr), r.URL)
return
}
// Set any additional requested response headers. // Set any additional requested response headers.
setHeadGetRespHeaders(w, r.URL.Query()) setHeadGetRespHeaders(w, r.URL.Query())
@ -689,7 +705,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// otherwise we proceed to encrypt/decrypt. // otherwise we proceed to encrypt/decrypt.
if sseCopyC && sseC && cpSrcDstSame { if sseCopyC && sseC && cpSrcDstSame {
// Get the old key which needs to be rotated. // Get the old key which needs to be rotated.
oldKey, err = ParseSSECopyCustomerRequest(r, srcInfo.UserDefined) oldKey, err = ParseSSECopyCustomerRequest(r.Header, srcInfo.UserDefined)
if err != nil { if err != nil {
pipeWriter.CloseWithError(err) pipeWriter.CloseWithError(err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL) writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@ -1244,17 +1260,13 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
} }
// Get request range. // Get request range.
var hrange *httpRange var startOffset, length int64
rangeHeader := r.Header.Get("x-amz-copy-source-range") rangeHeader := r.Header.Get("x-amz-copy-source-range")
if rangeHeader != "" { if startOffset, length, err = parseCopyPartRange(rangeHeader, srcInfo.Size); err != nil {
if hrange, err = parseCopyPartRange(rangeHeader, srcInfo.Size); err != nil { logger.GetReqInfo(ctx).AppendTags("rangeHeader", rangeHeader)
// Handle only errInvalidRange logger.LogIf(ctx, err)
// Ignore other parse error and treat it as regular Get request like Amazon S3. writeCopyPartErr(w, err, r.URL)
logger.GetReqInfo(ctx).AppendTags("rangeHeader", rangeHeader) return
logger.LogIf(ctx, err)
writeCopyPartErr(w, err, r.URL)
return
}
} }
// Verify before x-amz-copy-source preconditions before continuing with CopyObject. // Verify before x-amz-copy-source preconditions before continuing with CopyObject.
@ -1262,14 +1274,6 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
return return
} }
// Get the object.
var startOffset int64
length := srcInfo.Size
if hrange != nil {
length = hrange.getLength()
startOffset = hrange.offsetBegin
}
/// maximum copy size for multipart objects in a single operation /// maximum copy size for multipart objects in a single operation
if isMaxAllowedPartSize(length) { if isMaxAllowedPartSize(length) {
writeErrorResponse(w, ErrEntityTooLarge, r.URL) writeErrorResponse(w, ErrEntityTooLarge, r.URL)
@ -1310,6 +1314,10 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
} }
} }
if crypto.IsEncrypted(li.UserDefined) { if crypto.IsEncrypted(li.UserDefined) {
if !hasServerSideEncryptionHeader(r.Header) {
writeErrorResponse(w, ErrSSEMultipartEncrypted, r.URL)
return
}
var key []byte var key []byte
if crypto.SSEC.IsRequested(r.Header) { if crypto.SSEC.IsRequested(r.Header) {
key, err = ParseSSECustomerRequest(r) key, err = ParseSSECustomerRequest(r)
@ -1508,7 +1516,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
return return
} }
} }
sseS3 := false
if objectAPI.IsEncryptionSupported() { if objectAPI.IsEncryptionSupported() {
var li ListPartsInfo var li ListPartsInfo
li, err = objectAPI.ListObjectParts(ctx, bucket, object, uploadID, 0, 1) li, err = objectAPI.ListObjectParts(ctx, bucket, object, uploadID, 0, 1)
@ -1517,7 +1525,10 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
return return
} }
if crypto.IsEncrypted(li.UserDefined) { if crypto.IsEncrypted(li.UserDefined) {
sseS3 = crypto.S3.IsEncrypted(li.UserDefined) if !hasServerSideEncryptionHeader(r.Header) {
writeErrorResponse(w, ErrSSEMultipartEncrypted, r.URL)
return
}
var key []byte var key []byte
if crypto.SSEC.IsRequested(r.Header) { if crypto.SSEC.IsRequested(r.Header) {
key, err = ParseSSECustomerRequest(r) key, err = ParseSSECustomerRequest(r)
@ -1558,7 +1569,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
} }
putObjectPart := objectAPI.PutObjectPart putObjectPart := objectAPI.PutObjectPart
if api.CacheAPI() != nil && !crypto.SSEC.IsRequested(r.Header) && !sseS3 { if api.CacheAPI() != nil && !hasServerSideEncryptionHeader(r.Header) {
putObjectPart = api.CacheAPI().PutObjectPart putObjectPart = api.CacheAPI().PutObjectPart
} }
partInfo, err := putObjectPart(ctx, bucket, object, uploadID, partID, hashReader, opts) partInfo, err := putObjectPart(ctx, bucket, object, uploadID, partID, hashReader, opts)

View File

@ -19,9 +19,13 @@ package cmd
import ( import (
"bytes" "bytes"
"context" "context"
"crypto/md5"
"encoding/base64"
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"io" "io"
"strings"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@ -31,7 +35,9 @@ import (
"testing" "testing"
humanize "github.com/dustin/go-humanize" humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/auth"
ioutilx "github.com/minio/minio/pkg/ioutil"
) )
// Type to capture different modifications to API request to simulate failure cases. // Type to capture different modifications to API request to simulate failure cases.
@ -129,7 +135,7 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string,
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
// construct HTTP request for Get Object end point. // construct HTTP request for Get Object end point.
req, err := newTestSignedRequestV4("HEAD", getHeadObjectURL("", testCase.bucketName, testCase.objectName), req, err := newTestSignedRequestV4("HEAD", getHeadObjectURL("", testCase.bucketName, testCase.objectName),
0, nil, testCase.accessKey, testCase.secretKey) 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for Head Object: <ERROR> %v", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to create HTTP request for Head Object: <ERROR> %v", i+1, instanceType, err)
} }
@ -147,7 +153,7 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string,
recV2 := httptest.NewRecorder() recV2 := httptest.NewRecorder()
// construct HTTP request for Head Object endpoint. // construct HTTP request for Head Object endpoint.
reqV2, err := newTestSignedRequestV2("HEAD", getHeadObjectURL("", testCase.bucketName, testCase.objectName), reqV2, err := newTestSignedRequestV2("HEAD", getHeadObjectURL("", testCase.bucketName, testCase.objectName),
0, nil, testCase.accessKey, testCase.secretKey) 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for Head Object: <ERROR> %v", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to create HTTP request for Head Object: <ERROR> %v", i+1, instanceType, err)
@ -182,7 +188,7 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string,
nilBucket := "dummy-bucket" nilBucket := "dummy-bucket"
nilObject := "dummy-object" nilObject := "dummy-object"
nilReq, err := newTestSignedRequestV4("HEAD", getGetObjectURL("", nilBucket, nilObject), nilReq, err := newTestSignedRequestV4("HEAD", getGetObjectURL("", nilBucket, nilObject),
0, nil, "", "") 0, nil, "", "", nil)
if err != nil { if err != nil {
t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@ -192,14 +198,139 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string,
ExecObjectLayerAPINilTest(t, nilBucket, nilObject, instanceType, apiRouter, nilReq) ExecObjectLayerAPINilTest(t, nilBucket, nilObject, instanceType, apiRouter, nilReq)
} }
func TestAPIHeadObjectHandlerWithEncryption(t *testing.T) {
globalPolicySys = NewPolicySys()
defer func() { globalPolicySys = nil }()
defer DetectTestLeak(t)()
ExecObjectLayerAPITest(t, testAPIHeadObjectHandlerWithEncryption, []string{"NewMultipart", "PutObjectPart", "CompleteMultipart", "GetObject", "PutObject", "HeadObject"})
}
func testAPIHeadObjectHandlerWithEncryption(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
// Set SSL to on to do encryption tests
globalIsSSL = true
defer func() { globalIsSSL = false }()
var (
oneMiB int64 = 1024 * 1024
key32Bytes = generateBytesData(32 * humanize.Byte)
key32BytesMd5 = md5.Sum(key32Bytes)
metaWithSSEC = map[string]string{
crypto.SSECAlgorithm: crypto.SSEAlgorithmAES256,
crypto.SSECKey: base64.StdEncoding.EncodeToString(key32Bytes),
crypto.SSECKeyMD5: base64.StdEncoding.EncodeToString(key32BytesMd5[:]),
}
mapCopy = func(m map[string]string) map[string]string {
r := make(map[string]string, len(m))
for k, v := range m {
r[k] = v
}
return r
}
)
type ObjectInput struct {
objectName string
partLengths []int64
metaData map[string]string
}
objectLength := func(oi ObjectInput) (sum int64) {
for _, l := range oi.partLengths {
sum += l
}
return
}
// set of inputs for uploading the objects before tests for
// downloading is done. Data bytes are from DummyDataGen.
objectInputs := []ObjectInput{
// Unencrypted objects
{"nothing", []int64{0}, nil},
{"small-1", []int64{509}, nil},
{"mp-1", []int64{5 * oneMiB, 1}, nil},
{"mp-2", []int64{5487701, 5487799, 3}, nil},
// Encrypted object
{"enc-nothing", []int64{0}, mapCopy(metaWithSSEC)},
{"enc-small-1", []int64{509}, mapCopy(metaWithSSEC)},
{"enc-mp-1", []int64{5 * oneMiB, 1}, mapCopy(metaWithSSEC)},
{"enc-mp-2", []int64{5487701, 5487799, 3}, mapCopy(metaWithSSEC)},
}
// iterate through the above set of inputs and upload the object.
for _, input := range objectInputs {
uploadTestObject(t, apiRouter, credentials, bucketName, input.objectName, input.partLengths, input.metaData, false)
}
for i, input := range objectInputs {
// initialize HTTP NewRecorder, this records any
// mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for HEAD object.
req, err := newTestSignedRequestV4("HEAD", getHeadObjectURL("", bucketName, input.objectName),
0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for Head Object: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a
// ServeHTTP to execute the logic of the handler.
apiRouter.ServeHTTP(rec, req)
isEnc := false
expected := 200
if strings.HasPrefix(input.objectName, "enc-") {
isEnc = true
expected = 400
}
if rec.Code != expected {
t.Errorf("Test %d: expected code %d but got %d for object %s", i+1, expected, rec.Code, input.objectName)
}
contentLength := rec.Header().Get("Content-Length")
if isEnc {
// initialize HTTP NewRecorder, this records any
// mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for HEAD object.
req, err := newTestSignedRequestV4("HEAD", getHeadObjectURL("", bucketName, input.objectName),
0, nil, credentials.AccessKey, credentials.SecretKey, input.metaData)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for Head Object: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a
// ServeHTTP to execute the logic of the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != 200 {
t.Errorf("Test %d: Did not receive a 200 response: %d", i+1, rec.Code)
}
contentLength = rec.Header().Get("Content-Length")
}
if contentLength != fmt.Sprintf("%d", objectLength(input)) {
t.Errorf("Test %d: Content length is mismatching: got %s (expected: %d)", i+1, contentLength, objectLength(input))
}
}
}
// Wrapper for calling GetObject API handler tests for both XL multiple disks and FS single drive setup. // Wrapper for calling GetObject API handler tests for both XL multiple disks and FS single drive setup.
func TestAPIGetObjectHandler(t *testing.T) { func TestAPIGetObjectHandler(t *testing.T) {
globalPolicySys = NewPolicySys()
defer func() { globalPolicySys = nil }()
defer DetectTestLeak(t)() defer DetectTestLeak(t)()
ExecObjectLayerAPITest(t, testAPIGetObjectHandler, []string{"GetObject"}) ExecObjectLayerAPITest(t, testAPIGetObjectHandler, []string{"GetObject"})
} }
func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) { credentials auth.Credentials, t *testing.T) {
objectName := "test-object" objectName := "test-object"
// set of byte data for PutObject. // set of byte data for PutObject.
// object has to be created before running tests for GetObject. // object has to be created before running tests for GetObject.
@ -376,7 +507,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
// construct HTTP request for Get Object end point. // construct HTTP request for Get Object end point.
req, err := newTestSignedRequestV4("GET", getGetObjectURL("", testCase.bucketName, testCase.objectName), req, err := newTestSignedRequestV4("GET", getGetObjectURL("", testCase.bucketName, testCase.objectName),
0, nil, testCase.accessKey, testCase.secretKey) 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for Get Object: <ERROR> %v", i+1, err) t.Fatalf("Test %d: Failed to create HTTP request for Get Object: <ERROR> %v", i+1, err)
@ -406,7 +537,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
recV2 := httptest.NewRecorder() recV2 := httptest.NewRecorder()
// construct HTTP request for GET Object endpoint. // construct HTTP request for GET Object endpoint.
reqV2, err := newTestSignedRequestV2("GET", getGetObjectURL("", testCase.bucketName, testCase.objectName), reqV2, err := newTestSignedRequestV2("GET", getGetObjectURL("", testCase.bucketName, testCase.objectName),
0, nil, testCase.accessKey, testCase.secretKey) 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for GetObject: <ERROR> %v", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to create HTTP request for GetObject: <ERROR> %v", i+1, instanceType, err)
@ -455,7 +586,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
nilBucket := "dummy-bucket" nilBucket := "dummy-bucket"
nilObject := "dummy-object" nilObject := "dummy-object"
nilReq, err := newTestSignedRequestV4("GET", getGetObjectURL("", nilBucket, nilObject), nilReq, err := newTestSignedRequestV4("GET", getGetObjectURL("", nilBucket, nilObject),
0, nil, "", "") 0, nil, "", "", nil)
if err != nil { if err != nil {
t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@ -465,6 +596,204 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
ExecObjectLayerAPINilTest(t, nilBucket, nilObject, instanceType, apiRouter, nilReq) ExecObjectLayerAPINilTest(t, nilBucket, nilObject, instanceType, apiRouter, nilReq)
} }
// Wrapper for calling GetObject API handler tests for both XL multiple disks and FS single drive setup.
func TestAPIGetObjectWithMPHandler(t *testing.T) {
	// Install a fresh bucket-policy subsystem for the duration of this
	// test and tear it down on exit so other tests see a clean global.
	globalPolicySys = NewPolicySys()
	defer func() { globalPolicySys = nil }()
	// Registered after the globalPolicySys reset, so the leak detector
	// runs first on return (defers execute LIFO).
	defer DetectTestLeak(t)()
	// Run the handler test against each object-layer backend with only
	// the listed API endpoints registered on the router.
	ExecObjectLayerAPITest(t, testAPIGetObjectWithMPHandler, []string{"NewMultipart", "PutObjectPart", "CompleteMultipart", "GetObject", "PutObject"})
}
// testAPIGetObjectWithMPHandler uploads a matrix of objects — plain and
// SSE-C encrypted, single part and multipart — then exercises the GetObject
// handler with whole-object and ranged requests (signed with both V2 and V4)
// and verifies the returned bytes against a locally built reference stream.
func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
	credentials auth.Credentials, t *testing.T) {

	// Set SSL to on to do encryption tests. SSE-C requests are only
	// accepted over TLS, so pretend TLS is active for this test.
	globalIsSSL = true
	defer func() { globalIsSSL = false }()

	var (
		oneMiB        int64 = 1024 * 1024
		key32Bytes          = generateBytesData(32 * humanize.Byte)
		key32BytesMd5       = md5.Sum(key32Bytes)
		// Request headers for SSE-C: algorithm, base64 client key and
		// base64 MD5 of the key.
		metaWithSSEC = map[string]string{
			crypto.SSECAlgorithm: crypto.SSEAlgorithmAES256,
			crypto.SSECKey:       base64.StdEncoding.EncodeToString(key32Bytes),
			crypto.SSECKeyMD5:    base64.StdEncoding.EncodeToString(key32BytesMd5[:]),
		}
		// mapCopy returns a shallow copy so each test case owns an
		// independent metadata map.
		mapCopy = func(m map[string]string) map[string]string {
			r := make(map[string]string, len(m))
			for k, v := range m {
				r[k] = v
			}
			return r
		}
	)

	// ObjectInput describes one object to upload: its name, the length of
	// each part (one entry means a regular single-part upload), and the
	// metadata/headers to send with the upload.
	type ObjectInput struct {
		objectName  string
		partLengths []int64
		metaData    map[string]string
	}

	// objectLength is the total (plaintext) size of an object: the sum of
	// its part lengths.
	objectLength := func(oi ObjectInput) (sum int64) {
		for _, l := range oi.partLengths {
			sum += l
		}
		return
	}

	// set of inputs for uploading the objects before tests for
	// downloading is done. Data bytes are from DummyDataGen.
	objectInputs := []ObjectInput{
		// cases 0-3: small single part objects
		{"nothing", []int64{0}, make(map[string]string)},
		{"small-0", []int64{11}, make(map[string]string)},
		{"small-1", []int64{509}, make(map[string]string)},
		{"small-2", []int64{5 * oneMiB}, make(map[string]string)},
		// cases 4-7: multipart part objects (part sizes straddle the
		// 5MiB minimum-part-size boundary and odd offsets)
		{"mp-0", []int64{5 * oneMiB, 1}, make(map[string]string)},
		{"mp-1", []int64{5*oneMiB + 1, 1}, make(map[string]string)},
		{"mp-2", []int64{5487701, 5487799, 3}, make(map[string]string)},
		{"mp-3", []int64{10499807, 10499963, 7}, make(map[string]string)},
		// cases 8-11: small single part objects with encryption
		{"enc-nothing", []int64{0}, mapCopy(metaWithSSEC)},
		{"enc-small-0", []int64{11}, mapCopy(metaWithSSEC)},
		{"enc-small-1", []int64{509}, mapCopy(metaWithSSEC)},
		{"enc-small-2", []int64{5 * oneMiB}, mapCopy(metaWithSSEC)},
		// cases 12-15: multipart part objects with encryption
		{"enc-mp-0", []int64{5 * oneMiB, 1}, mapCopy(metaWithSSEC)},
		{"enc-mp-1", []int64{5*oneMiB + 1, 1}, mapCopy(metaWithSSEC)},
		{"enc-mp-2", []int64{5487701, 5487799, 3}, mapCopy(metaWithSSEC)},
		{"enc-mp-3", []int64{10499807, 10499963, 7}, mapCopy(metaWithSSEC)},
	}

	// iterate through the above set of inputs and upload the object.
	for _, input := range objectInputs {
		uploadTestObject(t, apiRouter, credentials, bucketName, input.objectName, input.partLengths, input.metaData, false)
	}

	// function type for creating signed requests - used to repeat
	// requests with V2 and V4 signing.
	type testSignedReqFn func(method, urlStr string, contentLength int64,
		body io.ReadSeeker, accessKey, secretKey string, metamap map[string]string) (*http.Request,
		error)

	// mkGetReq performs one GET (optionally ranged) against the handler
	// and fails the test unless the response body exactly matches the
	// expected slice of the deterministic DummyDataGen stream.
	mkGetReq := func(oi ObjectInput, byteRange string, i int, mkSignedReq testSignedReqFn) {
		object := oi.objectName
		rec := httptest.NewRecorder()
		// The object's metadata is passed so SSE-C key headers are sent
		// on the GET for encrypted objects.
		req, err := mkSignedReq("GET", getGetObjectURL("", bucketName, object),
			0, nil, credentials.AccessKey, credentials.SecretKey, oi.metaData)
		if err != nil {
			t.Fatalf("Object: %s Case %d ByteRange: %s: Failed to create HTTP request for Get Object: <ERROR> %v",
				object, i+1, byteRange, err)
		}

		if byteRange != "" {
			req.Header.Add("Range", byteRange)
		}

		apiRouter.ServeHTTP(rec, req)

		// Check response code (we make only valid requests in
		// this test)
		if rec.Code != http.StatusPartialContent && rec.Code != http.StatusOK {
			bd, err1 := ioutil.ReadAll(rec.Body)
			t.Fatalf("%s Object: %s Case %d ByteRange: %s: Got response status `%d` and body: %s,%v",
				instanceType, object, i+1, byteRange, rec.Code, string(bd), err1)
		}

		var off, length int64
		var rs *HTTPRangeSpec
		if byteRange != "" {
			rs, err = parseRequestRangeSpec(byteRange)
			if err != nil {
				t.Fatalf("Object: %s Case %d ByteRange: %s: Unexpected err: %v", object, i+1, byteRange, err)
			}
		}
		// rs stays nil for an un-ranged request; GetOffsetLength is
		// called on the nil receiver — assumed to yield (0, full
		// length) in that case. NOTE(review): confirm nil-receiver
		// semantics in HTTPRangeSpec.GetOffsetLength.
		off, length, err = rs.GetOffsetLength(objectLength(oi))
		if err != nil {
			t.Fatalf("Object: %s Case %d ByteRange: %s: Unexpected err: %v", object, i+1, byteRange, err)
		}

		// Rebuild the expected plaintext locally: each part is a
		// DummyDataGen stream whose base offset is the cumulative size
		// of the parts before it; then skip to `off` and cap at
		// `length` to mirror the requested range.
		readers := []io.Reader{}
		cumulativeSum := int64(0)
		for _, p := range oi.partLengths {
			readers = append(readers, NewDummyDataGen(p, cumulativeSum))
			cumulativeSum += p
		}
		refReader := io.LimitReader(ioutilx.NewSkipReader(io.MultiReader(readers...), off), length)
		if ok, msg := cmpReaders(refReader, rec.Body); !ok {
			t.Fatalf("(%s) Object: %s Case %d ByteRange: %s --> data mismatch! (msg: %s)", instanceType, oi.objectName, i+1, byteRange, msg)
		}
	}

	// Iterate over each uploaded object and do a bunch of get
	// requests on them.
	caseNumber := 0
	signFns := []testSignedReqFn{newTestSignedRequestV2, newTestSignedRequestV4}
	for _, oi := range objectInputs {
		objLen := objectLength(oi)
		for _, sf := range signFns {
			// Read whole object
			mkGetReq(oi, "", caseNumber, sf)
			caseNumber++

			// No range requests are possible if the
			// object length is 0
			if objLen == 0 {
				continue
			}

			// Various ranges to query - all are valid!
			rangeHdrs := []string{
				// Read first byte of object
				fmt.Sprintf("bytes=%d-%d", 0, 0),
				// Read second byte of object
				fmt.Sprintf("bytes=%d-%d", 1, 1),
				// Read last byte of object
				fmt.Sprintf("bytes=-%d", 1),
				// Read all but first byte of object
				"bytes=1-",
				// Read first half of object
				fmt.Sprintf("bytes=%d-%d", 0, objLen/2),
				// Read last half of object
				fmt.Sprintf("bytes=-%d", objLen/2),
				// Read middle half of object
				fmt.Sprintf("bytes=%d-%d", objLen/4, objLen*3/4),
				// Read 100MiB of the object from the beginning
				fmt.Sprintf("bytes=%d-%d", 0, 100*humanize.MiByte),
				// Read 100MiB of the object from the end
				fmt.Sprintf("bytes=-%d", 100*humanize.MiByte),
			}
			for _, rangeHdr := range rangeHdrs {
				mkGetReq(oi, rangeHdr, caseNumber, sf)
				caseNumber++
			}
		}
	}

	// HTTP request for testing when `objectLayer` is set to `nil`.
	// There is no need to use an existing bucket and valid input for creating the request
	// since the `objectLayer==nil` check is performed before any other checks inside the handlers.
	// The only aim is to generate an HTTP request in a way that the relevant/registered end point is evoked/called.
	nilBucket := "dummy-bucket"
	nilObject := "dummy-object"

	nilReq, err := newTestSignedRequestV4("GET", getGetObjectURL("", nilBucket, nilObject),
		0, nil, "", "", nil)
	if err != nil {
		t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
	}
	// execute the object layer set to `nil` test.
	// `ExecObjectLayerAPINilTest` manages the operation.
	ExecObjectLayerAPINilTest(t, nilBucket, nilObject, instanceType, apiRouter, nilReq)
}
// Wrapper for calling PutObject API handler tests using streaming signature v4 for both XL multiple disks and FS single drive setup. // Wrapper for calling PutObject API handler tests using streaming signature v4 for both XL multiple disks and FS single drive setup.
func TestAPIPutObjectStreamSigV4Handler(t *testing.T) { func TestAPIPutObjectStreamSigV4Handler(t *testing.T) {
defer DetectTestLeak(t)() defer DetectTestLeak(t)()
@ -912,7 +1241,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
// construct HTTP request for Get Object end point. // construct HTTP request for Get Object end point.
req, err = newTestSignedRequestV4("PUT", getPutObjectURL("", testCase.bucketName, testCase.objectName), req, err = newTestSignedRequestV4("PUT", getPutObjectURL("", testCase.bucketName, testCase.objectName),
int64(testCase.dataLen), bytes.NewReader(testCase.data), testCase.accessKey, testCase.secretKey) int64(testCase.dataLen), bytes.NewReader(testCase.data), testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for Put Object: <ERROR> %v", i+1, err) t.Fatalf("Test %d: Failed to create HTTP request for Put Object: <ERROR> %v", i+1, err)
} }
@ -953,7 +1282,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
recV2 := httptest.NewRecorder() recV2 := httptest.NewRecorder()
// construct HTTP request for PUT Object endpoint. // construct HTTP request for PUT Object endpoint.
reqV2, err = newTestSignedRequestV2("PUT", getPutObjectURL("", testCase.bucketName, testCase.objectName), reqV2, err = newTestSignedRequestV2("PUT", getPutObjectURL("", testCase.bucketName, testCase.objectName),
int64(testCase.dataLen), bytes.NewReader(testCase.data), testCase.accessKey, testCase.secretKey) int64(testCase.dataLen), bytes.NewReader(testCase.data), testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutObject: <ERROR> %v", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to create HTTP request for PutObject: <ERROR> %v", i+1, instanceType, err)
@ -1013,7 +1342,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
nilObject := "dummy-object" nilObject := "dummy-object"
nilReq, err := newTestSignedRequestV4("PUT", getPutObjectURL("", nilBucket, nilObject), nilReq, err := newTestSignedRequestV4("PUT", getPutObjectURL("", nilBucket, nilObject),
0, nil, "", "") 0, nil, "", "", nil)
if err != nil { if err != nil {
t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@ -1091,7 +1420,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam
// construct HTTP request for copy object. // construct HTTP request for copy object.
var req *http.Request var req *http.Request
req, err = newTestSignedRequestV4("PUT", cpPartURL, 0, nil, credentials.AccessKey, credentials.SecretKey) req, err = newTestSignedRequestV4("PUT", cpPartURL, 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test failed to create HTTP request for copy object part: <ERROR> %v", err) t.Fatalf("Test failed to create HTTP request for copy object part: <ERROR> %v", err)
} }
@ -1373,11 +1702,11 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
if !testCase.invalidPartNumber || !testCase.maximumPartNumber { if !testCase.invalidPartNumber || !testCase.maximumPartNumber {
// construct HTTP request for copy object. // construct HTTP request for copy object.
req, err = newTestSignedRequestV4("PUT", getCopyObjectPartURL("", testCase.bucketName, testObject, testCase.uploadID, "1"), 0, nil, testCase.accessKey, testCase.secretKey) req, err = newTestSignedRequestV4("PUT", getCopyObjectPartURL("", testCase.bucketName, testObject, testCase.uploadID, "1"), 0, nil, testCase.accessKey, testCase.secretKey, nil)
} else if testCase.invalidPartNumber { } else if testCase.invalidPartNumber {
req, err = newTestSignedRequestV4("PUT", getCopyObjectPartURL("", testCase.bucketName, testObject, testCase.uploadID, "abc"), 0, nil, testCase.accessKey, testCase.secretKey) req, err = newTestSignedRequestV4("PUT", getCopyObjectPartURL("", testCase.bucketName, testObject, testCase.uploadID, "abc"), 0, nil, testCase.accessKey, testCase.secretKey, nil)
} else if testCase.maximumPartNumber { } else if testCase.maximumPartNumber {
req, err = newTestSignedRequestV4("PUT", getCopyObjectPartURL("", testCase.bucketName, testObject, testCase.uploadID, "99999"), 0, nil, testCase.accessKey, testCase.secretKey) req, err = newTestSignedRequestV4("PUT", getCopyObjectPartURL("", testCase.bucketName, testObject, testCase.uploadID, "99999"), 0, nil, testCase.accessKey, testCase.secretKey, nil)
} }
if err != nil { if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for copy Object: <ERROR> %v", i+1, err) t.Fatalf("Test %d: Failed to create HTTP request for copy Object: <ERROR> %v", i+1, err)
@ -1447,7 +1776,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
nilObject := "dummy-object" nilObject := "dummy-object"
nilReq, err := newTestSignedRequestV4("PUT", getCopyObjectPartURL("", nilBucket, nilObject, "0", "0"), nilReq, err := newTestSignedRequestV4("PUT", getCopyObjectPartURL("", nilBucket, nilObject, "0", "0"),
0, bytes.NewReader([]byte("testNilObjLayer")), "", "") 0, bytes.NewReader([]byte("testNilObjLayer")), "", "", nil)
if err != nil { if err != nil {
t.Errorf("Minio %s: Failed to create http request for testing the response when object Layer is set to `nil`.", instanceType) t.Errorf("Minio %s: Failed to create http request for testing the response when object Layer is set to `nil`.", instanceType)
} }
@ -1740,7 +2069,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
// construct HTTP request for copy object. // construct HTTP request for copy object.
req, err = newTestSignedRequestV4("PUT", getCopyObjectURL("", testCase.bucketName, testCase.newObjectName), req, err = newTestSignedRequestV4("PUT", getCopyObjectURL("", testCase.bucketName, testCase.newObjectName),
0, nil, testCase.accessKey, testCase.secretKey) 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for copy Object: <ERROR> %v", i+1, err) t.Fatalf("Test %d: Failed to create HTTP request for copy Object: <ERROR> %v", i+1, err)
@ -1859,7 +2188,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
nilObject := "dummy-object" nilObject := "dummy-object"
nilReq, err := newTestSignedRequestV4("PUT", getCopyObjectURL("", nilBucket, nilObject), nilReq, err := newTestSignedRequestV4("PUT", getCopyObjectURL("", nilBucket, nilObject),
0, nil, "", "") 0, nil, "", "", nil)
// Below is how CopyObjectHandler is registered. // Below is how CopyObjectHandler is registered.
// bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?") // bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?")
@ -1890,7 +2219,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
// construct HTTP request for NewMultipart upload. // construct HTTP request for NewMultipart upload.
req, err := newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, objectName), req, err := newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, objectName),
0, nil, credentials.AccessKey, credentials.SecretKey) 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Failed to create HTTP request for NewMultipart Request: <ERROR> %v", err) t.Fatalf("Failed to create HTTP request for NewMultipart Request: <ERROR> %v", err)
@ -1923,7 +2252,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string
// construct HTTP request for NewMultipart upload. // construct HTTP request for NewMultipart upload.
// Setting an invalid accessID. // Setting an invalid accessID.
req, err = newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, objectName), req, err = newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, objectName),
0, nil, "Invalid-AccessID", credentials.SecretKey) 0, nil, "Invalid-AccessID", credentials.SecretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Failed to create HTTP request for NewMultipart Request: <ERROR> %v", err) t.Fatalf("Failed to create HTTP request for NewMultipart Request: <ERROR> %v", err)
@ -1942,7 +2271,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string
recV2 := httptest.NewRecorder() recV2 := httptest.NewRecorder()
// construct HTTP request for NewMultipartUpload endpoint. // construct HTTP request for NewMultipartUpload endpoint.
reqV2, err := newTestSignedRequestV2("POST", getNewMultipartURL("", bucketName, objectName), reqV2, err := newTestSignedRequestV2("POST", getNewMultipartURL("", bucketName, objectName),
0, nil, credentials.AccessKey, credentials.SecretKey) 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Failed to create HTTP request for NewMultipart Request: <ERROR> %v", err) t.Fatalf("Failed to create HTTP request for NewMultipart Request: <ERROR> %v", err)
@ -1975,7 +2304,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string
// construct HTTP request for NewMultipartUpload endpoint. // construct HTTP request for NewMultipartUpload endpoint.
// Setting invalid AccessID. // Setting invalid AccessID.
reqV2, err = newTestSignedRequestV2("POST", getNewMultipartURL("", bucketName, objectName), reqV2, err = newTestSignedRequestV2("POST", getNewMultipartURL("", bucketName, objectName),
0, nil, "Invalid-AccessID", credentials.SecretKey) 0, nil, "Invalid-AccessID", credentials.SecretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Failed to create HTTP request for NewMultipart Request: <ERROR> %v", err) t.Fatalf("Failed to create HTTP request for NewMultipart Request: <ERROR> %v", err)
@ -2010,7 +2339,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string
nilObject := "dummy-object" nilObject := "dummy-object"
nilReq, err := newTestSignedRequestV4("POST", getNewMultipartURL("", nilBucket, nilObject), nilReq, err := newTestSignedRequestV4("POST", getNewMultipartURL("", nilBucket, nilObject),
0, nil, "", "") 0, nil, "", "", nil)
if err != nil { if err != nil {
t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@ -2046,7 +2375,7 @@ func testAPINewMultipartHandlerParallel(obj ObjectLayer, instanceType, bucketNam
defer wg.Done() defer wg.Done()
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
// construct HTTP request NewMultipartUpload. // construct HTTP request NewMultipartUpload.
req, err := newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, objectName), 0, nil, credentials.AccessKey, credentials.SecretKey) req, err := newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, objectName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Failed to create HTTP request for NewMultipart request: <ERROR> %v", err) t.Fatalf("Failed to create HTTP request for NewMultipart request: <ERROR> %v", err)
@ -2362,7 +2691,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
} }
// Indicating that all parts are uploaded and initiating CompleteMultipartUpload. // Indicating that all parts are uploaded and initiating CompleteMultipartUpload.
req, err = newTestSignedRequestV4("POST", getCompleteMultipartUploadURL("", bucketName, objectName, testCase.uploadID), req, err = newTestSignedRequestV4("POST", getCompleteMultipartUploadURL("", bucketName, objectName, testCase.uploadID),
int64(len(completeBytes)), bytes.NewReader(completeBytes), testCase.accessKey, testCase.secretKey) int64(len(completeBytes)), bytes.NewReader(completeBytes), testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Failed to create HTTP request for CompleteMultipartUpload: <ERROR> %v", err) t.Fatalf("Failed to create HTTP request for CompleteMultipartUpload: <ERROR> %v", err)
} }
@ -2423,7 +2752,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
nilObject := "dummy-object" nilObject := "dummy-object"
nilReq, err := newTestSignedRequestV4("POST", getCompleteMultipartUploadURL("", nilBucket, nilObject, "dummy-uploadID"), nilReq, err := newTestSignedRequestV4("POST", getCompleteMultipartUploadURL("", nilBucket, nilObject, "dummy-uploadID"),
0, nil, "", "") 0, nil, "", "", nil)
if err != nil { if err != nil {
t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@ -2547,7 +2876,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
var req *http.Request var req *http.Request
// Indicating that all parts are uploaded and initiating abortMultipartUpload. // Indicating that all parts are uploaded and initiating abortMultipartUpload.
req, err = newTestSignedRequestV4("DELETE", getAbortMultipartUploadURL("", testCase.bucket, testCase.object, testCase.uploadID), req, err = newTestSignedRequestV4("DELETE", getAbortMultipartUploadURL("", testCase.bucket, testCase.object, testCase.uploadID),
0, nil, testCase.accessKey, testCase.secretKey) 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Failed to create HTTP request for AbortMultipartUpload: <ERROR> %v", err) t.Fatalf("Failed to create HTTP request for AbortMultipartUpload: <ERROR> %v", err)
} }
@ -2586,7 +2915,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
nilObject := "dummy-object" nilObject := "dummy-object"
nilReq, err := newTestSignedRequestV4("DELETE", getAbortMultipartUploadURL("", nilBucket, nilObject, "dummy-uploadID"), nilReq, err := newTestSignedRequestV4("DELETE", getAbortMultipartUploadURL("", nilBucket, nilObject, "dummy-uploadID"),
0, nil, "", "") 0, nil, "", "", nil)
if err != nil { if err != nil {
t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@ -2692,7 +3021,7 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
// construct HTTP request for Delete Object end point. // construct HTTP request for Delete Object end point.
req, err = newTestSignedRequestV4("DELETE", getDeleteObjectURL("", testCase.bucketName, testCase.objectName), req, err = newTestSignedRequestV4("DELETE", getDeleteObjectURL("", testCase.bucketName, testCase.objectName),
0, nil, testCase.accessKey, testCase.secretKey) 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for Delete Object: <ERROR> %v", i+1, err) t.Fatalf("Test %d: Failed to create HTTP request for Delete Object: <ERROR> %v", i+1, err)
@ -2710,7 +3039,7 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string
recV2 := httptest.NewRecorder() recV2 := httptest.NewRecorder()
// construct HTTP request for Delete Object endpoint. // construct HTTP request for Delete Object endpoint.
reqV2, err = newTestSignedRequestV2("DELETE", getDeleteObjectURL("", testCase.bucketName, testCase.objectName), reqV2, err = newTestSignedRequestV2("DELETE", getDeleteObjectURL("", testCase.bucketName, testCase.objectName),
0, nil, testCase.accessKey, testCase.secretKey) 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Failed to create HTTP request for NewMultipart Request: <ERROR> %v", err) t.Fatalf("Failed to create HTTP request for NewMultipart Request: <ERROR> %v", err)
@ -2747,7 +3076,7 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string
nilObject := "dummy-object" nilObject := "dummy-object"
nilReq, err := newTestSignedRequestV4("DELETE", getDeleteObjectURL("", nilBucket, nilObject), nilReq, err := newTestSignedRequestV4("DELETE", getDeleteObjectURL("", nilBucket, nilObject),
0, nil, "", "") 0, nil, "", "", nil)
if err != nil { if err != nil {
t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) t.Errorf("Minio %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
@ -2769,7 +3098,7 @@ func testAPIPutObjectPartHandlerPreSign(obj ObjectLayer, instanceType, bucketNam
testObject := "testobject" testObject := "testobject"
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
req, err := newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, "testobject"), req, err := newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, "testobject"),
0, nil, credentials.AccessKey, credentials.SecretKey) 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil { if err != nil {
t.Fatalf("[%s] - Failed to create a signed request to initiate multipart upload for %s/%s: <ERROR> %v", t.Fatalf("[%s] - Failed to create a signed request to initiate multipart upload for %s/%s: <ERROR> %v",
instanceType, bucketName, testObject, err) instanceType, bucketName, testObject, err)
@ -2836,7 +3165,7 @@ func testAPIPutObjectPartHandlerStreaming(obj ObjectLayer, instanceType, bucketN
testObject := "testobject" testObject := "testobject"
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
req, err := newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, "testobject"), req, err := newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, "testobject"),
0, nil, credentials.AccessKey, credentials.SecretKey) 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil { if err != nil {
t.Fatalf("[%s] - Failed to create a signed request to initiate multipart upload for %s/%s: <ERROR> %v", t.Fatalf("[%s] - Failed to create a signed request to initiate multipart upload for %s/%s: <ERROR> %v",
instanceType, bucketName, testObject, err) instanceType, bucketName, testObject, err)
@ -3107,7 +3436,7 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
// constructing a v4 signed HTTP request. // constructing a v4 signed HTTP request.
reqV4, err = newTestSignedRequestV4("PUT", reqV4, err = newTestSignedRequestV4("PUT",
getPutObjectPartURL("", bucketName, test.objectName, uploadID, test.partNumber), getPutObjectPartURL("", bucketName, test.objectName, uploadID, test.partNumber),
0, test.reader, test.accessKey, test.secretKey) 0, test.reader, test.accessKey, test.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Failed to create a signed V4 request to upload part for %s/%s: <ERROR> %v", t.Fatalf("Failed to create a signed V4 request to upload part for %s/%s: <ERROR> %v",
bucketName, test.objectName, err) bucketName, test.objectName, err)
@ -3116,7 +3445,7 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
// construct HTTP request for PutObject Part Object endpoint. // construct HTTP request for PutObject Part Object endpoint.
reqV2, err = newTestSignedRequestV2("PUT", reqV2, err = newTestSignedRequestV2("PUT",
getPutObjectPartURL("", bucketName, test.objectName, uploadID, test.partNumber), getPutObjectPartURL("", bucketName, test.objectName, uploadID, test.partNumber),
0, test.reader, test.accessKey, test.secretKey) 0, test.reader, test.accessKey, test.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d %s Failed to create a V2 signed request to upload part for %s/%s: <ERROR> %v", i+1, instanceType, t.Fatalf("Test %d %s Failed to create a V2 signed request to upload part for %s/%s: <ERROR> %v", i+1, instanceType,
@ -3218,7 +3547,7 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
nilObject := "dummy-object" nilObject := "dummy-object"
nilReq, err := newTestSignedRequestV4("PUT", getPutObjectPartURL("", nilBucket, nilObject, "0", "0"), nilReq, err := newTestSignedRequestV4("PUT", getPutObjectPartURL("", nilBucket, nilObject, "0", "0"),
0, bytes.NewReader([]byte("testNilObjLayer")), "", "") 0, bytes.NewReader([]byte("testNilObjLayer")), "", "", nil)
if err != nil { if err != nil {
t.Errorf("Minio %s: Failed to create http request for testing the response when object Layer is set to `nil`.", instanceType) t.Errorf("Minio %s: Failed to create http request for testing the response when object Layer is set to `nil`.", instanceType)
@ -3241,7 +3570,7 @@ func testAPIListObjectPartsHandlerPreSign(obj ObjectLayer, instanceType, bucketN
testObject := "testobject" testObject := "testobject"
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
req, err := newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, testObject), req, err := newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, testObject),
0, nil, credentials.AccessKey, credentials.SecretKey) 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil { if err != nil {
t.Fatalf("[%s] - Failed to create a signed request to initiate multipart upload for %s/%s: <ERROR> %v", t.Fatalf("[%s] - Failed to create a signed request to initiate multipart upload for %s/%s: <ERROR> %v",
instanceType, bucketName, testObject, err) instanceType, bucketName, testObject, err)
@ -3264,7 +3593,7 @@ func testAPIListObjectPartsHandlerPreSign(obj ObjectLayer, instanceType, bucketN
rec = httptest.NewRecorder() rec = httptest.NewRecorder()
req, err = newTestSignedRequestV4("PUT", req, err = newTestSignedRequestV4("PUT",
getPutObjectPartURL("", bucketName, testObject, mpartResp.UploadID, "1"), getPutObjectPartURL("", bucketName, testObject, mpartResp.UploadID, "1"),
int64(len("hello")), bytes.NewReader([]byte("hello")), credentials.AccessKey, credentials.SecretKey) int64(len("hello")), bytes.NewReader([]byte("hello")), credentials.AccessKey, credentials.SecretKey, nil)
if err != nil { if err != nil {
t.Fatalf("[%s] - Failed to create a signed request to initiate multipart upload for %s/%s: <ERROR> %v", t.Fatalf("[%s] - Failed to create a signed request to initiate multipart upload for %s/%s: <ERROR> %v",
instanceType, bucketName, testObject, err) instanceType, bucketName, testObject, err)
@ -3424,7 +3753,7 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str
// constructing a v4 signed HTTP request for ListMultipartUploads. // constructing a v4 signed HTTP request for ListMultipartUploads.
reqV4, err = newTestSignedRequestV4("GET", reqV4, err = newTestSignedRequestV4("GET",
getListMultipartURLWithParams("", bucketName, testObject, uploadID, test.maxParts, test.partNumberMarker, ""), getListMultipartURLWithParams("", bucketName, testObject, uploadID, test.maxParts, test.partNumberMarker, ""),
0, nil, credentials.AccessKey, credentials.SecretKey) 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Failed to create a V4 signed request to list object parts for %s/%s: <ERROR> %v.", t.Fatalf("Failed to create a V4 signed request to list object parts for %s/%s: <ERROR> %v.",
@ -3434,7 +3763,7 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str
// construct HTTP request for PutObject Part Object endpoint. // construct HTTP request for PutObject Part Object endpoint.
reqV2, err = newTestSignedRequestV2("GET", reqV2, err = newTestSignedRequestV2("GET",
getListMultipartURLWithParams("", bucketName, testObject, uploadID, test.maxParts, test.partNumberMarker, ""), getListMultipartURLWithParams("", bucketName, testObject, uploadID, test.maxParts, test.partNumberMarker, ""),
0, nil, credentials.AccessKey, credentials.SecretKey) 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Failed to create a V2 signed request to list object parts for %s/%s: <ERROR> %v.", t.Fatalf("Failed to create a V2 signed request to list object parts for %s/%s: <ERROR> %v.",
@ -3522,7 +3851,7 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str
nilReq, err := newTestSignedRequestV4("GET", nilReq, err := newTestSignedRequestV4("GET",
getListMultipartURLWithParams("", nilBucket, nilObject, "dummy-uploadID", "0", "0", ""), getListMultipartURLWithParams("", nilBucket, nilObject, "dummy-uploadID", "0", "0", ""),
0, nil, "", "") 0, nil, "", "", nil)
if err != nil { if err != nil {
t.Errorf("Minio %s:Failed to create http request for testing the response when object Layer is set to `nil`.", instanceType) t.Errorf("Minio %s:Failed to create http request for testing the response when object Layer is set to `nil`.", instanceType)
} }

View File

@ -32,6 +32,7 @@ import (
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"encoding/pem" "encoding/pem"
"encoding/xml"
"errors" "errors"
"fmt" "fmt"
"io" "io"
@ -1110,9 +1111,9 @@ const (
func newTestSignedRequest(method, urlStr string, contentLength int64, body io.ReadSeeker, accessKey, secretKey string, signer signerType) (*http.Request, error) { func newTestSignedRequest(method, urlStr string, contentLength int64, body io.ReadSeeker, accessKey, secretKey string, signer signerType) (*http.Request, error) {
if signer == signerV2 { if signer == signerV2 {
return newTestSignedRequestV2(method, urlStr, contentLength, body, accessKey, secretKey) return newTestSignedRequestV2(method, urlStr, contentLength, body, accessKey, secretKey, nil)
} }
return newTestSignedRequestV4(method, urlStr, contentLength, body, accessKey, secretKey) return newTestSignedRequestV4(method, urlStr, contentLength, body, accessKey, secretKey, nil)
} }
// Returns request with correct signature but with incorrect SHA256. // Returns request with correct signature but with incorrect SHA256.
@ -1139,7 +1140,7 @@ func newTestSignedBadSHARequest(method, urlStr string, contentLength int64, body
} }
// Returns new HTTP request object signed with signature v2. // Returns new HTTP request object signed with signature v2.
func newTestSignedRequestV2(method, urlStr string, contentLength int64, body io.ReadSeeker, accessKey, secretKey string) (*http.Request, error) { func newTestSignedRequestV2(method, urlStr string, contentLength int64, body io.ReadSeeker, accessKey, secretKey string, headers map[string]string) (*http.Request, error) {
req, err := newTestRequest(method, urlStr, contentLength, body) req, err := newTestRequest(method, urlStr, contentLength, body)
if err != nil { if err != nil {
return nil, err return nil, err
@ -1151,6 +1152,10 @@ func newTestSignedRequestV2(method, urlStr string, contentLength int64, body io.
return req, nil return req, nil
} }
for k, v := range headers {
req.Header.Add(k, v)
}
err = signRequestV2(req, accessKey, secretKey) err = signRequestV2(req, accessKey, secretKey)
if err != nil { if err != nil {
return nil, err return nil, err
@ -1160,7 +1165,7 @@ func newTestSignedRequestV2(method, urlStr string, contentLength int64, body io.
} }
// Returns new HTTP request object signed with signature v4. // Returns new HTTP request object signed with signature v4.
func newTestSignedRequestV4(method, urlStr string, contentLength int64, body io.ReadSeeker, accessKey, secretKey string) (*http.Request, error) { func newTestSignedRequestV4(method, urlStr string, contentLength int64, body io.ReadSeeker, accessKey, secretKey string, headers map[string]string) (*http.Request, error) {
req, err := newTestRequest(method, urlStr, contentLength, body) req, err := newTestRequest(method, urlStr, contentLength, body)
if err != nil { if err != nil {
return nil, err return nil, err
@ -1171,6 +1176,10 @@ func newTestSignedRequestV4(method, urlStr string, contentLength int64, body io.
return req, nil return req, nil
} }
for k, v := range headers {
req.Header.Add(k, v)
}
err = signRequestV4(req, accessKey, secretKey) err = signRequestV4(req, accessKey, secretKey)
if err != nil { if err != nil {
return nil, err return nil, err
@ -2332,3 +2341,101 @@ func TestToErrIsNil(t *testing.T) {
t.Errorf("Test expected error code to be ErrNone, failed instead provided %d", toAPIErrorCode(nil)) t.Errorf("Test expected error code to be ErrNone, failed instead provided %d", toAPIErrorCode(nil))
} }
} }
// Uploads an object using DummyDataGen directly via the http
// handler. Each part in a multipart object is a new DummyDataGen
// instance (so the part sizes are needed to reconstruct the whole
// object). When `len(partSizes) == 1`, asMultipart is used to upload
// the object as multipart with 1 part or as a regular single object.
//
// All upload failures are considered test errors - this function is
// intended as a helper for other tests.
func uploadTestObject(t *testing.T, apiRouter http.Handler, creds auth.Credentials, bucketName, objectName string,
	partSizes []int64, metadata map[string]string, asMultipart bool) {

	if len(partSizes) == 0 {
		t.Fatalf("Cannot upload an object without part sizes")
	}

	// More than one part always implies a multipart upload.
	if len(partSizes) > 1 {
		asMultipart = true
	}

	// Fail the test when a handler response does not carry the
	// expected HTTP status code.
	expectStatus := func(rec *httptest.ResponseRecorder, exp int) {
		if rec.Code != exp {
			b, err := ioutil.ReadAll(rec.Body)
			t.Fatalf("Expected: %v, Got: %v, Body: %s, err: %v", exp, rec.Code, string(b), err)
		}
	}

	// Serve a request through the API router and capture the
	// recorded response.
	serve := func(req *http.Request) *httptest.ResponseRecorder {
		rec := httptest.NewRecorder()
		apiRouter.ServeHTTP(rec, req)
		return rec
	}

	if !asMultipart {
		// Regular single PUT object upload.
		body := NewDummyDataGen(partSizes[0], 0)
		req, err := newTestSignedRequestV4("PUT", getPutObjectURL("", bucketName, objectName),
			partSizes[0], body, creds.AccessKey, creds.SecretKey, metadata)
		if err != nil {
			t.Fatalf("Unexpected err: %#v", err)
		}
		expectStatus(serve(req), http.StatusOK)
		return
	}

	// Multipart upload - each part is a new DummyDataGen
	// (so the part lengths are required to verify the
	// object when reading).

	// Initiate the multipart upload.
	initReq, err := newTestSignedRequestV4("POST", getNewMultipartURL("", bucketName, objectName),
		0, nil, creds.AccessKey, creds.SecretKey, metadata)
	if err != nil {
		t.Fatalf("Unexpected err: %#v", err)
	}
	initRec := serve(initReq)
	expectStatus(initRec, http.StatusOK)

	mpResp := &InitiateMultipartUploadResponse{}
	if err = xml.NewDecoder(initRec.Body).Decode(mpResp); err != nil {
		t.Fatalf("Error decoding the recorded response Body")
	}
	uploadID := mpResp.UploadID

	// Upload every part, collecting the returned ETags for the
	// final complete-multipart call. Each part's DummyDataGen is
	// offset by the cumulative length of the preceding parts.
	var completed []CompletePart
	var offset int64
	for i, partLen := range partSizes {
		partID := i + 1
		partBody := NewDummyDataGen(partLen, offset)
		offset += partLen

		partReq, errP := newTestSignedRequestV4("PUT",
			getPutObjectPartURL("", bucketName, objectName, uploadID, fmt.Sprintf("%d", partID)),
			partLen, partBody, creds.AccessKey, creds.SecretKey, metadata)
		if errP != nil {
			t.Fatalf("Unexpected err: %#v", errP)
		}
		partRec := serve(partReq)
		expectStatus(partRec, http.StatusOK)

		etag := partRec.Header().Get("ETag")
		if etag == "" {
			t.Fatalf("Unexpected empty etag")
		}
		// Strip the surrounding quotes from the returned ETag.
		completed = append(completed, CompletePart{partID, etag[1 : len(etag)-1]})
	}

	// Call CompleteMultipart API to stitch the parts together.
	compBody, err := xml.Marshal(CompleteMultipartUpload{Parts: completed})
	if err != nil {
		t.Fatalf("Unexpected err: %#v", err)
	}
	compReq, errC := newTestSignedRequestV4("POST",
		getCompleteMultipartUploadURL("", bucketName, objectName, uploadID),
		int64(len(compBody)), bytes.NewReader(compBody),
		creds.AccessKey, creds.SecretKey, metadata)
	if errC != nil {
		t.Fatalf("Unexpected err: %#v", errC)
	}
	expectStatus(serve(compReq), http.StatusOK)
}

View File

@ -718,14 +718,17 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) {
return return
} }
length := objInfo.Size
if objectAPI.IsEncryptionSupported() { if objectAPI.IsEncryptionSupported() {
if _, err = DecryptObjectInfo(&objInfo, r.Header); err != nil { if _, err = DecryptObjectInfo(objInfo, r.Header); err != nil {
writeWebErrorResponse(w, err) writeWebErrorResponse(w, err)
return return
} }
if crypto.IsEncrypted(objInfo.UserDefined) {
length, _ = objInfo.DecryptedSize()
}
} }
var startOffset int64 var startOffset int64
length := objInfo.Size
var writer io.Writer var writer io.Writer
writer = w writer = w
if objectAPI.IsEncryptionSupported() && crypto.S3.IsEncrypted(objInfo.UserDefined) { if objectAPI.IsEncryptionSupported() && crypto.S3.IsEncrypted(objInfo.UserDefined) {
@ -822,17 +825,21 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
if err != nil { if err != nil {
return err return err
} }
length := info.Size
if objectAPI.IsEncryptionSupported() { if objectAPI.IsEncryptionSupported() {
if _, err = DecryptObjectInfo(&info, r.Header); err != nil { if _, err = DecryptObjectInfo(info, r.Header); err != nil {
writeWebErrorResponse(w, err) writeWebErrorResponse(w, err)
return err return err
} }
if crypto.IsEncrypted(info.UserDefined) {
length, _ = info.DecryptedSize()
}
} }
header := &zip.FileHeader{ header := &zip.FileHeader{
Name: strings.TrimPrefix(objectName, args.Prefix), Name: strings.TrimPrefix(objectName, args.Prefix),
Method: zip.Deflate, Method: zip.Deflate,
UncompressedSize64: uint64(info.Size), UncompressedSize64: uint64(length),
UncompressedSize: uint32(info.Size), UncompressedSize: uint32(length),
} }
wr, err := archive.CreateHeader(header) wr, err := archive.CreateHeader(header)
if err != nil { if err != nil {
@ -840,7 +847,6 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
return err return err
} }
var startOffset int64 var startOffset int64
length := info.Size
var writer io.Writer var writer io.Writer
writer = wr writer = wr
if objectAPI.IsEncryptionSupported() && crypto.S3.IsEncrypted(info.UserDefined) { if objectAPI.IsEncryptionSupported() && crypto.S3.IsEncrypted(info.UserDefined) {

View File

@ -21,6 +21,7 @@ import (
"fmt" "fmt"
"hash/crc32" "hash/crc32"
"io" "io"
"net/http"
"sort" "sort"
"strings" "strings"
"sync" "sync"
@ -578,6 +579,11 @@ func (s *xlSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, err err
// --- Object Operations --- // --- Object Operations ---
// GetObjectNInfo - returns object info and a locked object
// Read(Closer). Simply delegates to the set selected by hashing the
// object name, which owns the object and its namespace lock.
func (s *xlSets) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header) (gr *GetObjectReader, err error) {
	return s.getHashedSet(object).GetObjectNInfo(ctx, bucket, object, rs, h)
}
// GetObject - reads an object from the hashedSet based on the object name. // GetObject - reads an object from the hashedSet based on the object name.
func (s *xlSets) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { func (s *xlSets) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
return s.getHashedSet(object).GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts) return s.getHashedSet(object).GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts)

View File

@ -17,9 +17,11 @@
package cmd package cmd
import ( import (
"bytes"
"context" "context"
"encoding/hex" "encoding/hex"
"io" "io"
"net/http"
"path" "path"
"strconv" "strconv"
"strings" "strings"
@ -162,6 +164,57 @@ func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBuc
return objInfo, nil return objInfo, nil
} }
// GetObjectNInfo - returns object info and an object
// Read(Closer). When err != nil, the returned reader is always nil.
//
// The read lock taken here is expected to be released when the
// returned reader is closed (nsUnlocker is passed along as a cleanup
// function); on early error paths it is released explicitly.
func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header) (gr *GetObjectReader, err error) {
	// Acquire lock
	lock := xl.nsMutex.NewNSLock(bucket, object)
	if err = lock.GetRLock(globalObjectTimeout); err != nil {
		return nil, err
	}
	nsUnlocker := lock.RUnlock

	if err = checkGetObjArgs(ctx, bucket, object); err != nil {
		nsUnlocker()
		return nil, err
	}

	// Handler directory request by returning a reader that
	// returns no bytes.
	if hasSuffix(object, slashSeparator) {
		if !xl.isObjectDir(bucket, object) {
			nsUnlocker()
			return nil, toObjectErr(errFileNotFound, bucket, object)
		}
		var objInfo ObjectInfo
		if objInfo, err = xl.getObjectInfoDir(ctx, bucket, object); err != nil {
			nsUnlocker()
			return nil, toObjectErr(err, bucket, object)
		}
		// Empty reader; the unlocker fires when the caller closes it.
		return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, nsUnlocker), nil
	}

	var objInfo ObjectInfo
	objInfo, err = xl.getObjectInfo(ctx, bucket, object)
	if err != nil {
		nsUnlocker()
		return nil, toObjectErr(err, bucket, object)
	}

	// fn wraps a raw reader into the final GetObjectReader (applying
	// any range/decryption transforms); off/length are the absolute
	// byte range to read from the stored object.
	fn, off, length, nErr := NewGetObjectReader(rs, objInfo, nsUnlocker)
	if nErr != nil {
		// NOTE(review): nsUnlocker is not invoked on this path -
		// assumes NewGetObjectReader releases it on error; confirm,
		// otherwise the read lock leaks.
		return nil, nErr
	}

	// Stream the erasure-coded read through a pipe: the producer
	// goroutine writes object data, the consumer side is handed to fn.
	pr, pw := io.Pipe()
	go func() {
		// CloseWithError(nil) closes the pipe cleanly on success.
		err := xl.getObject(ctx, bucket, object, off, length, pw, "", ObjectOptions{})
		pw.CloseWithError(err)
	}()
	return fn(pr, h)
}
// GetObject - reads an object erasured coded across multiple // GetObject - reads an object erasured coded across multiple
// disks. Supports additional parameters like offset and length // disks. Supports additional parameters like offset and length
// which are synonymous with HTTP Range requests. // which are synonymous with HTTP Range requests.
@ -517,6 +570,7 @@ func (xl xlObjects) PutObject(ctx context.Context, bucket string, object string,
if err = checkPutObjectArgs(ctx, bucket, object, xl, data.Size()); err != nil { if err = checkPutObjectArgs(ctx, bucket, object, xl, data.Size()); err != nil {
return ObjectInfo{}, err return ObjectInfo{}, err
} }
// Lock the object. // Lock the object.
objectLock := xl.nsMutex.NewNSLock(bucket, object) objectLock := xl.nsMutex.NewNSLock(bucket, object)
if err := objectLock.GetLock(globalObjectTimeout); err != nil { if err := objectLock.GetLock(globalObjectTimeout); err != nil {

View File

@ -126,3 +126,34 @@ func (nopCloser) Close() error { return nil }
func NopCloser(w io.Writer) io.WriteCloser { func NopCloser(w io.Writer) io.WriteCloser {
return nopCloser{w} return nopCloser{w}
} }
// SkipReader skips a given number of bytes and then returns all
// remaining data.
type SkipReader struct {
io.Reader
skipCount int64
}
func (s *SkipReader) Read(p []byte) (int, error) {
l := int64(len(p))
if l == 0 {
return 0, nil
}
for s.skipCount > 0 {
if l > s.skipCount {
l = s.skipCount
}
n, err := s.Reader.Read(p[:l])
if err != nil {
return 0, err
}
s.skipCount -= int64(n)
}
return s.Reader.Read(p)
}
// NewSkipReader - creates a SkipReader
func NewSkipReader(r io.Reader, n int64) io.Reader {
return &SkipReader{r, n}
}

View File

@ -17,6 +17,8 @@
package ioutil package ioutil
import ( import (
"bytes"
"io"
goioutil "io/ioutil" goioutil "io/ioutil"
"os" "os"
"testing" "testing"
@ -73,3 +75,29 @@ func TestAppendFile(t *testing.T) {
t.Errorf("AppendFile() failed, expected: %s, got %s", expected, string(b)) t.Errorf("AppendFile() failed, expected: %s, got %s", expected, string(b))
} }
} }
func TestSkipReader(t *testing.T) {
testCases := []struct {
src io.Reader
skipLen int64
expected string
}{
{bytes.NewBuffer([]byte("")), 0, ""},
{bytes.NewBuffer([]byte("")), 1, ""},
{bytes.NewBuffer([]byte("abc")), 0, "abc"},
{bytes.NewBuffer([]byte("abc")), 1, "bc"},
{bytes.NewBuffer([]byte("abc")), 2, "c"},
{bytes.NewBuffer([]byte("abc")), 3, ""},
{bytes.NewBuffer([]byte("abc")), 4, ""},
}
for i, testCase := range testCases {
r := NewSkipReader(testCase.src, testCase.skipLen)
b, err := goioutil.ReadAll(r)
if err != nil {
t.Errorf("Case %d: Unexpected err %v", i, err)
}
if string(b) != testCase.expected {
t.Errorf("Case %d: Got wrong result: %v", i, string(b))
}
}
}