objectAPI: Fix object API interface, remove unnecessary structs.
ObjectAPI changes:

```
ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, *probe.Error)
ListMultipartUploads(bucket, objectPrefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, *probe.Error)
ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, *probe.Error)
CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (ObjectInfo, *probe.Error)
```
This commit is contained in:
parent 12515eabe2
commit 0479d4976b
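To make the new surface concrete before reading the diff, here is a minimal, self-contained sketch of the call shape this commit moves to: plain parameters in, a dedicated info struct out, instead of passing a resources struct both ways. The stub below is illustrative only; the field set of `ListMultipartsInfo` is trimmed and the function body is a stand-in, not minio's implementation.

```go
package main

import "fmt"

// Stand-in mirroring the shape of ListMultipartsInfo in this commit;
// fields trimmed to what the example needs.
type ListMultipartsInfo struct {
	IsTruncated        bool
	NextKeyMarker      string
	NextUploadIDMarker string
}

// listMultipartUploads mimics the flattened signature introduced here.
// A real implementation walks active upload sessions; this stub only
// demonstrates the call shape.
func listMultipartUploads(bucket, objectPrefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
	return ListMultipartsInfo{}, nil
}

func main() {
	info, err := listMultipartUploads("bucket", "photos/", "", "", "/", 1000)
	if err != nil {
		fmt.Println("list failed:", err)
		return
	}
	fmt.Println("truncated:", info.IsTruncated)
}
```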
```diff
@@ -58,20 +58,20 @@ func encodeResponse(response interface{}) []byte {
 }
 
 // Write object header
-func setObjectHeaders(w http.ResponseWriter, objectInfo ObjectInfo, contentRange *httpRange) {
+func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, contentRange *httpRange) {
     // set common headers
     setCommonHeaders(w)
 
     // set object-related metadata headers
-    lastModified := objectInfo.ModifiedTime.UTC().Format(http.TimeFormat)
+    lastModified := objInfo.ModifiedTime.UTC().Format(http.TimeFormat)
     w.Header().Set("Last-Modified", lastModified)
 
-    w.Header().Set("Content-Type", objectInfo.ContentType)
-    if objectInfo.MD5Sum != "" {
-        w.Header().Set("ETag", "\""+objectInfo.MD5Sum+"\"")
+    w.Header().Set("Content-Type", objInfo.ContentType)
+    if objInfo.MD5Sum != "" {
+        w.Header().Set("ETag", "\""+objInfo.MD5Sum+"\"")
     }
 
-    w.Header().Set("Content-Length", strconv.FormatInt(objectInfo.Size, 10))
+    w.Header().Set("Content-Length", strconv.FormatInt(objInfo.Size, 10))
 
     // for providing ranged content
     if contentRange != nil {
```
```diff
@@ -21,7 +21,7 @@ import (
     "strconv"
 )
 
-// parse bucket url queries
+// Parse bucket url queries
 func getBucketResources(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType string) {
     prefix = values.Get("prefix")
     marker = values.Get("marker")
@@ -31,27 +31,29 @@ func getBucketResources(values url.Values) (prefix, marker, delimiter string, ma
     return
 }
 
-// part bucket url queries for ?uploads
-func getBucketMultipartResources(values url.Values) (v BucketMultipartResourcesMetadata) {
-    v.Prefix = values.Get("prefix")
-    v.KeyMarker = values.Get("key-marker")
-    v.MaxUploads, _ = strconv.Atoi(values.Get("max-uploads"))
-    v.Delimiter = values.Get("delimiter")
-    v.EncodingType = values.Get("encoding-type")
-    v.UploadIDMarker = values.Get("upload-id-marker")
+// Parse bucket url queries for ?uploads
+func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int, encodingType string) {
+    prefix = values.Get("prefix")
+    keyMarker = values.Get("key-marker")
+    uploadIDMarker = values.Get("upload-id-marker")
+    delimiter = values.Get("delimiter")
+    maxUploads, _ = strconv.Atoi(values.Get("max-uploads"))
+    encodingType = values.Get("encoding-type")
     return
 }
 
-// parse object url queries
-func getObjectResources(values url.Values) (v ObjectResourcesMetadata) {
-    v.UploadID = values.Get("uploadId")
-    v.PartNumberMarker, _ = strconv.Atoi(values.Get("part-number-marker"))
-    v.MaxParts, _ = strconv.Atoi(values.Get("max-parts"))
-    v.EncodingType = values.Get("encoding-type")
+// Parse object url queries
+func getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string) {
+    uploadID = values.Get("uploadId")
+    partNumberMarker, _ = strconv.Atoi(values.Get("part-number-marker"))
+    maxParts, _ = strconv.Atoi(values.Get("max-parts"))
+    encodingType = values.Get("encoding-type")
     return
 }
 
-// get upload id.
+// Get upload id.
 func getUploadID(values url.Values) (uploadID string) {
-    return getObjectResources(values).UploadID
+    uploadID, _, _, _ = getObjectResources(values)
+    return
 }
```
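The flattened helpers above are plain accessors over `url.Values`. A minimal, runnable sketch of the same pattern, with names mirroring the diff (this is a standalone reconstruction, not the actual minio source file):

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// getObjectResources mirrors the flattened helper in this commit: it pulls
// multipart listing parameters straight out of the query string and returns
// them as plain values instead of a struct.
func getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string) {
	uploadID = values.Get("uploadId")
	partNumberMarker, _ = strconv.Atoi(values.Get("part-number-marker"))
	maxParts, _ = strconv.Atoi(values.Get("max-parts"))
	encodingType = values.Get("encoding-type")
	return
}

func main() {
	u, _ := url.Parse("http://localhost:9000/bucket/object?uploadId=abc123&part-number-marker=5&max-parts=100")
	uploadID, marker, maxParts, _ := getObjectResources(u.Query())
	fmt.Println(uploadID, marker, maxParts) // abc123 5 100
}
```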
```diff
@@ -245,7 +245,7 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
 }
 
 // generates an ListObjects response for the said bucket with other enumerated options.
-func generateListObjectsResponse(bucket, prefix, marker, delimiter string, maxKeys int, resp ListObjectsResult) ListObjectsResponse {
+func generateListObjectsResponse(bucket, prefix, marker, delimiter string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
     var contents []Object
     var prefixes []CommonPrefix
     var owner = Owner{}
@@ -317,25 +317,25 @@ func generateCompleteMultpartUploadResponse(bucket, key, location, etag string)
 }
 
 // generateListPartsResult
-func generateListPartsResponse(objectMetadata ObjectResourcesMetadata) ListPartsResponse {
+func generateListPartsResponse(partsInfo ListPartsInfo) ListPartsResponse {
     // TODO - support EncodingType in xml decoding
     listPartsResponse := ListPartsResponse{}
-    listPartsResponse.Bucket = objectMetadata.Bucket
-    listPartsResponse.Key = objectMetadata.Object
-    listPartsResponse.UploadID = objectMetadata.UploadID
+    listPartsResponse.Bucket = partsInfo.Bucket
+    listPartsResponse.Key = partsInfo.Object
+    listPartsResponse.UploadID = partsInfo.UploadID
     listPartsResponse.StorageClass = "STANDARD"
     listPartsResponse.Initiator.ID = "minio"
     listPartsResponse.Initiator.DisplayName = "minio"
     listPartsResponse.Owner.ID = "minio"
     listPartsResponse.Owner.DisplayName = "minio"
 
-    listPartsResponse.MaxParts = objectMetadata.MaxParts
-    listPartsResponse.PartNumberMarker = objectMetadata.PartNumberMarker
-    listPartsResponse.IsTruncated = objectMetadata.IsTruncated
-    listPartsResponse.NextPartNumberMarker = objectMetadata.NextPartNumberMarker
+    listPartsResponse.MaxParts = partsInfo.MaxParts
+    listPartsResponse.PartNumberMarker = partsInfo.PartNumberMarker
+    listPartsResponse.IsTruncated = partsInfo.IsTruncated
+    listPartsResponse.NextPartNumberMarker = partsInfo.NextPartNumberMarker
 
-    listPartsResponse.Parts = make([]Part, len(objectMetadata.Part))
-    for index, part := range objectMetadata.Part {
+    listPartsResponse.Parts = make([]Part, len(partsInfo.Parts))
+    for index, part := range partsInfo.Parts {
         newPart := Part{}
         newPart.PartNumber = part.PartNumber
         newPart.ETag = "\"" + part.ETag + "\""
@@ -347,21 +347,21 @@ func generateListPartsResponse(objectMetadata ObjectResourcesMetadata) ListParts
 }
 
 // generateListMultipartUploadsResponse
-func generateListMultipartUploadsResponse(bucket string, metadata BucketMultipartResourcesMetadata) ListMultipartUploadsResponse {
+func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMultipartsInfo) ListMultipartUploadsResponse {
     listMultipartUploadsResponse := ListMultipartUploadsResponse{}
     listMultipartUploadsResponse.Bucket = bucket
-    listMultipartUploadsResponse.Delimiter = metadata.Delimiter
-    listMultipartUploadsResponse.IsTruncated = metadata.IsTruncated
-    listMultipartUploadsResponse.EncodingType = metadata.EncodingType
-    listMultipartUploadsResponse.Prefix = metadata.Prefix
-    listMultipartUploadsResponse.KeyMarker = metadata.KeyMarker
-    listMultipartUploadsResponse.NextKeyMarker = metadata.NextKeyMarker
-    listMultipartUploadsResponse.MaxUploads = metadata.MaxUploads
-    listMultipartUploadsResponse.NextUploadIDMarker = metadata.NextUploadIDMarker
-    listMultipartUploadsResponse.UploadIDMarker = metadata.UploadIDMarker
+    listMultipartUploadsResponse.Delimiter = multipartsInfo.Delimiter
+    listMultipartUploadsResponse.IsTruncated = multipartsInfo.IsTruncated
+    listMultipartUploadsResponse.EncodingType = multipartsInfo.EncodingType
+    listMultipartUploadsResponse.Prefix = multipartsInfo.Prefix
+    listMultipartUploadsResponse.KeyMarker = multipartsInfo.KeyMarker
+    listMultipartUploadsResponse.NextKeyMarker = multipartsInfo.NextKeyMarker
+    listMultipartUploadsResponse.MaxUploads = multipartsInfo.MaxUploads
+    listMultipartUploadsResponse.NextUploadIDMarker = multipartsInfo.NextUploadIDMarker
+    listMultipartUploadsResponse.UploadIDMarker = multipartsInfo.UploadIDMarker
 
-    listMultipartUploadsResponse.Uploads = make([]Upload, len(metadata.Upload))
-    for index, upload := range metadata.Upload {
+    listMultipartUploadsResponse.Uploads = make([]Upload, len(multipartsInfo.Uploads))
+    for index, upload := range multipartsInfo.Uploads {
         newUpload := Upload{}
         newUpload.UploadID = upload.UploadID
         newUpload.Key = upload.Object
```
```diff
@@ -174,16 +174,16 @@ func (api objectStorageAPI) ListMultipartUploadsHandler(w http.ResponseWriter, r
         }
     }
 
-    resources := getBucketMultipartResources(r.URL.Query())
-    if resources.MaxUploads < 0 {
+    prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, _ := getBucketMultipartResources(r.URL.Query())
+    if maxUploads < 0 {
         writeErrorResponse(w, r, ErrInvalidMaxUploads, r.URL.Path)
         return
     }
-    if resources.MaxUploads == 0 {
-        resources.MaxUploads = maxObjectList
+    if maxUploads == 0 {
+        maxUploads = maxObjectList
     }
 
-    resources, err := api.ObjectAPI.ListMultipartUploads(bucket, resources)
+    listMultipartsInfo, err := api.ObjectAPI.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
     if err != nil {
         errorIf(err.Trace(), "ListMultipartUploads failed.", nil)
         switch err.ToGoError().(type) {
@@ -195,7 +195,7 @@ func (api objectStorageAPI) ListMultipartUploadsHandler(w http.ResponseWriter, r
         return
     }
     // generate response
-    response := generateListMultipartUploadsResponse(bucket, resources)
+    response := generateListMultipartUploadsResponse(bucket, listMultipartsInfo)
     encodedSuccessResponse := encodeResponse(response)
     // write headers.
     setCommonHeaders(w)
@@ -241,10 +241,10 @@ func (api objectStorageAPI) ListObjectsHandler(w http.ResponseWriter, r *http.Re
         maxkeys = maxObjectList
     }
 
-    listResp, err := api.ObjectAPI.ListObjects(bucket, prefix, marker, delimiter, maxkeys)
+    listObjectsInfo, err := api.ObjectAPI.ListObjects(bucket, prefix, marker, delimiter, maxkeys)
     if err == nil {
         // generate response
-        response := generateListObjectsResponse(bucket, prefix, marker, delimiter, maxkeys, listResp)
+        response := generateListObjectsResponse(bucket, prefix, marker, delimiter, maxkeys, listObjectsInfo)
         encodedSuccessResponse := encodeResponse(response)
         // Write headers
         setCommonHeaders(w)
@@ -306,10 +306,10 @@ func (api objectStorageAPI) ListBucketsHandler(w http.ResponseWriter, r *http.Re
         }
     }
 
-    buckets, err := api.ObjectAPI.ListBuckets()
+    bucketsInfo, err := api.ObjectAPI.ListBuckets()
     if err == nil {
         // generate response
-        response := generateListBucketsResponse(buckets)
+        response := generateListBucketsResponse(bucketsInfo)
         encodedSuccessResponse := encodeResponse(response)
         // write headers
         setCommonHeaders(w)
@@ -528,7 +528,7 @@ func (api objectStorageAPI) PostPolicyBucketHandler(w http.ResponseWriter, r *ht
         writeErrorResponse(w, r, apiErr, r.URL.Path)
         return
     }
-    objectInfo, err := api.ObjectAPI.PutObject(bucket, object, -1, fileBody, nil)
+    objInfo, err := api.ObjectAPI.PutObject(bucket, object, -1, fileBody, nil)
     if err != nil {
         errorIf(err.Trace(), "PutObject failed.", nil)
         switch err.ToGoError().(type) {
@@ -547,8 +547,8 @@ func (api objectStorageAPI) PostPolicyBucketHandler(w http.ResponseWriter, r *ht
         }
         return
     }
-    if objectInfo.MD5Sum != "" {
-        w.Header().Set("ETag", "\""+objectInfo.MD5Sum+"\"")
+    if objInfo.MD5Sum != "" {
+        w.Header().Set("ETag", "\""+objInfo.MD5Sum+"\"")
     }
     writeSuccessResponse(w, nil)
 }
```
```diff
@@ -23,15 +23,14 @@ import (
 
 var multipartsMetadataPath string
 
-// SetFSMultipartsMetadataPath - set custom multiparts session
-// metadata path.
+// SetFSMultipartsMetadataPath - set custom multiparts session metadata path.
 func setFSMultipartsMetadataPath(metadataPath string) {
     multipartsMetadataPath = metadataPath
 }
 
-// saveMultipartsSession - save multiparts
-func saveMultipartsSession(multiparts Multiparts) *probe.Error {
-    qc, err := quick.New(multiparts)
+// saveMultipartsSession - save multiparts.
+func saveMultipartsSession(mparts multiparts) *probe.Error {
+    qc, err := quick.New(mparts)
     if err != nil {
         return err.Trace()
     }
@@ -41,17 +40,17 @@ func saveMultipartsSession(multiparts Multiparts) *probe.Error {
     return nil
 }
 
-// loadMultipartsSession load multipart session file
-func loadMultipartsSession() (*Multiparts, *probe.Error) {
-    multiparts := &Multiparts{}
-    multiparts.Version = "1"
-    multiparts.ActiveSession = make(map[string]*MultipartSession)
-    qc, err := quick.New(multiparts)
+// loadMultipartsSession load multipart session file.
+func loadMultipartsSession() (*multiparts, *probe.Error) {
+    mparts := &multiparts{}
+    mparts.Version = "1"
+    mparts.ActiveSession = make(map[string]*multipartSession)
+    qc, err := quick.New(mparts)
     if err != nil {
         return nil, err.Trace()
     }
     if err := qc.Load(multipartsMetadataPath); err != nil {
         return nil, err.Trace()
     }
-    return qc.Data().(*Multiparts), nil
+    return qc.Data().(*multiparts), nil
 }
```
```diff
@@ -29,8 +29,8 @@ import (
 
 // ListObjects - lists all objects for a given prefix, returns up to
 // maxKeys number of objects per call.
-func (fs Filesystem) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsResult, *probe.Error) {
-    result := ListObjectsResult{}
+func (fs Filesystem) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, *probe.Error) {
+    result := ListObjectsInfo{}
     var queryPrefix string
 
     // Input validation.
@@ -41,15 +41,15 @@ func (fs Filesystem) ListObjects(bucket, prefix, marker, delimiter string, maxKe
     bucket = getActualBucketname(fs.path, bucket) // Get the right bucket name.
     bucketDir := filepath.Join(fs.path, bucket)
     // Verify if bucket exists.
-    if status, err := isDirExist(bucketDir); !status {
-        if err == nil {
+    if status, e := isDirExist(bucketDir); !status {
+        if e == nil {
             // File exists, but its not a directory.
             return result, probe.NewError(BucketNotFound{Bucket: bucket})
-        } else if os.IsNotExist(err) {
+        } else if os.IsNotExist(e) {
             // File does not exist.
             return result, probe.NewError(BucketNotFound{Bucket: bucket})
         } else {
-            return result, probe.NewError(err)
+            return result, probe.NewError(e)
         }
     }
     if !IsValidObjectPrefix(prefix) {
@@ -88,15 +88,15 @@ func (fs Filesystem) ListObjects(bucket, prefix, marker, delimiter string, maxKe
     // Verify if prefix exists.
     prefixDir := filepath.Dir(filepath.FromSlash(prefix))
     rootDir := filepath.Join(bucketDir, prefixDir)
-    _, err := isDirExist(rootDir)
-    if err != nil {
-        if os.IsNotExist(err) {
+    _, e := isDirExist(rootDir)
+    if e != nil {
+        if os.IsNotExist(e) {
             // Prefix does not exist, not an error just respond empty
             // list response.
             return result, nil
         }
         // Rest errors should be treated as failure.
-        return result, probe.NewError(err)
+        return result, probe.NewError(e)
     }
 
     recursive := true
@@ -111,7 +111,7 @@ func (fs Filesystem) ListObjects(bucket, prefix, marker, delimiter string, maxKe
     // popListObjectCh returns nil if the call to ListObject is done for the first time.
     // On further calls to ListObjects to retrive more objects within the timeout period,
     // popListObjectCh returns the channel from which rest of the objects can be retrieved.
-    objectInfoCh := fs.popListObjectCh(ListObjectParams{bucket, delimiter, marker, prefix})
+    objectInfoCh := fs.popListObjectCh(listObjectParams{bucket, delimiter, marker, prefix})
     if objectInfoCh == nil {
         if prefix != "" {
             // queryPrefix variable is set to value of the prefix to be searched.
@@ -141,7 +141,7 @@ func (fs Filesystem) ListObjects(bucket, prefix, marker, delimiter string, maxKe
     }
 
     if objInfo.Err != nil {
-        return ListObjectsResult{}, probe.NewError(objInfo.Err)
+        return ListObjectsInfo{}, probe.NewError(objInfo.Err)
     }
 
     if strings.Contains(objInfo.Name, "$multiparts") || strings.Contains(objInfo.Name, "$tmpobject") {
@@ -171,7 +171,7 @@ func (fs Filesystem) ListObjects(bucket, prefix, marker, delimiter string, maxKe
     if !objectInfoCh.IsClosed() {
         result.IsTruncated = true
         result.NextMarker = nextMarker
-        fs.pushListObjectCh(ListObjectParams{bucket, delimiter, nextMarker, prefix}, *objectInfoCh)
+        fs.pushListObjectCh(listObjectParams{bucket, delimiter, nextMarker, prefix}, *objectInfoCh)
     }
 
     return result, nil
```
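Only the result type changes at the API boundary, but the listing still depends on the channel-parking trick visible above: a partially drained tree walk is stored under its `listObjectParams` key and resumed when the client comes back with the previous `NextMarker`. A generic, self-contained sketch of that resume pattern under simplified types (this is not minio's actual code):

```go
package main

import (
	"fmt"
	"sync"
)

type listParams struct{ bucket, prefix, marker string }

// walkCache parks partially drained listing channels keyed by the exact
// request parameters, so a follow-up request whose marker equals the
// previous NextMarker can resume instead of re-walking the tree.
type walkCache struct {
	mu sync.Mutex
	m  map[listParams][]chan string
}

func (c *walkCache) push(p listParams, ch chan string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.m[p] = append(c.m[p], ch)
}

func (c *walkCache) pop(p listParams) chan string {
	c.mu.Lock()
	defer c.mu.Unlock()
	chs := c.m[p]
	if len(chs) == 0 {
		return nil // first call for these params: start a fresh walk
	}
	ch := chs[len(chs)-1]
	c.m[p] = chs[:len(chs)-1]
	return ch
}

func main() {
	cache := &walkCache{m: make(map[listParams][]chan string)}
	ch := make(chan string, 2)
	ch <- "obj1"
	cache.push(listParams{"bucket", "", "obj0"}, ch)
	if resumed := cache.pop(listParams{"bucket", "", "obj0"}); resumed != nil {
		fmt.Println(<-resumed) // obj1
	}
}
```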
```diff
@@ -95,7 +95,7 @@ func TestListObjects(t *testing.T) {
     // Formualting the result data set to be expected from ListObjects call inside the tests,
     // This will be used in testCases and used for asserting the correctness of ListObjects output in the tests.
 
-    resultCases := []ListObjectsResult{
+    resultCases := []ListObjectsInfo{
         // ListObjectsResult-0.
         // Testing for listing all objects in the bucket, (testCase 20,21,22).
         {
@@ -428,44 +428,44 @@ func TestListObjects(t *testing.T) {
         delimeter string
         maxKeys int
         // Expected output of ListObjects.
-        result ListObjectsResult
+        result ListObjectsInfo
         err error
         // Flag indicating whether the test is expected to pass or not.
         shouldPass bool
     }{
         // Test cases with invalid bucket names ( Test number 1-4 ).
-        {".test", "", "", "", 0, ListObjectsResult{}, BucketNameInvalid{Bucket: ".test"}, false},
-        {"Test", "", "", "", 0, ListObjectsResult{}, BucketNameInvalid{Bucket: "Test"}, false},
-        {"---", "", "", "", 0, ListObjectsResult{}, BucketNameInvalid{Bucket: "---"}, false},
-        {"ad", "", "", "", 0, ListObjectsResult{}, BucketNameInvalid{Bucket: "ad"}, false},
+        {".test", "", "", "", 0, ListObjectsInfo{}, BucketNameInvalid{Bucket: ".test"}, false},
+        {"Test", "", "", "", 0, ListObjectsInfo{}, BucketNameInvalid{Bucket: "Test"}, false},
+        {"---", "", "", "", 0, ListObjectsInfo{}, BucketNameInvalid{Bucket: "---"}, false},
+        {"ad", "", "", "", 0, ListObjectsInfo{}, BucketNameInvalid{Bucket: "ad"}, false},
         // Using an existing file for bucket name, but its not a directory (5).
-        {"simple-file.txt", "", "", "", 0, ListObjectsResult{}, BucketNotFound{Bucket: "simple-file.txt"}, false},
+        {"simple-file.txt", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "simple-file.txt"}, false},
         // Valid bucket names, but they donot exist (6-8).
-        {"volatile-bucket-1", "", "", "", 0, ListObjectsResult{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
-        {"volatile-bucket-2", "", "", "", 0, ListObjectsResult{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
-        {"volatile-bucket-3", "", "", "", 0, ListObjectsResult{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
+        {"volatile-bucket-1", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
+        {"volatile-bucket-2", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
+        {"volatile-bucket-3", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
         // Valid, existing bucket, but sending invalid delimeter values (9-10).
         // Empty string < "" > and forward slash < / > are the ony two valid arguments for delimeter.
-        {"test-bucket-list-object", "", "", "*", 0, ListObjectsResult{}, fmt.Errorf("delimiter '%s' is not supported", "*"), false},
-        {"test-bucket-list-object", "", "", "-", 0, ListObjectsResult{}, fmt.Errorf("delimiter '%s' is not supported", "-"), false},
+        {"test-bucket-list-object", "", "", "*", 0, ListObjectsInfo{}, fmt.Errorf("delimiter '%s' is not supported", "*"), false},
+        {"test-bucket-list-object", "", "", "-", 0, ListObjectsInfo{}, fmt.Errorf("delimiter '%s' is not supported", "-"), false},
         // Marker goes through url QueryUnescape, sending inputs for which QueryUnescape would fail (11-12).
         // Here is how QueryUnescape behaves https://golang.org/pkg/net/url/#QueryUnescape.
         // QueryUnescape is necessasry since marker is provided as URL query parameter.
-        {"test-bucket-list-object", "", "test%", "", 0, ListObjectsResult{}, fmt.Errorf("invalid URL escape"), false},
-        {"test-bucket-list-object", "", "test%A", "", 0, ListObjectsResult{}, fmt.Errorf("invalid URL escape"), false},
+        {"test-bucket-list-object", "", "test%", "", 0, ListObjectsInfo{}, fmt.Errorf("invalid URL escape"), false},
+        {"test-bucket-list-object", "", "test%A", "", 0, ListObjectsInfo{}, fmt.Errorf("invalid URL escape"), false},
         // Testing for failure cases with both perfix and marker (13).
         // The prefix and marker combination to be valid it should satisy strings.HasPrefix(marker, prefix).
-        {"test-bucket-list-object", "asia", "europe-object", "", 0, ListObjectsResult{}, fmt.Errorf("Invalid combination of marker '%s' and prefix '%s'", "europe-object", "asia"), false},
+        {"test-bucket-list-object", "asia", "europe-object", "", 0, ListObjectsInfo{}, fmt.Errorf("Invalid combination of marker '%s' and prefix '%s'", "europe-object", "asia"), false},
         // Setting a non-existing directory to be prefix (14-15).
-        {"empty-bucket", "europe/france/", "", "", 1, ListObjectsResult{}, nil, true},
-        {"empty-bucket", "europe/tunisia/", "", "", 1, ListObjectsResult{}, nil, true},
+        {"empty-bucket", "europe/france/", "", "", 1, ListObjectsInfo{}, nil, true},
+        {"empty-bucket", "europe/tunisia/", "", "", 1, ListObjectsInfo{}, nil, true},
         // Testing on empty bucket, that is, bucket without any objects in it (16).
-        {"empty-bucket", "", "", "", 0, ListObjectsResult{}, nil, true},
+        {"empty-bucket", "", "", "", 0, ListObjectsInfo{}, nil, true},
         // Setting maxKeys to negative value (17-18).
-        {"empty-bucket", "", "", "", -1, ListObjectsResult{}, nil, true},
-        {"empty-bucket", "", "", "", 1, ListObjectsResult{}, nil, true},
+        {"empty-bucket", "", "", "", -1, ListObjectsInfo{}, nil, true},
+        {"empty-bucket", "", "", "", 1, ListObjectsInfo{}, nil, true},
         // Setting maxKeys to a very large value (19).
-        {"empty-bucket", "", "", "", 1111000000000000, ListObjectsResult{}, nil, true},
+        {"empty-bucket", "", "", "", 1111000000000000, ListObjectsInfo{}, nil, true},
         // Testing for all 7 objects in the bucket (20).
         {"test-bucket-list-object", "", "", "", 9, resultCases[0], nil, true},
         //Testing for negative value of maxKey, this should set maxKeys to listObjectsLimit (21).
@@ -493,7 +493,7 @@ func TestListObjects(t *testing.T) {
         {"test-bucket-list-object", "", "man", "", 10, resultCases[13], nil, true},
         // Marker being set to a value which is greater than and all object names when sorted (38).
         // Expected to send an empty response in this case.
-        {"test-bucket-list-object", "", "zen", "", 10, ListObjectsResult{}, nil, true},
+        {"test-bucket-list-object", "", "zen", "", 10, ListObjectsInfo{}, nil, true},
         // Marker being set to a value which is lesser than and all object names when sorted (39).
         // Expected to send all the objects in the bucket in this case.
         {"test-bucket-list-object", "", "Abc", "", 10, resultCases[14], nil, true},
@@ -511,13 +511,13 @@ func TestListObjects(t *testing.T) {
         {"test-bucket-list-object", "new", "newPrefix0", "", 2, resultCases[22], nil, true},
         // Testing with maxKeys set to 0 (48-54).
         // The parameters have to valid.
-        {"test-bucket-list-object", "", "obj1", "", 0, ListObjectsResult{}, nil, true},
-        {"test-bucket-list-object", "", "obj0", "", 0, ListObjectsResult{}, nil, true},
-        {"test-bucket-list-object", "new", "", "", 0, ListObjectsResult{}, nil, true},
-        {"test-bucket-list-object", "obj", "", "", 0, ListObjectsResult{}, nil, true},
-        {"test-bucket-list-object", "obj", "obj0", "", 0, ListObjectsResult{}, nil, true},
-        {"test-bucket-list-object", "obj", "obj1", "", 0, ListObjectsResult{}, nil, true},
-        {"test-bucket-list-object", "new", "newPrefix0", "", 0, ListObjectsResult{}, nil, true},
+        {"test-bucket-list-object", "", "obj1", "", 0, ListObjectsInfo{}, nil, true},
+        {"test-bucket-list-object", "", "obj0", "", 0, ListObjectsInfo{}, nil, true},
+        {"test-bucket-list-object", "new", "", "", 0, ListObjectsInfo{}, nil, true},
+        {"test-bucket-list-object", "obj", "", "", 0, ListObjectsInfo{}, nil, true},
+        {"test-bucket-list-object", "obj", "obj0", "", 0, ListObjectsInfo{}, nil, true},
+        {"test-bucket-list-object", "obj", "obj1", "", 0, ListObjectsInfo{}, nil, true},
+        {"test-bucket-list-object", "new", "newPrefix0", "", 0, ListObjectsInfo{}, nil, true},
         // Tests on hierarchical key names as prefix.
         // Without delimteter the code should recurse into the prefix Dir.
         // Tests with prefix, but without delimiter (55-56).
```
fs-bucket.go (17 lines changed)

```diff
@@ -21,7 +21,6 @@ import (
     "os"
     "path/filepath"
     "strings"
-    "time"
 
     "github.com/minio/minio/pkg/disk"
     "github.com/minio/minio/pkg/probe"
@@ -56,19 +55,13 @@ func (fs Filesystem) DeleteBucket(bucket string) *probe.Error {
     return nil
 }
 
-// BucketInfo - name and create date
-type BucketInfo struct {
-    Name    string
-    Created time.Time
-}
-
 // ListBuckets - Get service.
 func (fs Filesystem) ListBuckets() ([]BucketInfo, *probe.Error) {
     files, e := ioutil.ReadDir(fs.path)
     if e != nil {
         return []BucketInfo{}, probe.NewError(e)
     }
-    var metadataList []BucketInfo
+    var buckets []BucketInfo
     for _, file := range files {
         if !file.IsDir() {
             // If not directory, ignore all file types.
@@ -79,15 +72,15 @@ func (fs Filesystem) ListBuckets() ([]BucketInfo, *probe.Error) {
         if !IsValidBucketName(dirName) {
             continue
         }
-        metadata := BucketInfo{
+        bucket := BucketInfo{
             Name:    dirName,
             Created: file.ModTime(),
         }
-        metadataList = append(metadataList, metadata)
+        buckets = append(buckets, bucket)
     }
     // Remove duplicated entries.
-    metadataList = removeDuplicateBuckets(metadataList)
-    return metadataList, nil
+    buckets = removeDuplicateBuckets(buckets)
+    return buckets, nil
 }
 
 // removeDuplicateBuckets - remove duplicate buckets.
```
fs-dir.go (28 lines changed)

```diff
@@ -79,18 +79,6 @@ func (f byName) Less(i, j int) bool {
     return n1 < n2
 }
 
-// ObjectInfo - object info.
-type ObjectInfo struct {
-    Bucket       string
-    Name         string
-    ModifiedTime time.Time
-    ContentType  string
-    MD5Sum       string
-    Size         int64
-    IsDir        bool
-    Err          error
-}
-
 // Using sort.Search() internally to jump to the file entry containing the prefix.
 func searchFileInfos(fileInfos []os.FileInfo, x string) int {
     processFunc := func(i int) bool {
@@ -140,7 +128,7 @@ func readDir(scanDir, namePrefix, queryPrefix string, isFirst bool) (objInfos []
     if queryPrefix != "" && isFirst {
         // If control is here then there is a queryPrefix, and there are objects which satisfies the prefix.
         // Since the result is sorted, the object names which satisfies query prefix would be stored one after the other.
-        // Push the objectInfo only if its contains the prefix.
+        // Push the ObjectInfo only if its contains the prefix.
         // This ensures that the channel containing object Info would only has objects with the given queryPrefix.
         if !strings.HasPrefix(name, queryPrefix) {
             return
@@ -194,8 +182,8 @@ func readDir(scanDir, namePrefix, queryPrefix string, isFirst bool) (objInfos []
     return
 }
 
-// ObjectInfoChannel - object info channel.
-type ObjectInfoChannel struct {
+// objectInfoChannel - object info channel.
+type objectInfoChannel struct {
     ch      <-chan ObjectInfo
     objInfo *ObjectInfo
     closed  bool
@@ -203,7 +191,7 @@ type ObjectInfoChannel struct {
     timedOut bool
 }
 
-func (oic *ObjectInfoChannel) Read() (ObjectInfo, bool) {
+func (oic *objectInfoChannel) Read() (ObjectInfo, bool) {
     if oic.closed {
         return ObjectInfo{}, false
     }
@@ -233,7 +221,7 @@ func (oic *ObjectInfoChannel) Read() (ObjectInfo, bool) {
 }
 
 // IsClosed - return whether channel is closed or not.
-func (oic ObjectInfoChannel) IsClosed() bool {
+func (oic objectInfoChannel) IsClosed() bool {
     if oic.objInfo != nil {
         return false
     }
@@ -242,7 +230,7 @@ func (oic ObjectInfoChannel) IsClosed() bool {
 }
 
 // IsTimedOut - return whether channel is closed due to timeout.
-func (oic ObjectInfoChannel) IsTimedOut() bool {
+func (oic objectInfoChannel) IsTimedOut() bool {
     if oic.timedOut {
         return true
     }
@@ -261,7 +249,7 @@ func (oic ObjectInfoChannel) IsTimedOut() bool {
 
 // treeWalk - walk into 'scanDir' recursively when 'recursive' is true.
 // It uses 'bucketDir' to get name prefix for object name.
-func treeWalk(scanDir, bucketDir string, recursive bool, queryPrefix string) ObjectInfoChannel {
+func treeWalk(scanDir, bucketDir string, recursive bool, queryPrefix string) objectInfoChannel {
     objectInfoCh := make(chan ObjectInfo, listObjectsLimit)
     timeoutCh := make(chan struct{}, 1)
 
@@ -314,5 +302,5 @@ func treeWalk(scanDir, bucketDir string, recursive bool, queryPrefix string) Obj
         }
     }()
 
-    return ObjectInfoChannel{ch: objectInfoCh, timeoutCh: timeoutCh}
+    return objectInfoChannel{ch: objectInfoCh, timeoutCh: timeoutCh}
 }
```
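For orientation, the now-unexported channel type is consumed with a read loop that distinguishes a finished walk from a timeout. A simplified sketch of that consumption pattern under stand-in types (the real `Read` also buffers a one-item look-ahead, which is omitted here):

```go
package main

import (
	"fmt"
	"time"
)

type objectInfo struct{ Name string }

// drain reads from a walk channel until it closes or the timeout fires,
// mirroring how ListObjects consumes objectInfoChannel in this commit.
func drain(ch <-chan objectInfo, timeout <-chan time.Time, max int) (names []string, truncated bool) {
	for len(names) < max {
		select {
		case info, ok := <-ch:
			if !ok {
				return names, false // walk finished
			}
			names = append(names, info.Name)
		case <-timeout:
			return names, true // caller should park the channel and set NextMarker
		}
	}
	return names, true
}

func main() {
	ch := make(chan objectInfo, 3)
	ch <- objectInfo{"a"}
	ch <- objectInfo{"b"}
	close(ch)
	names, truncated := drain(ch, time.After(time.Second), 10)
	fmt.Println(names, truncated) // [a b] false
}
```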
fs-multipart.go (137 lines changed)

```diff
@@ -50,55 +50,57 @@ func (fs Filesystem) isValidUploadID(object, uploadID string) (ok bool) {
 }
 
 // byObjectInfoKey is a sortable interface for UploadMetadata slice
-type byUploadMetadataKey []*UploadMetadata
+type byUploadMetadataKey []uploadMetadata
 
 func (b byUploadMetadataKey) Len() int           { return len(b) }
 func (b byUploadMetadataKey) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
 func (b byUploadMetadataKey) Less(i, j int) bool { return b[i].Object < b[j].Object }
 
 // ListMultipartUploads - list incomplete multipart sessions for a given BucketMultipartResourcesMetadata
-func (fs Filesystem) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error) {
+func (fs Filesystem) ListMultipartUploads(bucket, objectPrefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, *probe.Error) {
     // Input validation.
     if !IsValidBucketName(bucket) {
-        return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
+        return ListMultipartsInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
     }
     bucket = getActualBucketname(fs.path, bucket)
     bucketPath := filepath.Join(fs.path, bucket)
     if _, e := os.Stat(bucketPath); e != nil {
         // Check bucket exists.
         if os.IsNotExist(e) {
-            return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
+            return ListMultipartsInfo{}, probe.NewError(BucketNotFound{Bucket: bucket})
         }
-        return BucketMultipartResourcesMetadata{}, probe.NewError(e)
+        return ListMultipartsInfo{}, probe.NewError(e)
     }
-    var uploads []*UploadMetadata
+    var uploads []uploadMetadata
+    multipartsInfo := ListMultipartsInfo{}
 
     fs.rwLock.RLock()
     defer fs.rwLock.RUnlock()
     for uploadID, session := range fs.multiparts.ActiveSession {
         objectName := session.ObjectName
-        if strings.HasPrefix(objectName, resources.Prefix) {
-            if len(uploads) > resources.MaxUploads {
+        if strings.HasPrefix(objectName, objectPrefix) {
+            if len(uploads) > maxUploads {
                 sort.Sort(byUploadMetadataKey(uploads))
-                resources.Upload = uploads
-                resources.NextKeyMarker = session.ObjectName
-                resources.NextUploadIDMarker = uploadID
-                resources.IsTruncated = true
-                return resources, nil
+                multipartsInfo.Uploads = uploads
+                multipartsInfo.NextKeyMarker = session.ObjectName
+                multipartsInfo.NextUploadIDMarker = uploadID
+                multipartsInfo.IsTruncated = true
+                return multipartsInfo, nil
             }
-            // UploadIDMarker is ignored if KeyMarker is empty.
+            // uploadIDMarker is ignored if KeyMarker is empty.
             switch {
-            case resources.KeyMarker != "" && resources.UploadIDMarker == "":
-                if objectName > resources.KeyMarker {
-                    upload := new(UploadMetadata)
+            case keyMarker != "" && uploadIDMarker == "":
+                if objectName > keyMarker {
+                    upload := uploadMetadata{}
                     upload.Object = objectName
                     upload.UploadID = uploadID
                     upload.Initiated = session.Initiated
                     uploads = append(uploads, upload)
                 }
-            case resources.KeyMarker != "" && resources.UploadIDMarker != "":
-                if session.UploadID > resources.UploadIDMarker {
-                    if objectName >= resources.KeyMarker {
-                        upload := new(UploadMetadata)
+            case keyMarker != "" && uploadIDMarker != "":
+                if session.UploadID > uploadIDMarker {
+                    if objectName >= keyMarker {
+                        upload := uploadMetadata{}
                         upload.Object = objectName
                         upload.UploadID = uploadID
                         upload.Initiated = session.Initiated
@@ -106,7 +108,7 @@ func (fs Filesystem) ListMultipartUploads(bucket string, resources BucketMultipa
                 }
             }
             default:
-                upload := new(UploadMetadata)
+                upload := uploadMetadata{}
                 upload.Object = objectName
                 upload.UploadID = uploadID
                 upload.Initiated = session.Initiated
@@ -115,13 +117,13 @@ func (fs Filesystem) ListMultipartUploads(bucket string, resources BucketMultipa
         }
     }
     sort.Sort(byUploadMetadataKey(uploads))
-    resources.Upload = uploads
-    return resources, nil
+    multipartsInfo.Uploads = uploads
+    return multipartsInfo, nil
 }
 
 // verify if parts sent over the network do really match with what we
 // have for the session.
-func doPartsMatch(parts []CompletePart, savedParts []PartMetadata) bool {
+func doPartsMatch(parts []completePart, savedParts []partInfo) bool {
     if parts == nil || savedParts == nil {
         return false
     }
@@ -175,7 +177,7 @@ func MultiCloser(closers ...io.Closer) io.Closer {
 }
 
 // removeParts - remove all parts.
-func removeParts(partPathPrefix string, parts []PartMetadata) *probe.Error {
+func removeParts(partPathPrefix string, parts []partInfo) *probe.Error {
     for _, part := range parts {
         // We are on purpose ignoring the return values here, since
         // another thread would have purged these entries.
@@ -185,7 +187,7 @@ func removeParts(partPathPrefix string, parts []PartMetadata) *probe.Error {
 }
 
 // saveParts - concantenate and save all parts.
-func saveParts(partPathPrefix string, mw io.Writer, parts []CompletePart) *probe.Error {
+func saveParts(partPathPrefix string, mw io.Writer, parts []completePart) *probe.Error {
     var partReaders []io.Reader
     var partClosers []io.Closer
     for _, part := range parts {
@@ -274,13 +276,13 @@ func (fs Filesystem) NewMultipartUpload(bucket, object string) (string, *probe.E
     fs.rwLock.Lock()
     defer fs.rwLock.Unlock()
     // Initialize multipart session.
-    mpartSession := &MultipartSession{}
+    mpartSession := &multipartSession{}
     mpartSession.TotalParts = 0
     mpartSession.ObjectName = object
     mpartSession.UploadID = uploadID
     mpartSession.Initiated = time.Now().UTC()
     // Multipart has maximum of 10000 parts.
-    var parts []PartMetadata
+    var parts []partInfo
     mpartSession.Parts = parts
 
     fs.multiparts.ActiveSession[uploadID] = mpartSession
@@ -291,7 +293,7 @@ func (fs Filesystem) NewMultipartUpload(bucket, object string) (string, *probe.E
 }
 
 // Remove all duplicated parts based on the latest time of their upload.
-func removeDuplicateParts(parts []PartMetadata) []PartMetadata {
+func removeDuplicateParts(parts []partInfo) []partInfo {
     length := len(parts) - 1
     for i := 0; i < length; i++ {
         for j := i + 1; j <= length; j++ {
@@ -311,7 +313,7 @@ func removeDuplicateParts(parts []PartMetadata) []PartMetadata {
 }
 
 // partNumber is a sortable interface for Part slice.
-type partNumber []PartMetadata
+type partNumber []partInfo
 
 func (a partNumber) Len() int      { return len(a) }
 func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
@@ -324,18 +326,12 @@ func (fs Filesystem) PutObjectPart(bucket, object, uploadID string, partID int,
         return "", probe.NewError(err)
     }
 
-    // Remove 5% from total space for cumulative disk space used for
-    // journalling, inodes etc.
+    // Remove 5% from total space for cumulative disk space used for journalling, inodes etc.
     availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
     if int64(availableDiskSpace) <= fs.minFreeDisk {
         return "", probe.NewError(RootPathFull{Path: fs.path})
     }
 
-    // Part id cannot be negative.
-    if partID <= 0 {
-        return "", probe.NewError(errors.New("invalid part id, cannot be zero or less than zero"))
-    }
-
     // Check bucket name valid.
     if !IsValidBucketName(bucket) {
         return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
@@ -346,6 +342,11 @@ func (fs Filesystem) PutObjectPart(bucket, object, uploadID string, partID int,
         return "", probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
     }
 
+    // Part id cannot be negative.
+    if partID <= 0 {
+        return "", probe.NewError(errors.New("invalid part id, cannot be zero or less than zero"))
+    }
+
     // Verify upload is valid for the incoming object.
     if !fs.isValidUploadID(object, uploadID) {
         return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
@@ -394,11 +395,11 @@ func (fs Filesystem) PutObjectPart(bucket, object, uploadID string, partID int,
     if e != nil {
         return "", probe.NewError(e)
     }
-    partMetadata := PartMetadata{}
-    partMetadata.PartNumber = partID
-    partMetadata.ETag = newMD5Hex
-    partMetadata.Size = fi.Size()
-    partMetadata.LastModified = fi.ModTime()
+    prtInfo := partInfo{}
+    prtInfo.PartNumber = partID
+    prtInfo.ETag = newMD5Hex
+    prtInfo.Size = fi.Size()
+    prtInfo.LastModified = fi.ModTime()
 
     // Critical region requiring read lock.
     fs.rwLock.RLock()
@@ -409,10 +410,11 @@ func (fs Filesystem) PutObjectPart(bucket, object, uploadID string, partID int,
     }
 
     // Add all incoming parts.
-    deserializedMultipartSession.Parts = append(deserializedMultipartSession.Parts, partMetadata)
+    deserializedMultipartSession.Parts = append(deserializedMultipartSession.Parts, prtInfo)
+
     // Remove duplicate parts based on the most recent uploaded.
     deserializedMultipartSession.Parts = removeDuplicateParts(deserializedMultipartSession.Parts)
 
     // Save total parts uploaded.
     deserializedMultipartSession.TotalParts = len(deserializedMultipartSession.Parts)
 
@@ -431,7 +433,7 @@ func (fs Filesystem) PutObjectPart(bucket, object, uploadID string, partID int,
 }
 
 // CompleteMultipartUpload - complete a multipart upload and persist the data
-func (fs Filesystem) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []CompletePart) (ObjectInfo, *probe.Error) {
+func (fs Filesystem) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (ObjectInfo, *probe.Error) {
     // Check bucket name is valid.
     if !IsValidBucketName(bucket) {
         return ObjectInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
@@ -531,35 +533,32 @@ func (fs Filesystem) CompleteMultipartUpload(bucket string, object string, uploa
     return newObject, nil
 }
 
-// ListObjectParts - list parts from incomplete multipart session for a given ObjectResourcesMetadata
-func (fs Filesystem) ListObjectParts(bucket, object string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error) {
+// ListObjectParts - list parts from incomplete multipart session.
+func (fs Filesystem) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, *probe.Error) {
     // Check bucket name is valid.
     if !IsValidBucketName(bucket) {
-        return ObjectResourcesMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
+        return ListPartsInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
     }
 
     // Verify object path legal.
     if !IsValidObjectName(object) {
-        return ObjectResourcesMetadata{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
+        return ListPartsInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
     }
 
-    // Save upload id.
-    uploadID := resources.UploadID
-
     // Verify if upload id is valid for incoming object.
     if !fs.isValidUploadID(object, uploadID) {
-        return ObjectResourcesMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
+        return ListPartsInfo{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
     }
 
-    objectResourcesMetadata := resources
-    objectResourcesMetadata.Bucket = bucket
-    objectResourcesMetadata.Object = object
+    prtsInfo := ListPartsInfo{}
+    prtsInfo.Bucket = bucket
+    prtsInfo.Object = object
     var startPartNumber int
     switch {
-    case objectResourcesMetadata.PartNumberMarker == 0:
+    case partNumberMarker == 0:
         startPartNumber = 1
     default:
-        startPartNumber = objectResourcesMetadata.PartNumberMarker
+        startPartNumber = partNumberMarker
     }
 
     bucket = getActualBucketname(fs.path, bucket)
@@ -567,9 +566,9 @@ func (fs Filesystem) ListObjectParts(bucket, object string, resources ObjectReso
     if _, e := os.Stat(bucketPath); e != nil {
         // Check bucket exists.
         if os.IsNotExist(e) {
-            return ObjectResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
+            return ListPartsInfo{}, probe.NewError(BucketNotFound{Bucket: bucket})
         }
-        return ObjectResourcesMetadata{}, probe.NewError(e)
+        return ListPartsInfo{}, probe.NewError(e)
     }
 
     // Critical region requiring read lock.
@@ -577,22 +576,22 @@ func (fs Filesystem) ListObjectParts(bucket, object string, resources ObjectReso
     deserializedMultipartSession, ok := fs.multiparts.ActiveSession[uploadID]
     fs.rwLock.RUnlock()
     if !ok {
-        return ObjectResourcesMetadata{}, probe.NewError(InvalidUploadID{UploadID: resources.UploadID})
+        return ListPartsInfo{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
    }
-    var parts []PartMetadata
+    var parts []partInfo
     for i := startPartNumber; i <= deserializedMultipartSession.TotalParts; i++ {
-        if len(parts) > objectResourcesMetadata.MaxParts {
+        if len(parts) > maxParts {
             sort.Sort(partNumber(parts))
-            objectResourcesMetadata.IsTruncated = true
-            objectResourcesMetadata.Part = parts
-            objectResourcesMetadata.NextPartNumberMarker = i
-            return objectResourcesMetadata, nil
+            prtsInfo.IsTruncated = true
+            prtsInfo.Parts = parts
+            prtsInfo.NextPartNumberMarker = i
+            return prtsInfo, nil
         }
         parts = append(parts, deserializedMultipartSession.Parts[i-1])
     }
     sort.Sort(partNumber(parts))
-    objectResourcesMetadata.Part = parts
-    return objectResourcesMetadata, nil
+    prtsInfo.Parts = parts
+    return prtsInfo, nil
 }
 
 // AbortMultipartUpload - abort an incomplete multipart session
```
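End to end, the flattened multipart API now reads like this from a caller's point of view: `NewMultipartUpload`, then one `PutObjectPart` per chunk, then `CompleteMultipartUpload` with the accumulated `[]completePart`. The sketch below is a standalone illustration of that call sequence; the Filesystem methods are stubbed as comments, and `uploadID`, bucket, and object names are placeholders.

```go
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

type completePart struct {
	PartNumber int
	ETag       string
}

// uploadObject sketches the multipart call sequence of this commit:
// NewMultipartUpload -> PutObjectPart per chunk -> CompleteMultipartUpload
// with the accumulated []completePart. The commented-out calls stand in
// for the real Filesystem methods.
func uploadObject(chunks [][]byte) []completePart {
	var parts []completePart
	for i, chunk := range chunks {
		sum := md5.Sum(chunk)
		etag := hex.EncodeToString(sum[:])
		// fs.PutObjectPart("bucket", "object", uploadID, i+1, int64(len(chunk)), bytes.NewReader(chunk), etag)
		_ = bytes.NewReader(chunk)
		parts = append(parts, completePart{PartNumber: i + 1, ETag: etag})
	}
	// fs.CompleteMultipartUpload("bucket", "object", uploadID, parts)
	return parts
}

func main() {
	parts := uploadObject([][]byte{[]byte("hello "), []byte("world")})
	fmt.Println(len(parts), parts[0].ETag)
}
```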
```diff
@@ -58,7 +58,6 @@ func (fs Filesystem) GetObject(bucket, object string, startOffset int64) (io.Rea
         if os.IsNotExist(e) {
             return nil, probe.NewError(BucketNotFound{Bucket: bucket})
         }
-
         return nil, probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
     }
     return nil, probe.NewError(e)
@@ -73,7 +72,7 @@ func (fs Filesystem) GetObject(bucket, object string, startOffset int64) (io.Rea
         return nil, probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
     }
 
-    // Seet to a starting offset.
+    // Seek to a starting offset.
     _, e = file.Seek(startOffset, os.SEEK_SET)
     if e != nil {
         // When the "handle is invalid", the file might be a directory on Windows.
@@ -82,8 +81,6 @@ func (fs Filesystem) GetObject(bucket, object string, startOffset int64) (io.Rea
         }
         return nil, probe.NewError(e)
     }
-
-    // Return successfully seeked file handler.
     return file, nil
 }
```
```diff
@@ -238,19 +238,17 @@ func BenchmarkGetObject(b *testing.B) {
     }
 
     b.ResetTimer()
 
     for i := 0; i < b.N; i++ {
-        var w bytes.Buffer
+        var buffer = new(bytes.Buffer)
         r, err := fs.GetObject("bucket", "object"+strconv.Itoa(i%10), 0)
         if err != nil {
             b.Error(err)
         }
-        n, e := io.Copy(&w, r)
-        if e != nil {
+        if _, e := io.Copy(buffer, r); e != nil {
             b.Error(e)
         }
-        if n != int64(len(text)) {
-            b.Errorf("GetObject returned incorrect length %d (should be %d)\n", n, int64(len(text)))
+        if buffer.Len() != len(text) {
+            b.Errorf("GetObject returned incorrect length %d (should be %d)\n", buffer.Len(), len(text))
         }
         r.Close()
     }
```
fs.go (62 lines changed)

```diff
@@ -25,8 +25,8 @@ import (
     "github.com/minio/minio/pkg/probe"
 )
 
-// ListObjectParams - list object params used for list object map
-type ListObjectParams struct {
+// listObjectParams - list object params used for list object map
+type listObjectParams struct {
     bucket    string
     delimiter string
     marker    string
@@ -38,16 +38,31 @@ type Filesystem struct {
     path        string
     minFreeDisk int64
     rwLock      *sync.RWMutex
-    multiparts *Multiparts
-    listObjectMap map[ListObjectParams][]ObjectInfoChannel
+    multiparts *multiparts
+    listObjectMap map[listObjectParams][]objectInfoChannel
     listObjectMapMutex *sync.Mutex
 }
 
-func (fs *Filesystem) pushListObjectCh(params ListObjectParams, ch ObjectInfoChannel) {
+// MultipartSession holds active session information
+type multipartSession struct {
+    TotalParts int
+    ObjectName string
+    UploadID   string
+    Initiated  time.Time
+    Parts      []partInfo
+}
+
+// multiparts collection of many parts
+type multiparts struct {
+    Version       string                       `json:"version"`
+    ActiveSession map[string]*multipartSession `json:"activeSessions"`
+}
+
+func (fs *Filesystem) pushListObjectCh(params listObjectParams, ch objectInfoChannel) {
     fs.listObjectMapMutex.Lock()
     defer fs.listObjectMapMutex.Unlock()
 
-    channels := []ObjectInfoChannel{ch}
+    channels := []objectInfoChannel{ch}
     if _, ok := fs.listObjectMap[params]; ok {
         channels = append(fs.listObjectMap[params], ch)
     }
@@ -55,7 +70,7 @@ func (fs *Filesystem) pushListObjectCh(params ListObjectParams, ch ObjectInfoCha
     fs.listObjectMap[params] = channels
 }
 
-func (fs *Filesystem) popListObjectCh(params ListObjectParams) *ObjectInfoChannel {
+func (fs *Filesystem) popListObjectCh(params listObjectParams) *objectInfoChannel {
     fs.listObjectMapMutex.Lock()
     defer fs.listObjectMapMutex.Unlock()
 
@@ -80,40 +95,25 @@ func (fs *Filesystem) popListObjectCh(params ListObjectParams) *ObjectInfoChanne
     return nil
 }
 
-// MultipartSession holds active session information
-type MultipartSession struct {
-    TotalParts int
-    ObjectName string
-    UploadID   string
-    Initiated  time.Time
-    Parts      []PartMetadata
-}
-
-// Multiparts collection of many parts
-type Multiparts struct {
-    Version       string                       `json:"version"`
-    ActiveSession map[string]*MultipartSession `json:"activeSessions"`
-}
-
 // newFS instantiate a new filesystem.
 func newFS(rootPath string) (ObjectAPI, *probe.Error) {
     setFSMultipartsMetadataPath(filepath.Join(rootPath, "$multiparts-session.json"))
 
     var err *probe.Error
     // load multiparts session from disk
-    var multiparts *Multiparts
-    multiparts, err = loadMultipartsSession()
+    var mparts *multiparts
+    mparts, err = loadMultipartsSession()
     if err != nil {
         if os.IsNotExist(err.ToGoError()) {
-            multiparts = &Multiparts{
+            mparts = &multiparts{
                 Version: "1",
-                ActiveSession: make(map[string]*MultipartSession),
+                ActiveSession: make(map[string]*multipartSession),
             }
-            if err = saveMultipartsSession(*multiparts); err != nil {
-                return Filesystem{}, err.Trace()
+            if err = saveMultipartsSession(*mparts); err != nil {
+                return nil, err.Trace()
             }
         } else {
-            return Filesystem{}, err.Trace()
+            return nil, err.Trace()
         }
     }
 
@@ -121,14 +121,14 @@ func newFS(rootPath string) (ObjectAPI, *probe.Error) {
         rwLock: &sync.RWMutex{},
     }
     fs.path = rootPath
-    fs.multiparts = multiparts
+    fs.multiparts = mparts
 
     /// Defaults
 
     // Minium free disk required for i/o operations to succeed.
     fs.minFreeDisk = 5
 
-    fs.listObjectMap = make(map[ListObjectParams][]ObjectInfoChannel)
+    fs.listObjectMap = make(map[listObjectParams][]objectInfoChannel)
     fs.listObjectMapMutex = &sync.Mutex{}
 
     // Return here.
```
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Minimalist Object Storage, (C) 2015, 2016 Minio, Inc.
|
||||
* Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@ -59,8 +59,7 @@ func testMultipartObjectCreation(c *check.C, create func() ObjectAPI) {
|
||||
uploadID, err := fs.NewMultipartUpload("bucket", "key")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
completedParts := CompleteMultipartUpload{}
|
||||
//completedParts.Part = make([]CompletePart, 10)
|
||||
completedParts := completeMultipartUpload{}
|
||||
for i := 1; i <= 10; i++ {
|
||||
randomPerm := rand.Perm(10)
|
||||
randomString := ""
|
||||
@@ -76,11 +75,11 @@ func testMultipartObjectCreation(c *check.C, create func() ObjectAPI) {
 		calculatedMD5sum, err = fs.PutObjectPart("bucket", "key", uploadID, i, int64(len(randomString)), bytes.NewBufferString(randomString), expectedMD5Sumhex)
 		c.Assert(err, check.IsNil)
 		c.Assert(calculatedMD5sum, check.Equals, expectedMD5Sumhex)
-		completedParts.Parts = append(completedParts.Parts, CompletePart{PartNumber: i, ETag: calculatedMD5sum})
+		completedParts.Parts = append(completedParts.Parts, completePart{PartNumber: i, ETag: calculatedMD5sum})
 	}
-	objectInfo, err := fs.CompleteMultipartUpload("bucket", "key", uploadID, completedParts.Parts)
+	objInfo, err := fs.CompleteMultipartUpload("bucket", "key", uploadID, completedParts.Parts)
 	c.Assert(err, check.IsNil)
-	c.Assert(objectInfo.MD5Sum, check.Equals, "9b7d6f13ba00e24d0b02de92e814891b-10")
+	c.Assert(objInfo.MD5Sum, check.Equals, "9b7d6f13ba00e24d0b02de92e814891b-10")
 }

 func testMultipartObjectAbort(c *check.C, create func() ObjectAPI) {
@@ -91,6 +90,7 @@ func testMultipartObjectAbort(c *check.C, create func() ObjectAPI) {
 	c.Assert(err, check.IsNil)

 	parts := make(map[int]string)
+	metadata := make(map[string]string)
 	for i := 1; i <= 10; i++ {
 		randomPerm := rand.Perm(10)
 		randomString := ""
@@ -102,6 +102,7 @@ func testMultipartObjectAbort(c *check.C, create func() ObjectAPI) {
 		hasher.Write([]byte(randomString))
 		expectedMD5Sumhex := hex.EncodeToString(hasher.Sum(nil))

+		metadata["md5"] = expectedMD5Sumhex
 		var calculatedMD5sum string
 		calculatedMD5sum, err = fs.PutObjectPart("bucket", "key", uploadID, i, int64(len(randomString)), bytes.NewBufferString(randomString), expectedMD5Sumhex)
 		c.Assert(err, check.IsNil)
@@ -130,24 +131,25 @@ func testMultipleObjectCreation(c *check.C, create func() ObjectAPI) {

 		key := "obj" + strconv.Itoa(i)
 		objects[key] = []byte(randomString)
-		var objectInfo ObjectInfo
 		metadata := make(map[string]string)
 		metadata["md5Sum"] = expectedMD5Sumhex
-		objectInfo, err = fs.PutObject("bucket", key, int64(len(randomString)), bytes.NewBufferString(randomString), metadata)
+		objInfo, err := fs.PutObject("bucket", key, int64(len(randomString)), bytes.NewBufferString(randomString), metadata)
 		c.Assert(err, check.IsNil)
-		c.Assert(objectInfo.MD5Sum, check.Equals, expectedMD5Sumhex)
+		c.Assert(objInfo.MD5Sum, check.Equals, expectedMD5Sumhex)
 	}

 	for key, value := range objects {
 		var byteBuffer bytes.Buffer
 		r, err := fs.GetObject("bucket", key, 0)
 		c.Assert(err, check.IsNil)
-		io.Copy(&byteBuffer, r)
+		_, e := io.Copy(&byteBuffer, r)
+		c.Assert(e, check.IsNil)
 		c.Assert(byteBuffer.Bytes(), check.DeepEquals, value)
+		c.Assert(r.Close(), check.IsNil)

-		metadata, err := fs.GetObjectInfo("bucket", key)
+		objInfo, err := fs.GetObjectInfo("bucket", key)
 		c.Assert(err, check.IsNil)
-		c.Assert(metadata.Size, check.Equals, int64(len(value)))
-		r.Close()
+		c.Assert(objInfo.Size, check.Equals, int64(len(value)))
 	}
 }
@@ -259,7 +261,7 @@ func testObjectOverwriteWorks(c *check.C, create func() ObjectAPI) {

 	_, err = fs.PutObject("bucket", "object", int64(len("one")), bytes.NewBufferString("one"), nil)
 	c.Assert(err, check.IsNil)
-	// c.Assert(md5Sum1hex, check.Equals, objectInfo.MD5Sum)
+	// c.Assert(md5Sum1hex, check.Equals, objInfo.MD5Sum)

 	_, err = fs.PutObject("bucket", "object", int64(len("three")), bytes.NewBufferString("three"), nil)
 	c.Assert(err, check.IsNil)
@@ -267,9 +269,10 @@ func testObjectOverwriteWorks(c *check.C, create func() ObjectAPI) {
 	var bytesBuffer bytes.Buffer
 	r, err := fs.GetObject("bucket", "object", 0)
 	c.Assert(err, check.IsNil)
-	io.Copy(&bytesBuffer, r)
+	_, e := io.Copy(&bytesBuffer, r)
+	c.Assert(e, check.IsNil)
 	c.Assert(string(bytesBuffer.Bytes()), check.Equals, "three")
-	r.Close()
+	c.Assert(r.Close(), check.IsNil)
 }

 func testNonExistantBucketOperations(c *check.C, create func() ObjectAPI) {
@@ -297,9 +300,11 @@ func testPutObjectInSubdir(c *check.C, create func() ObjectAPI) {
 	var bytesBuffer bytes.Buffer
 	r, err := fs.GetObject("bucket", "dir1/dir2/object", 0)
 	c.Assert(err, check.IsNil)
-	io.Copy(&bytesBuffer, r)
+	n, e := io.Copy(&bytesBuffer, r)
+	c.Assert(e, check.IsNil)
 	c.Assert(len(bytesBuffer.Bytes()), check.Equals, len("hello world"))
-	r.Close()
+	c.Assert(int64(len(bytesBuffer.Bytes())), check.Equals, int64(n))
+	c.Assert(r.Close(), check.IsNil)
 }

 func testListBuckets(c *check.C, create func() ObjectAPI) {
@@ -411,6 +416,7 @@ func testDefaultContentType(c *check.C, create func() ObjectAPI) {

 	// Test empty
 	_, err = fs.PutObject("bucket", "one", int64(len("one")), bytes.NewBufferString("one"), nil)
 	c.Assert(err, check.IsNil)
+	objInfo, err := fs.GetObjectInfo("bucket", "one")
 	c.Assert(err, check.IsNil)
 	c.Assert(objInfo.ContentType, check.Equals, "application/octet-stream")
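Condensed, this is the lifecycle the suite tests above drive end to end. A sketch only: it is written as if inside package main (completePart is unexported), the helper name multipartRoundTrip is invented here, and passing an empty md5Hex assumes the implementation treats it as "nothing to verify":

```
package main

import (
	"strconv"
	"strings"

	"github.com/minio/minio/pkg/probe" // import path assumed
)

// multipartRoundTrip mirrors the test flow: start an upload, put ten
// parts, then complete with the collected ETags.
func multipartRoundTrip(fs ObjectAPI) (ObjectInfo, *probe.Error) {
	uploadID, err := fs.NewMultipartUpload("bucket", "key")
	if err != nil {
		return ObjectInfo{}, err
	}
	var parts []completePart
	for i := 1; i <= 10; i++ {
		data := "part-" + strconv.Itoa(i)
		etag, err := fs.PutObjectPart("bucket", "key", uploadID, i,
			int64(len(data)), strings.NewReader(data), "") // empty md5Hex: no checksum to verify
		if err != nil {
			return ObjectInfo{}, err
		}
		parts = append(parts, completePart{PartNumber: i, ETag: etag})
	}
	// Parts were appended in ascending order, satisfying the sort check.
	return fs.CompleteMultipartUpload("bucket", "key", uploadID, parts)
}
```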
@@ -15,8 +15,8 @@ type ObjectAPI interface {
 	GetBucketInfo(bucket string) (BucketInfo, *probe.Error)

 	// Bucket query API.
-	ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsResult, *probe.Error)
-	ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error)
+	ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, *probe.Error)
+	ListMultipartUploads(bucket, objectPrefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, *probe.Error)

 	// Object resource API.
 	GetObject(bucket, object string, startOffset int64) (io.ReadCloser, *probe.Error)
@@ -27,7 +27,7 @@ type ObjectAPI interface {
 	// Object query API.
 	NewMultipartUpload(bucket, object string) (string, *probe.Error)
 	PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, *probe.Error)
-	ListObjectParts(bucket, object string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error)
-	CompleteMultipartUpload(bucket string, object string, uploadID string, parts []CompletePart) (ObjectInfo, *probe.Error)
+	ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, *probe.Error)
+	CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (ObjectInfo, *probe.Error)
 	AbortMultipartUpload(bucket, object, uploadID string) *probe.Error
 }
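For orientation, a sketch of paging through parts with the flattened ListObjectParts signature. listAllParts is an invented name, and the loop resumes from the last part number returned rather than assuming any marker field the hunks here do not show:

```
package main

import "github.com/minio/minio/pkg/probe" // import path assumed

// listAllParts pages through every part of an upload, 1000 at a time.
func listAllParts(objAPI ObjectAPI, bucket, object, uploadID string) ([]partInfo, *probe.Error) {
	var all []partInfo
	partNumberMarker := 0
	for {
		result, err := objAPI.ListObjectParts(bucket, object, uploadID, partNumberMarker, 1000)
		if err != nil {
			return nil, err
		}
		all = append(all, result.Parts...)
		if !result.IsTruncated || len(result.Parts) == 0 {
			return all, nil
		}
		// Resume after the highest part number returned so far.
		partNumberMarker = result.Parts[len(result.Parts)-1].PartNumber
	}
}
```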
@@ -1,5 +1,5 @@
 /*
- * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
+ * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -18,16 +18,26 @@ package main

 import "time"

-// PartMetadata - various types of individual part resources
-type PartMetadata struct {
-	PartNumber   int
-	LastModified time.Time
-	ETag         string
-	Size         int64
+// BucketInfo - bucket name and create date
+type BucketInfo struct {
+	Name    string
+	Created time.Time
 }

-// ObjectResourcesMetadata - various types of object resources
-type ObjectResourcesMetadata struct {
+// ObjectInfo - object info.
+type ObjectInfo struct {
+	Bucket       string
+	Name         string
+	ModifiedTime time.Time
+	ContentType  string
+	MD5Sum       string
+	Size         int64
+	IsDir        bool
+	Err          error
+}
+
+// ListPartsInfo - various types of object resources.
+type ListPartsInfo struct {
 	Bucket   string
 	Object   string
 	UploadID string
@@ -37,20 +47,12 @@ type ObjectResourcesMetadata struct {
 	MaxParts    int
 	IsTruncated bool

-	Part         []PartMetadata
+	Parts        []partInfo
 	EncodingType string
 }

-// UploadMetadata container capturing metadata on in progress multipart upload in a given bucket
-type UploadMetadata struct {
-	Object       string
-	UploadID     string
-	StorageClass string
-	Initiated    time.Time
-}
-
-// BucketMultipartResourcesMetadata - various types of bucket resources for inprogress multipart uploads
-type BucketMultipartResourcesMetadata struct {
+// ListMultipartsInfo - various types of bucket resources for inprogress multipart uploads.
+type ListMultipartsInfo struct {
 	KeyMarker      string
 	UploadIDMarker string
 	NextKeyMarker  string
@@ -58,34 +60,50 @@ type BucketMultipartResourcesMetadata struct {
 	EncodingType string
 	MaxUploads   int
 	IsTruncated  bool
-	Upload       []*UploadMetadata
+	Uploads      []uploadMetadata
 	Prefix         string
 	Delimiter      string
 	CommonPrefixes []string
 }

-// ListObjectsResult - container for list object request results.
-type ListObjectsResult struct {
+// ListObjectsInfo - container for list objects.
+type ListObjectsInfo struct {
 	IsTruncated bool
 	NextMarker  string
 	Objects     []ObjectInfo
 	Prefixes    []string
 }

-// CompletePart - completed part container
-type CompletePart struct {
+// partInfo - various types of individual part resources.
+type partInfo struct {
 	PartNumber   int
+	LastModified time.Time
 	ETag         string
+	Size         int64
 }

+// uploadMetadata container capturing metadata on in progress multipart upload in a given bucket
+type uploadMetadata struct {
+	Object       string
+	UploadID     string
+	StorageClass string
+	Initiated    time.Time
+}
+
+// completePart - completed part container.
+type completePart struct {
+	PartNumber int
+	ETag       string
+}
+
 // completedParts is a sortable interface for Part slice
-type completedParts []CompletePart
+type completedParts []completePart

 func (a completedParts) Len() int           { return len(a) }
 func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
 func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }

-// CompleteMultipartUpload container for completing multipart upload
-type CompleteMultipartUpload struct {
-	Parts []CompletePart `xml:"Part"`
+// completeMultipartUpload container for completing multipart upload
+type completeMultipartUpload struct {
+	Parts []completePart `xml:"Part"`
 }
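A sketch of consuming the new ListObjectsInfo container, paging on NextMarker until IsTruncated goes false (walkBucket is an invented name; an objAPI value implementing ObjectAPI is assumed):

```
package main

import "github.com/minio/minio/pkg/probe" // import path assumed

// walkBucket collects every object under a prefix using the new container.
func walkBucket(objAPI ObjectAPI, bucket, prefix string) ([]ObjectInfo, *probe.Error) {
	var objects []ObjectInfo
	marker := ""
	for {
		lo, err := objAPI.ListObjects(bucket, prefix, marker, "", 1000)
		if err != nil {
			return nil, err
		}
		objects = append(objects, lo.Objects...)
		if !lo.IsTruncated {
			return objects, nil
		}
		marker = lo.NextMarker // resume where this page stopped
	}
}
```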
@@ -9,7 +9,7 @@
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implieapi.ObjectAPI.
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
@@ -84,9 +84,8 @@ func (api objectStorageAPI) GetObjectHandler(w http.ResponseWriter, r *http.Requ
 		}
 	}

-	objectInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object)
+	objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object)
 	if err != nil {
-		errorIf(err.Trace(), "GetObject failed.", nil)
 		switch err.ToGoError().(type) {
 		case BucketNameInvalid:
 			writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
@@ -97,23 +96,14 @@ func (api objectStorageAPI) GetObjectHandler(w http.ResponseWriter, r *http.Requ
 		case ObjectNameInvalid:
 			writeErrorResponse(w, r, ErrNoSuchKey, r.URL.Path)
 		default:
+			errorIf(err.Trace(), "GetObjectInfo failed.", nil)
 			writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
 		}
 		return
 	}

-	var hrange *httpRange
-	hrange, err = getRequestedRange(r.Header.Get("Range"), objectInfo.Size)
-	if err != nil {
-		writeErrorResponse(w, r, ErrInvalidRange, r.URL.Path)
-		return
-	}
-
-	// Set standard object headers.
-	setObjectHeaders(w, objectInfo, hrange)
-
 	// Verify 'If-Modified-Since' and 'If-Unmodified-Since'.
-	lastModified := objectInfo.ModifiedTime
+	lastModified := objInfo.ModifiedTime
 	if checkLastModified(w, r, lastModified) {
 		return
 	}
@@ -122,8 +112,12 @@ func (api objectStorageAPI) GetObjectHandler(w http.ResponseWriter, r *http.Requ
 		return
 	}

-	// Set any additional requested response headers.
-	setGetRespHeaders(w, r.URL.Query())
+	var hrange *httpRange
+	hrange, err = getRequestedRange(r.Header.Get("Range"), objInfo.Size)
+	if err != nil {
+		writeErrorResponse(w, r, ErrInvalidRange, r.URL.Path)
+		return
+	}

 	// Get the object.
 	startOffset := hrange.start
@@ -134,6 +128,13 @@ func (api objectStorageAPI) GetObjectHandler(w http.ResponseWriter, r *http.Requ
 		return
 	}
+	defer readCloser.Close() // Close after this handler returns.
+
+	// Set standard object headers.
+	setObjectHeaders(w, objInfo, hrange)
+
+	// Set any additional requested response headers.
+	setGetRespHeaders(w, r.URL.Query())

 	if hrange.length > 0 {
 		if _, e := io.CopyN(w, readCloser, hrange.length); e != nil {
 			errorIf(probe.NewError(e), "Writing to client failed", nil)
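The reordering matters: headers now go out only after GetObject succeeds, so a late failure can still yield a proper error response. A standalone sketch of the streaming tail (streamObject is an invented name; length <= 0 is taken to mean no Range was requested):

```
package main

import (
	"io"
	"net/http"
)

// streamObject writes the object body, bounding the copy for ranged requests.
func streamObject(w http.ResponseWriter, rc io.ReadCloser, length int64) error {
	defer rc.Close() // close once the response is written
	if length > 0 {
		_, e := io.CopyN(w, rc, length) // ranged: copy exactly `length` bytes
		return e
	}
	_, e := io.Copy(w, rc) // full object
	return e
}
```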
@@ -264,7 +265,7 @@ func (api objectStorageAPI) HeadObjectHandler(w http.ResponseWriter, r *http.Req
 		}
 	}

-	objectInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object)
+	objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object)
 	if err != nil {
 		errorIf(err.Trace(bucket, object), "GetObjectInfo failed.", nil)
 		switch err.ToGoError().(type) {
@@ -282,11 +283,8 @@ func (api objectStorageAPI) HeadObjectHandler(w http.ResponseWriter, r *http.Req
 		return
 	}

-	// Set standard object headers.
-	setObjectHeaders(w, objectInfo, nil)
-
 	// Verify 'If-Modified-Since' and 'If-Unmodified-Since'.
-	lastModified := objectInfo.ModifiedTime
+	lastModified := objInfo.ModifiedTime
 	if checkLastModified(w, r, lastModified) {
 		return
 	}
@@ -296,6 +294,9 @@ func (api objectStorageAPI) HeadObjectHandler(w http.ResponseWriter, r *http.Req
 		return
 	}

+	// Set standard object headers.
+	setObjectHeaders(w, objInfo, nil)
+
 	// Successfull response.
 	w.WriteHeader(http.StatusOK)
 }
@@ -357,7 +358,7 @@ func (api objectStorageAPI) CopyObjectHandler(w http.ResponseWriter, r *http.Req
 		return
 	}

-	objectInfo, err := api.ObjectAPI.GetObjectInfo(sourceBucket, sourceObject)
+	objInfo, err := api.ObjectAPI.GetObjectInfo(sourceBucket, sourceObject)
 	if err != nil {
 		errorIf(err.Trace(), "GetObjectInfo failed.", nil)
 		switch err.ToGoError().(type) {
@@ -378,7 +379,7 @@ func (api objectStorageAPI) CopyObjectHandler(w http.ResponseWriter, r *http.Req

 	// Verify x-amz-copy-source-if-modified-since and
 	// x-amz-copy-source-if-unmodified-since.
-	lastModified := objectInfo.ModifiedTime
+	lastModified := objInfo.ModifiedTime
 	if checkCopySourceLastModified(w, r, lastModified) {
 		return
 	}
@@ -390,15 +391,15 @@ func (api objectStorageAPI) CopyObjectHandler(w http.ResponseWriter, r *http.Req
 	}

 	/// maximum Upload size for object in a single CopyObject operation.
-	if isMaxObjectSize(objectInfo.Size) {
+	if isMaxObjectSize(objInfo.Size) {
 		writeErrorResponse(w, r, ErrEntityTooLarge, objectSource)
 		return
 	}

 	var md5Bytes []byte
-	if objectInfo.MD5Sum != "" {
+	if objInfo.MD5Sum != "" {
 		var e error
-		md5Bytes, e = hex.DecodeString(objectInfo.MD5Sum)
+		md5Bytes, e = hex.DecodeString(objInfo.MD5Sum)
 		if e != nil {
 			errorIf(probe.NewError(e), "Decoding md5 failed.", nil)
 			writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path)
@@ -421,16 +422,15 @@ func (api objectStorageAPI) CopyObjectHandler(w http.ResponseWriter, r *http.Req
 		}
 		return
 	}
-
 	// Size of object.
-	size := objectInfo.Size
+	size := objInfo.Size

 	// Save metadata.
 	metadata := make(map[string]string)
 	metadata["md5Sum"] = hex.EncodeToString(md5Bytes)

 	// Create the object.
-	objectInfo, err = api.ObjectAPI.PutObject(bucket, object, size, readCloser, metadata)
+	objInfo, err = api.ObjectAPI.PutObject(bucket, object, size, readCloser, metadata)
 	if err != nil {
 		errorIf(err.Trace(), "PutObject failed.", nil)
 		switch err.ToGoError().(type) {
@@ -451,7 +451,7 @@ func (api objectStorageAPI) CopyObjectHandler(w http.ResponseWriter, r *http.Req
 		}
 		return
 	}
-	response := generateCopyObjectResponse(objectInfo.MD5Sum, objectInfo.ModifiedTime)
+	response := generateCopyObjectResponse(objInfo.MD5Sum, objInfo.ModifiedTime)
 	encodedSuccessResponse := encodeResponse(response)
 	// write headers
 	setCommonHeaders(w)
@@ -586,7 +586,7 @@ func (api objectStorageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Requ
 		return
 	}

-	var objectInfo ObjectInfo
+	var objInfo ObjectInfo
 	switch getRequestAuthType(r) {
 	default:
 		// For all unknown auth types return error.
@@ -599,7 +599,7 @@ func (api objectStorageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Requ
 			return
 		}
 		// Create anonymous object.
-		objectInfo, err = api.ObjectAPI.PutObject(bucket, object, size, r.Body, nil)
+		objInfo, err = api.ObjectAPI.PutObject(bucket, object, size, r.Body, nil)
 	case authTypePresigned:
 		validateRegion := true // Validate region.
 		// For presigned requests verify them right here.
@@ -608,7 +608,7 @@ func (api objectStorageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Requ
 			return
 		}
 		// Create presigned object.
-		objectInfo, err = api.ObjectAPI.PutObject(bucket, object, size, r.Body, nil)
+		objInfo, err = api.ObjectAPI.PutObject(bucket, object, size, r.Body, nil)
 	case authTypeSigned:
 		// Initialize a pipe for data pipe line.
 		reader, writer := io.Pipe()
@@ -637,10 +637,10 @@ func (api objectStorageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Requ

 		// Save metadata.
 		metadata := make(map[string]string)
-		metadata["md5Sum"] = hex.EncodeToString(md5Bytes)
-
+		// Make sure we hex encode here.
+		metadata["md5"] = hex.EncodeToString(md5Bytes)
 		// Create object.
-		objectInfo, err = api.ObjectAPI.PutObject(bucket, object, size, reader, metadata)
+		objInfo, err = api.ObjectAPI.PutObject(bucket, object, size, reader, metadata)
 	}
 	if err != nil {
 		errorIf(err.Trace(), "PutObject failed.", nil)
@@ -668,8 +668,8 @@ func (api objectStorageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Requ
 		}
 		return
 	}
-	if objectInfo.MD5Sum != "" {
-		w.Header().Set("ETag", "\""+objectInfo.MD5Sum+"\"")
+	if objInfo.MD5Sum != "" {
+		w.Header().Set("ETag", "\""+objInfo.MD5Sum+"\"")
 	}
 	writeSuccessResponse(w, nil)
 }
@@ -868,7 +868,7 @@ func (api objectStorageAPI) AbortMultipartUploadHandler(w http.ResponseWriter, r
 		}
 	}

-	uploadID := getUploadID(r.URL.Query()) // Get upload id.
+	uploadID, _, _, _ := getObjectResources(r.URL.Query())
 	err := api.ObjectAPI.AbortMultipartUpload(bucket, object, uploadID)
 	if err != nil {
 		errorIf(err.Trace(), "AbortMutlipartUpload failed.", nil)
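The multipart handlers now destructure getObjectResources directly instead of threading a resources struct through. The refactored helper itself is outside this diff; a plausible shape, inferred only from the call sites `uploadID, partNumberMarker, maxParts, _ := getObjectResources(r.URL.Query())`:

```
package main

import (
	"net/url"
	"strconv"
)

// Assumed sketch of the refactored helper — inferred from its call sites,
// not shown in this diff; query parameter names follow the S3 convention.
func getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string) {
	uploadID = values.Get("uploadId")
	partNumberMarker, _ = strconv.Atoi(values.Get("part-number-marker"))
	maxParts, _ = strconv.Atoi(values.Get("max-parts"))
	encodingType = values.Get("encoding-type")
	return
}
```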
@@ -915,20 +915,20 @@ func (api objectStorageAPI) ListObjectPartsHandler(w http.ResponseWriter, r *htt
 		}
 	}

-	objectResourcesMetadata := getObjectResources(r.URL.Query())
-	if objectResourcesMetadata.PartNumberMarker < 0 {
+	uploadID, partNumberMarker, maxParts, _ := getObjectResources(r.URL.Query())
+	if partNumberMarker < 0 {
 		writeErrorResponse(w, r, ErrInvalidPartNumberMarker, r.URL.Path)
 		return
 	}
-	if objectResourcesMetadata.MaxParts < 0 {
+	if maxParts < 0 {
 		writeErrorResponse(w, r, ErrInvalidMaxParts, r.URL.Path)
 		return
 	}
-	if objectResourcesMetadata.MaxParts == 0 {
-		objectResourcesMetadata.MaxParts = maxPartsList
+	if maxParts == 0 {
+		maxParts = maxPartsList
 	}

-	objectResourcesMetadata, err := api.ObjectAPI.ListObjectParts(bucket, object, objectResourcesMetadata)
+	listPartsInfo, err := api.ObjectAPI.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
 	if err != nil {
 		errorIf(err.Trace(), "ListObjectParts failed.", nil)
 		switch err.ToGoError().(type) {
@@ -947,7 +947,7 @@ func (api objectStorageAPI) ListObjectPartsHandler(w http.ResponseWriter, r *htt
 		}
 		return
 	}
-	response := generateListPartsResponse(objectResourcesMetadata)
+	response := generateListPartsResponse(listPartsInfo)
 	encodedSuccessResponse := encodeResponse(response)
 	// Write headers.
 	setCommonHeaders(w)
@@ -962,8 +962,10 @@ func (api objectStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter
 	object := vars["object"]

 	// Get upload id.
-	uploadID := getUploadID(r.URL.Query()) // Get upload id.
+	uploadID, _, _, _ := getObjectResources(r.URL.Query())

+	var objInfo ObjectInfo
+	var err *probe.Error
 	switch getRequestAuthType(r) {
 	default:
 		// For all unknown auth types return error.
@@ -987,20 +989,20 @@ func (api objectStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter
 		writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
 		return
 	}
-	completeMultipartUpload := &CompleteMultipartUpload{}
-	if e = xml.Unmarshal(completeMultipartBytes, completeMultipartUpload); e != nil {
+	complMultipartUpload := &completeMultipartUpload{}
+	if e = xml.Unmarshal(completeMultipartBytes, complMultipartUpload); e != nil {
 		writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path)
 		return
 	}
-	if !sort.IsSorted(completedParts(completeMultipartUpload.Parts)) {
+	if !sort.IsSorted(completedParts(complMultipartUpload.Parts)) {
 		writeErrorResponse(w, r, ErrInvalidPartOrder, r.URL.Path)
 		return
 	}
 	// Complete parts.
-	completeParts := completeMultipartUpload.Parts
+	completeParts := complMultipartUpload.Parts

 	// Complete multipart upload.
-	objectInfo, err := api.ObjectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts)
+	objInfo, err = api.ObjectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts)
 	if err != nil {
 		errorIf(err.Trace(), "CompleteMultipartUpload failed.", nil)
 		switch err.ToGoError().(type) {
@@ -1020,8 +1022,6 @@ func (api objectStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter
 			writeErrorResponse(w, r, ErrInvalidPartOrder, r.URL.Path)
 		case IncompleteBody:
 			writeErrorResponse(w, r, ErrIncompleteBody, r.URL.Path)
-		case MalformedXML:
-			writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path)
 		default:
 			writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
 		}
@@ -1030,7 +1030,7 @@ func (api objectStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter
 	// Get object location.
 	location := getLocation(r)
 	// Generate complete multipart response.
-	response := generateCompleteMultpartUploadResponse(bucket, object, location, objectInfo.MD5Sum)
+	response := generateCompleteMultpartUploadResponse(bucket, object, location, objInfo.MD5Sum)
 	encodedSuccessResponse := encodeResponse(response)
 	// Write headers.
 	setCommonHeaders(w)
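For reference, a sketch of the decode-and-validate step this handler performs on the request body, using only the types introduced in this change (parseCompleteBody is an invented name):

```
package main

import (
	"encoding/xml"
	"errors"
	"sort"
)

// parseCompleteBody decodes a CompleteMultipartUpload request body and
// enforces ascending part order, mirroring the handler above.
//
// Example input:
//   <CompleteMultipartUpload>
//     <Part><PartNumber>1</PartNumber><ETag>"etag-1"</ETag></Part>
//     <Part><PartNumber>2</PartNumber><ETag>"etag-2"</ETag></Part>
//   </CompleteMultipartUpload>
func parseCompleteBody(body []byte) (*completeMultipartUpload, error) {
	cmu := &completeMultipartUpload{}
	if e := xml.Unmarshal(body, cmu); e != nil {
		return nil, e // the handler maps this to ErrMalformedXML
	}
	if !sort.IsSorted(completedParts(cmu.Parts)) {
		return nil, errors.New("parts not in ascending PartNumber order")
	}
	return cmu, nil
}
```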
@@ -293,7 +293,6 @@ func serverMain(c *cli.Context) {
 	printListenIPs(apiServer)

 	console.Println("\nTo configure Minio Client:")
-
 	// Download 'mc' links.
 	if runtime.GOOS == "windows" {
 		console.Println(" Download 'mc' from https://dl.minio.io/client/mc/release/" + runtime.GOOS + "-" + runtime.GOARCH + "/mc.exe")
@@ -881,7 +881,7 @@ func (s *MyAPISuite) TestPartialContent(c *C) {
 	c.Assert(err, IsNil)
 	c.Assert(response.StatusCode, Equals, http.StatusOK)

-	// prepare request
+	// Prepare request
 	request, err = s.newRequest("GET", testAPIFSCacheServer.URL+"/partial-content/bar", 0, nil)
 	c.Assert(err, IsNil)
 	request.Header.Add("Range", "bytes=6-7")
@@ -1288,8 +1288,8 @@ func (s *MyAPISuite) TestObjectMultipart(c *C) {
 	c.Assert(response2.StatusCode, Equals, http.StatusOK)

 	// Complete multipart upload
-	completeUploads := &CompleteMultipartUpload{
-		Parts: []CompletePart{
+	completeUploads := &completeMultipartUpload{
+		Parts: []completePart{
 			{
 				PartNumber: 1,
 				ETag:       response1.Header.Get("ETag"),
@@ -139,12 +139,12 @@ func (web *webAPI) MakeBucket(r *http.Request, args *MakeBucketArgs, reply *Gene

 // ListBucketsRep - list buckets response
 type ListBucketsRep struct {
-	Buckets   []BketInfo `json:"buckets"`
-	UIVersion string     `json:"uiVersion"`
+	Buckets   []WebBucketInfo `json:"buckets"`
+	UIVersion string          `json:"uiVersion"`
 }

-// BketInfo container for list buckets.
-type BketInfo struct {
+// WebBucketInfo container for list buckets metadata.
+type WebBucketInfo struct {
 	// The name of the bucket.
 	Name string `json:"name"`
 	// Date the bucket was created.
@@ -163,7 +163,7 @@ func (web *webAPI) ListBuckets(r *http.Request, args *GenericArgs, reply *ListBu
 	for _, bucket := range buckets {
 		// List all buckets which are not private.
 		if bucket.Name != path.Base(reservedBucket) {
-			reply.Buckets = append(reply.Buckets, BketInfo{
+			reply.Buckets = append(reply.Buckets, WebBucketInfo{
 				Name:         bucket.Name,
 				CreationDate: bucket.Created,
 			})
@@ -181,12 +181,12 @@ type ListObjectsArgs struct {

 // ListObjectsRep - list objects response.
 type ListObjectsRep struct {
-	Objects   []ObjInfo `json:"objects"`
-	UIVersion string    `json:"uiVersion"`
+	Objects   []WebObjectInfo `json:"objects"`
+	UIVersion string          `json:"uiVersion"`
 }

-// ObjInfo container for list objects.
-type ObjInfo struct {
+// WebObjectInfo container for list objects metadata.
+type WebObjectInfo struct {
 	// Name of the object
 	Key string `json:"name"`
 	// Date and time the object was last modified.
@@ -210,14 +210,14 @@ func (web *webAPI) ListObjects(r *http.Request, args *ListObjectsArgs, reply *Li
 	}
 	marker = lo.NextMarker
 	for _, obj := range lo.Objects {
-		reply.Objects = append(reply.Objects, ObjInfo{
+		reply.Objects = append(reply.Objects, WebObjectInfo{
 			Key:          obj.Name,
 			LastModified: obj.ModifiedTime,
 			Size:         obj.Size,
 		})
 	}
 	for _, prefix := range lo.Prefixes {
-		reply.Objects = append(reply.Objects, ObjInfo{
+		reply.Objects = append(reply.Objects, WebObjectInfo{
 			Key: prefix,
 		})
 	}