mirror of https://github.com/minio/minio.git (synced 2025-10-29 15:55:00 -04:00)

parent c81f4b0228
commit 52751d81cb
@@ -60,20 +60,20 @@ func encodeResponse(response interface{}) []byte {
 }
 
 // Write object header
-func setObjectHeaders(w http.ResponseWriter, metadata fs.ObjectMetadata, contentRange *httpRange) {
+func setObjectHeaders(w http.ResponseWriter, objectInfo fs.ObjectInfo, contentRange *httpRange) {
 	// set common headers
 	setCommonHeaders(w)
 
 	// set object-related metadata headers
-	lastModified := metadata.LastModified.UTC().Format(http.TimeFormat)
+	lastModified := objectInfo.ModifiedTime.UTC().Format(http.TimeFormat)
 	w.Header().Set("Last-Modified", lastModified)
 
-	w.Header().Set("Content-Type", metadata.ContentType)
-	if metadata.MD5 != "" {
-		w.Header().Set("ETag", "\""+metadata.MD5+"\"")
+	w.Header().Set("Content-Type", objectInfo.ContentType)
+	if objectInfo.MD5Sum != "" {
+		w.Header().Set("ETag", "\""+objectInfo.MD5Sum+"\"")
 	}
 
-	w.Header().Set("Content-Length", strconv.FormatInt(metadata.Size, 10))
+	w.Header().Set("Content-Length", strconv.FormatInt(objectInfo.Size, 10))
 
 	// for providing ranged content
 	if contentRange != nil {
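Note: the hunk above keeps the header-writing behavior unchanged while renaming the fields. For reference, a minimal standalone sketch of the same formatting (the sample ModifiedTime and MD5Sum values are hypothetical):

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Hypothetical values standing in for fs.ObjectInfo fields.
	modifiedTime := time.Date(2016, 1, 25, 0, 0, 0, 0, time.UTC)
	md5Sum := "9a0364b9e99bb480dd25e1f0284c8555"

	// Same formatting the handler uses: RFC 7231 date and a quoted ETag.
	fmt.Println(modifiedTime.UTC().Format(http.TimeFormat)) // Mon, 25 Jan 2016 00:00:00 GMT
	fmt.Println("\"" + md5Sum + "\"")                       // "9a0364b9e99bb480dd25e1f0284c8555"
}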
@@ -247,7 +247,7 @@ func getLocation(r *http.Request) string {
 //
 // output:
 // populated struct that can be serialized to match xml and json api spec output
-func generateListBucketsResponse(buckets []fs.BucketMetadata) ListBucketsResponse {
+func generateListBucketsResponse(buckets []fs.BucketInfo) ListBucketsResponse {
 	var listbuckets []Bucket
 	var data = ListBucketsResponse{}
 	var owner = Owner{}
@@ -280,13 +280,13 @@ func generateListObjectsResponse(bucket, prefix, marker, delimiter string, maxKe
 
 	for _, object := range resp.Objects {
 		var content = Object{}
-		if object.Object == "" {
+		if object.Name == "" {
 			continue
 		}
-		content.Key = object.Object
-		content.LastModified = object.LastModified.UTC().Format(timeFormatAMZ)
-		if object.MD5 != "" {
-			content.ETag = "\"" + object.MD5 + "\""
+		content.Key = object.Name
+		content.LastModified = object.ModifiedTime.UTC().Format(timeFormatAMZ)
+		if object.MD5Sum != "" {
+			content.ETag = "\"" + object.MD5Sum + "\""
 		}
 		content.Size = object.Size
 		content.StorageClass = "STANDARD"
@@ -101,9 +101,9 @@ func (api storageAPI) GetBucketLocationHandler(w http.ResponseWriter, r *http.Re
 		}
 	}
 
-	_, err := api.Filesystem.GetBucketMetadata(bucket)
+	_, err := api.Filesystem.GetBucketInfo(bucket)
 	if err != nil {
-		errorIf(err.Trace(), "GetBucketMetadata failed.", nil)
+		errorIf(err.Trace(), "GetBucketInfo failed.", nil)
 		switch err.ToGoError().(type) {
 		case fs.BucketNotFound:
 			writeErrorResponse(w, r, ErrNoSuchBucket, r.URL.Path)
@@ -564,7 +564,7 @@ func (api storageAPI) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Req
 		writeErrorResponse(w, r, ErrMalformedPOSTRequest, r.URL.Path)
 		return
 	}
-	metadata, err := api.Filesystem.CreateObject(bucket, object, "", -1, fileBody, nil)
+	objectInfo, err := api.Filesystem.CreateObject(bucket, object, "", -1, fileBody, nil)
 	if err != nil {
 		errorIf(err.Trace(), "CreateObject failed.", nil)
 		switch err.ToGoError().(type) {
@@ -585,8 +585,8 @@ func (api storageAPI) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Req
 		}
 		return
 	}
-	if metadata.MD5 != "" {
-		w.Header().Set("ETag", "\""+metadata.MD5+"\"")
+	if objectInfo.MD5Sum != "" {
+		w.Header().Set("ETag", "\""+objectInfo.MD5Sum+"\"")
 	}
 	writeSuccessResponse(w, nil)
 }
@@ -613,9 +613,9 @@ func (api storageAPI) HeadBucketHandler(w http.ResponseWriter, r *http.Request)
 		}
 	}
 
-	_, err := api.Filesystem.GetBucketMetadata(bucket)
+	_, err := api.Filesystem.GetBucketInfo(bucket)
 	if err != nil {
-		errorIf(err.Trace(), "GetBucketMetadata failed.", nil)
+		errorIf(err.Trace(), "GetBucketInfo failed.", nil)
 		switch err.ToGoError().(type) {
 		case fs.BucketNotFound:
 			writeErrorResponse(w, r, ErrNoSuchBucket, r.URL.Path)
@@ -79,7 +79,7 @@ func (api storageAPI) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
 		}
 	}
 
-	metadata, err := api.Filesystem.GetObjectMetadata(bucket, object)
+	objectInfo, err := api.Filesystem.GetObjectInfo(bucket, object)
 	if err != nil {
 		errorIf(err.Trace(), "GetObject failed.", nil)
 		switch err.ToGoError().(type) {
@@ -98,17 +98,18 @@ func (api storageAPI) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
 	}
 
 	var hrange *httpRange
-	hrange, err = getRequestedRange(r.Header.Get("Range"), metadata.Size)
+	hrange, err = getRequestedRange(r.Header.Get("Range"), objectInfo.Size)
 	if err != nil {
 		writeErrorResponse(w, r, ErrInvalidRange, r.URL.Path)
 		return
 	}
 
 	// Set standard object headers.
-	setObjectHeaders(w, metadata, hrange)
+	setObjectHeaders(w, objectInfo, hrange)
 
 	// Verify 'If-Modified-Since' and 'If-Unmodified-Since'.
-	if checkLastModified(w, r, metadata.LastModified) {
+	lastModified := objectInfo.ModifiedTime
+	if checkLastModified(w, r, lastModified) {
 		return
 	}
 	// Verify 'If-Match' and 'If-None-Match'.
@@ -236,8 +237,9 @@ func (api storageAPI) HeadObjectHandler(w http.ResponseWriter, r *http.Request)
 		return
 	}
 
-	metadata, err := api.Filesystem.GetObjectMetadata(bucket, object)
+	objectInfo, err := api.Filesystem.GetObjectInfo(bucket, object)
 	if err != nil {
+		errorIf(err.Trace(bucket, object), "GetObjectInfo failed.", nil)
 		switch err.ToGoError().(type) {
 		case fs.BucketNameInvalid:
 			writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
@@ -254,10 +256,11 @@ func (api storageAPI) HeadObjectHandler(w http.ResponseWriter, r *http.Request)
 	}
 
 	// Set standard object headers.
-	setObjectHeaders(w, metadata, nil)
+	setObjectHeaders(w, objectInfo, nil)
 
 	// Verify 'If-Modified-Since' and 'If-Unmodified-Since'.
-	if checkLastModified(w, r, metadata.LastModified) {
+	lastModified := objectInfo.ModifiedTime
+	if checkLastModified(w, r, lastModified) {
 		return
 	}
 
@@ -333,9 +336,9 @@ func (api storageAPI) CopyObjectHandler(w http.ResponseWriter, r *http.Request)
 		return
 	}
 
-	metadata, err := api.Filesystem.GetObjectMetadata(sourceBucket, sourceObject)
+	objectInfo, err := api.Filesystem.GetObjectInfo(sourceBucket, sourceObject)
 	if err != nil {
-		errorIf(err.Trace(), "GetObjectMetadata failed.", nil)
+		errorIf(err.Trace(), "GetObjectInfo failed.", nil)
 		switch err.ToGoError().(type) {
 		case fs.BucketNameInvalid:
 			writeErrorResponse(w, r, ErrInvalidBucketName, objectSource)
@@ -352,7 +355,7 @@ func (api storageAPI) CopyObjectHandler(w http.ResponseWriter, r *http.Request)
 	}
 
 	/// maximum Upload size for object in a single CopyObject operation.
-	if isMaxObjectSize(metadata.Size) {
+	if isMaxObjectSize(objectInfo.Size) {
 		writeErrorResponse(w, r, ErrEntityTooLarge, objectSource)
 		return
 	}
@@ -370,12 +373,12 @@ func (api storageAPI) CopyObjectHandler(w http.ResponseWriter, r *http.Request)
 	}()
 
 	// Verify md5sum.
-	expectedMD5Sum := metadata.MD5
+	expectedMD5Sum := objectInfo.MD5Sum
 	// Size of object.
-	size := metadata.Size
+	size := objectInfo.Size
 
 	// Create the object.
-	metadata, err = api.Filesystem.CreateObject(bucket, object, expectedMD5Sum, size, reader, nil)
+	objectInfo, err = api.Filesystem.CreateObject(bucket, object, expectedMD5Sum, size, reader, nil)
 	if err != nil {
 		errorIf(err.Trace(), "CreateObject failed.", nil)
 		switch err.ToGoError().(type) {
@@ -398,7 +401,7 @@ func (api storageAPI) CopyObjectHandler(w http.ResponseWriter, r *http.Request)
 		}
 		return
 	}
-	response := generateCopyObjectResponse(metadata.MD5, metadata.LastModified)
+	response := generateCopyObjectResponse(objectInfo.MD5Sum, objectInfo.ModifiedTime)
 	encodedSuccessResponse := encodeResponse(response)
 	// write headers
 	setCommonHeaders(w)
@@ -440,7 +443,7 @@ func (api storageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
 
 	// Set http request for signature.
 	auth := api.Signature.SetHTTPRequestToVerify(r)
-	var metadata fs.ObjectMetadata
+	var objectInfo fs.ObjectInfo
 	var err *probe.Error
 	switch getRequestAuthType(r) {
 	default:
@@ -454,7 +457,7 @@ func (api storageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 		// Create anonymous object.
-		metadata, err = api.Filesystem.CreateObject(bucket, object, md5, size, r.Body, nil)
+		objectInfo, err = api.Filesystem.CreateObject(bucket, object, md5, size, r.Body, nil)
 	case authTypePresigned:
 		// For presigned requests verify them right here.
 		var ok bool
@@ -469,10 +472,10 @@ func (api storageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 		// Create presigned object.
-		metadata, err = api.Filesystem.CreateObject(bucket, object, md5, size, r.Body, nil)
+		objectInfo, err = api.Filesystem.CreateObject(bucket, object, md5, size, r.Body, nil)
 	case authTypeSigned:
 		// Create object.
-		metadata, err = api.Filesystem.CreateObject(bucket, object, md5, size, r.Body, &auth)
+		objectInfo, err = api.Filesystem.CreateObject(bucket, object, md5, size, r.Body, &auth)
 	}
 	if err != nil {
 		errorIf(err.Trace(), "CreateObject failed.", nil)
@@ -498,8 +501,8 @@ func (api storageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
 		}
 		return
 	}
-	if metadata.MD5 != "" {
-		w.Header().Set("ETag", "\""+metadata.MD5+"\"")
+	if objectInfo.MD5Sum != "" {
+		w.Header().Set("ETag", "\""+objectInfo.MD5Sum+"\"")
 	}
 	writeSuccessResponse(w, nil)
 }
@@ -762,7 +765,7 @@ func (api storageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, r *h
 	// Set http request for signature.
 	auth := api.Signature.SetHTTPRequestToVerify(r)
 
-	var metadata fs.ObjectMetadata
+	var objectInfo fs.ObjectInfo
 	var err *probe.Error
 	switch getRequestAuthType(r) {
 	default:
@@ -776,7 +779,7 @@ func (api storageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, r *h
 			return
 		}
 		// Complete multipart upload anonymous.
-		metadata, err = api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, r.Body, nil)
+		objectInfo, err = api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, r.Body, nil)
 	case authTypePresigned:
 		// For presigned requests verify right here.
 		var ok bool
@@ -791,10 +794,10 @@ func (api storageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, r *h
 			return
 		}
 		// Complete multipart upload presigned.
-		metadata, err = api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, r.Body, nil)
+		objectInfo, err = api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, r.Body, nil)
 	case authTypeSigned:
 		// Complete multipart upload.
-		metadata, err = api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, r.Body, &auth)
+		objectInfo, err = api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, r.Body, &auth)
 	}
 	if err != nil {
 		errorIf(err.Trace(), "CompleteMultipartUpload failed.", nil)
@@ -827,7 +830,7 @@ func (api storageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, r *h
 	// get object location.
 	location := getLocation(r)
 	// Generate complete multipart response.
-	response := generateCompleteMultpartUploadResponse(bucket, object, location, metadata.MD5)
+	response := generateCompleteMultpartUploadResponse(bucket, object, location, objectInfo.MD5Sum)
 	encodedSuccessResponse := encodeResponse(response)
 	// write headers
 	setCommonHeaders(w)
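Note: both GetObjectHandler and HeadObjectHandler now read objectInfo.ModifiedTime into lastModified before the conditional-request checks. A hedged client-side sketch of how If-Modified-Since would interact with these handlers (endpoint and names are hypothetical, and the 304 behavior assumes checkLastModified follows the usual net/http convention):

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Hypothetical local endpoint and object path.
	req, err := http.NewRequest("GET", "http://localhost:9000/bucket/object", nil)
	if err != nil {
		panic(err)
	}
	// If the object was not modified after this instant, a conforming
	// server replies 304 Not Modified with no body.
	req.Header.Set("If-Modified-Since", time.Now().UTC().Format(http.TimeFormat))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.StatusCode) // 304 if unchanged, 200 otherwise
}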
@@ -86,9 +86,9 @@ func testMultipartObjectCreation(c *check.C, create func() Filesystem) {
 	}
 	completedPartsBytes, e := xml.Marshal(completedParts)
 	c.Assert(e, check.IsNil)
-	objectMetadata, err := fs.CompleteMultipartUpload("bucket", "key", uploadID, bytes.NewReader(completedPartsBytes), nil)
+	objectInfo, err := fs.CompleteMultipartUpload("bucket", "key", uploadID, bytes.NewReader(completedPartsBytes), nil)
 	c.Assert(err, check.IsNil)
-	c.Assert(objectMetadata.MD5, check.Equals, "9b7d6f13ba00e24d0b02de92e814891b-10")
+	c.Assert(objectInfo.MD5Sum, check.Equals, "9b7d6f13ba00e24d0b02de92e814891b-10")
 }
 
 func testMultipartObjectAbort(c *check.C, create func() Filesystem) {
@@ -141,9 +141,9 @@ func testMultipleObjectCreation(c *check.C, create func() Filesystem) {
 
 		key := "obj" + strconv.Itoa(i)
 		objects[key] = []byte(randomString)
-		objectMetadata, err := fs.CreateObject("bucket", key, expectedmd5Sum, int64(len(randomString)), bytes.NewBufferString(randomString), nil)
+		objectInfo, err := fs.CreateObject("bucket", key, expectedmd5Sum, int64(len(randomString)), bytes.NewBufferString(randomString), nil)
 		c.Assert(err, check.IsNil)
-		c.Assert(objectMetadata.MD5, check.Equals, expectedmd5Sumhex)
+		c.Assert(objectInfo.MD5Sum, check.Equals, expectedmd5Sumhex)
 	}
 
 	for key, value := range objects {
@@ -152,7 +152,7 @@ func testMultipleObjectCreation(c *check.C, create func() Filesystem) {
 		c.Assert(err, check.IsNil)
 		c.Assert(byteBuffer.Bytes(), check.DeepEquals, value)
 
-		metadata, err := fs.GetObjectMetadata("bucket", key)
+		metadata, err := fs.GetObjectInfo("bucket", key)
 		c.Assert(err, check.IsNil)
 		c.Assert(metadata.Size, check.Equals, int64(len(value)))
 	}
@@ -200,11 +200,11 @@ func testPaging(c *check.C, create func() Filesystem) {
 	{
 		result, err = fs.ListObjects("bucket", "", "", "", 1000)
 		c.Assert(err, check.IsNil)
-		c.Assert(result.Objects[0].Object, check.Equals, "newPrefix")
-		c.Assert(result.Objects[1].Object, check.Equals, "newPrefix2")
-		c.Assert(result.Objects[2].Object, check.Equals, "obj0")
-		c.Assert(result.Objects[3].Object, check.Equals, "obj1")
-		c.Assert(result.Objects[4].Object, check.Equals, "obj10")
+		c.Assert(result.Objects[0].Name, check.Equals, "newPrefix")
+		c.Assert(result.Objects[1].Name, check.Equals, "newPrefix2")
+		c.Assert(result.Objects[2].Name, check.Equals, "obj0")
+		c.Assert(result.Objects[3].Name, check.Equals, "obj1")
+		c.Assert(result.Objects[4].Name, check.Equals, "obj10")
 	}
 
 	// check delimited results with delimiter and prefix
@@ -224,11 +224,11 @@ func testPaging(c *check.C, create func() Filesystem) {
 	{
 		result, err = fs.ListObjects("bucket", "", "", "/", 1000)
 		c.Assert(err, check.IsNil)
-		c.Assert(result.Objects[0].Object, check.Equals, "newPrefix")
-		c.Assert(result.Objects[1].Object, check.Equals, "newPrefix2")
-		c.Assert(result.Objects[2].Object, check.Equals, "obj0")
-		c.Assert(result.Objects[3].Object, check.Equals, "obj1")
-		c.Assert(result.Objects[4].Object, check.Equals, "obj10")
+		c.Assert(result.Objects[0].Name, check.Equals, "newPrefix")
+		c.Assert(result.Objects[1].Name, check.Equals, "newPrefix2")
+		c.Assert(result.Objects[2].Name, check.Equals, "obj0")
+		c.Assert(result.Objects[3].Name, check.Equals, "obj1")
+		c.Assert(result.Objects[4].Name, check.Equals, "obj10")
 		c.Assert(result.Prefixes[0], check.Equals, "this/")
 	}
 
@@ -236,26 +236,26 @@ func testPaging(c *check.C, create func() Filesystem) {
 	{
 		result, err = fs.ListObjects("bucket", "", "newPrefix", "", 3)
 		c.Assert(err, check.IsNil)
-		c.Assert(result.Objects[0].Object, check.Equals, "newPrefix2")
-		c.Assert(result.Objects[1].Object, check.Equals, "obj0")
-		c.Assert(result.Objects[2].Object, check.Equals, "obj1")
+		c.Assert(result.Objects[0].Name, check.Equals, "newPrefix2")
+		c.Assert(result.Objects[1].Name, check.Equals, "obj0")
+		c.Assert(result.Objects[2].Name, check.Equals, "obj1")
 	}
 	// check ordering of results with prefix
 	{
 		result, err = fs.ListObjects("bucket", "obj", "", "", 1000)
 		c.Assert(err, check.IsNil)
-		c.Assert(result.Objects[0].Object, check.Equals, "obj0")
-		c.Assert(result.Objects[1].Object, check.Equals, "obj1")
-		c.Assert(result.Objects[2].Object, check.Equals, "obj10")
-		c.Assert(result.Objects[3].Object, check.Equals, "obj2")
-		c.Assert(result.Objects[4].Object, check.Equals, "obj3")
+		c.Assert(result.Objects[0].Name, check.Equals, "obj0")
+		c.Assert(result.Objects[1].Name, check.Equals, "obj1")
+		c.Assert(result.Objects[2].Name, check.Equals, "obj10")
+		c.Assert(result.Objects[3].Name, check.Equals, "obj2")
+		c.Assert(result.Objects[4].Name, check.Equals, "obj3")
 	}
 	// check ordering of results with prefix and no paging
 	{
 		result, err = fs.ListObjects("bucket", "new", "", "", 5)
 		c.Assert(err, check.IsNil)
-		c.Assert(result.Objects[0].Object, check.Equals, "newPrefix")
-		c.Assert(result.Objects[1].Object, check.Equals, "newPrefix2")
+		c.Assert(result.Objects[0].Name, check.Equals, "newPrefix")
+		c.Assert(result.Objects[1].Name, check.Equals, "newPrefix2")
 	}
 }
 
@@ -268,9 +268,9 @@ func testObjectOverwriteWorks(c *check.C, create func() Filesystem) {
 	hasher1.Write([]byte("one"))
 	md5Sum1 := base64.StdEncoding.EncodeToString(hasher1.Sum(nil))
 	md5Sum1hex := hex.EncodeToString(hasher1.Sum(nil))
-	objectMetadata, err := fs.CreateObject("bucket", "object", md5Sum1, int64(len("one")), bytes.NewBufferString("one"), nil)
+	objectInfo, err := fs.CreateObject("bucket", "object", md5Sum1, int64(len("one")), bytes.NewBufferString("one"), nil)
 	c.Assert(err, check.IsNil)
-	c.Assert(md5Sum1hex, check.Equals, objectMetadata.MD5)
+	c.Assert(md5Sum1hex, check.Equals, objectInfo.MD5Sum)
 
 	hasher2 := md5.New()
 	hasher2.Write([]byte("three"))
@@ -308,9 +308,9 @@ func testPutObjectInSubdir(c *check.C, create func() Filesystem) {
 	hasher.Write([]byte("hello world"))
 	md5Sum1 := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
 	md5Sum1hex := hex.EncodeToString(hasher.Sum(nil))
-	objectMetadata, err := fs.CreateObject("bucket", "dir1/dir2/object", md5Sum1, int64(len("hello world")), bytes.NewBufferString("hello world"), nil)
+	objectInfo, err := fs.CreateObject("bucket", "dir1/dir2/object", md5Sum1, int64(len("hello world")), bytes.NewBufferString("hello world"), nil)
 	c.Assert(err, check.IsNil)
-	c.Assert(objectMetadata.MD5, check.Equals, md5Sum1hex)
+	c.Assert(objectInfo.MD5Sum, check.Equals, md5Sum1hex)
 
 	var bytesBuffer bytes.Buffer
 	length, err := fs.GetObject(&bytesBuffer, "bucket", "dir1/dir2/object", 0, 0)
@@ -437,7 +437,7 @@ func testDefaultContentType(c *check.C, create func() Filesystem) {
 
 	// test empty
 	_, err = fs.CreateObject("bucket", "one", "", int64(len("one")), bytes.NewBufferString("one"), nil)
-	metadata, err := fs.GetObjectMetadata("bucket", "one")
+	metadata, err := fs.GetObjectInfo("bucket", "one")
 	c.Assert(err, check.IsNil)
 	c.Assert(metadata.ContentType, check.Equals, "application/octet-stream")
 }
@@ -85,9 +85,9 @@ func testMultipartObjectCreation(c *check.C, create func() Filesystem) {
 	}
 	completedPartsBytes, e := xml.Marshal(completedParts)
 	c.Assert(e, check.IsNil)
-	objectMetadata, err := fs.CompleteMultipartUpload("bucket", "key", uploadID, bytes.NewReader(completedPartsBytes), nil)
+	objectInfo, err := fs.CompleteMultipartUpload("bucket", "key", uploadID, bytes.NewReader(completedPartsBytes), nil)
 	c.Assert(err, check.IsNil)
-	c.Assert(objectMetadata.MD5, check.Equals, "9b7d6f13ba00e24d0b02de92e814891b-10")
+	c.Assert(objectInfo.MD5Sum, check.Equals, "9b7d6f13ba00e24d0b02de92e814891b-10")
 }
 
 func testMultipartObjectAbort(c *check.C, create func() Filesystem) {
@@ -140,9 +140,9 @@ func testMultipleObjectCreation(c *check.C, create func() Filesystem) {
 
 		key := "obj" + strconv.Itoa(i)
 		objects[key] = []byte(randomString)
-		objectMetadata, err := fs.CreateObject("bucket", key, expectedmd5Sum, int64(len(randomString)), bytes.NewBufferString(randomString), nil)
+		objectInfo, err := fs.CreateObject("bucket", key, expectedmd5Sum, int64(len(randomString)), bytes.NewBufferString(randomString), nil)
 		c.Assert(err, check.IsNil)
-		c.Assert(objectMetadata.MD5, check.Equals, expectedmd5Sumhex)
+		c.Assert(objectInfo.MD5Sum, check.Equals, expectedmd5Sumhex)
 	}
 
 	for key, value := range objects {
@@ -151,7 +151,7 @@ func testMultipleObjectCreation(c *check.C, create func() Filesystem) {
 		c.Assert(err, check.IsNil)
 		c.Assert(byteBuffer.Bytes(), check.DeepEquals, value)
 
-		metadata, err := fs.GetObjectMetadata("bucket", key)
+		metadata, err := fs.GetObjectInfo("bucket", key)
 		c.Assert(err, check.IsNil)
 		c.Assert(metadata.Size, check.Equals, int64(len(value)))
 	}
@@ -199,11 +199,11 @@ func testPaging(c *check.C, create func() Filesystem) {
 	{
 		result, err = fs.ListObjects("bucket", "", "", "", 1000)
 		c.Assert(err, check.IsNil)
-		c.Assert(result.Objects[0].Object, check.Equals, "newPrefix")
-		c.Assert(result.Objects[1].Object, check.Equals, "newPrefix2")
-		c.Assert(result.Objects[2].Object, check.Equals, "obj0")
-		c.Assert(result.Objects[3].Object, check.Equals, "obj1")
-		c.Assert(result.Objects[4].Object, check.Equals, "obj10")
+		c.Assert(result.Objects[0].Name, check.Equals, "newPrefix")
+		c.Assert(result.Objects[1].Name, check.Equals, "newPrefix2")
+		c.Assert(result.Objects[2].Name, check.Equals, "obj0")
+		c.Assert(result.Objects[3].Name, check.Equals, "obj1")
+		c.Assert(result.Objects[4].Name, check.Equals, "obj10")
 	}
 
 	// check delimited results with delimiter and prefix
@@ -222,11 +222,11 @@ func testPaging(c *check.C, create func() Filesystem) {
 	{
 		result, err = fs.ListObjects("bucket", "", "", "/", 1000)
 		c.Assert(err, check.IsNil)
-		c.Assert(result.Objects[0].Object, check.Equals, "newPrefix")
-		c.Assert(result.Objects[1].Object, check.Equals, "newPrefix2")
-		c.Assert(result.Objects[2].Object, check.Equals, "obj0")
-		c.Assert(result.Objects[3].Object, check.Equals, "obj1")
-		c.Assert(result.Objects[4].Object, check.Equals, "obj10")
+		c.Assert(result.Objects[0].Name, check.Equals, "newPrefix")
+		c.Assert(result.Objects[1].Name, check.Equals, "newPrefix2")
+		c.Assert(result.Objects[2].Name, check.Equals, "obj0")
+		c.Assert(result.Objects[3].Name, check.Equals, "obj1")
+		c.Assert(result.Objects[4].Name, check.Equals, "obj10")
 		c.Assert(result.Prefixes[0], check.Equals, "this/")
 	}
 
@@ -234,26 +234,26 @@ func testPaging(c *check.C, create func() Filesystem) {
 	{
 		result, err = fs.ListObjects("bucket", "", "newPrefix", "", 3)
 		c.Assert(err, check.IsNil)
-		c.Assert(result.Objects[0].Object, check.Equals, "newPrefix2")
-		c.Assert(result.Objects[1].Object, check.Equals, "obj0")
-		c.Assert(result.Objects[2].Object, check.Equals, "obj1")
+		c.Assert(result.Objects[0].Name, check.Equals, "newPrefix2")
+		c.Assert(result.Objects[1].Name, check.Equals, "obj0")
+		c.Assert(result.Objects[2].Name, check.Equals, "obj1")
 	}
 	// check ordering of results with prefix
 	{
 		result, err = fs.ListObjects("bucket", "obj", "", "", 1000)
 		c.Assert(err, check.IsNil)
-		c.Assert(result.Objects[0].Object, check.Equals, "obj0")
-		c.Assert(result.Objects[1].Object, check.Equals, "obj1")
-		c.Assert(result.Objects[2].Object, check.Equals, "obj10")
-		c.Assert(result.Objects[3].Object, check.Equals, "obj2")
-		c.Assert(result.Objects[4].Object, check.Equals, "obj3")
+		c.Assert(result.Objects[0].Name, check.Equals, "obj0")
+		c.Assert(result.Objects[1].Name, check.Equals, "obj1")
+		c.Assert(result.Objects[2].Name, check.Equals, "obj10")
+		c.Assert(result.Objects[3].Name, check.Equals, "obj2")
+		c.Assert(result.Objects[4].Name, check.Equals, "obj3")
 	}
 	// check ordering of results with prefix and no paging
 	{
 		result, err = fs.ListObjects("bucket", "new", "", "", 5)
 		c.Assert(err, check.IsNil)
-		c.Assert(result.Objects[0].Object, check.Equals, "newPrefix")
-		c.Assert(result.Objects[1].Object, check.Equals, "newPrefix2")
+		c.Assert(result.Objects[0].Name, check.Equals, "newPrefix")
+		c.Assert(result.Objects[1].Name, check.Equals, "newPrefix2")
 	}
 }
 
@@ -265,9 +265,9 @@ func testObjectOverwriteWorks(c *check.C, create func() Filesystem) {
 	hasher1.Write([]byte("one"))
 	md5Sum1 := base64.StdEncoding.EncodeToString(hasher1.Sum(nil))
 	md5Sum1hex := hex.EncodeToString(hasher1.Sum(nil))
-	objectMetadata, err := fs.CreateObject("bucket", "object", md5Sum1, int64(len("one")), bytes.NewBufferString("one"), nil)
+	objectInfo, err := fs.CreateObject("bucket", "object", md5Sum1, int64(len("one")), bytes.NewBufferString("one"), nil)
 	c.Assert(err, check.IsNil)
-	c.Assert(md5Sum1hex, check.Equals, objectMetadata.MD5)
+	c.Assert(md5Sum1hex, check.Equals, objectInfo.MD5Sum)
 
 	hasher2 := md5.New()
 	hasher2.Write([]byte("three"))
@@ -305,9 +305,9 @@ func testPutObjectInSubdir(c *check.C, create func() Filesystem) {
 	hasher.Write([]byte("hello world"))
 	md5Sum1 := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
 	md5Sum1hex := hex.EncodeToString(hasher.Sum(nil))
-	objectMetadata, err := fs.CreateObject("bucket", "dir1/dir2/object", md5Sum1, int64(len("hello world")), bytes.NewBufferString("hello world"), nil)
+	objectInfo, err := fs.CreateObject("bucket", "dir1/dir2/object", md5Sum1, int64(len("hello world")), bytes.NewBufferString("hello world"), nil)
 	c.Assert(err, check.IsNil)
-	c.Assert(objectMetadata.MD5, check.Equals, md5Sum1hex)
+	c.Assert(objectInfo.MD5Sum, check.Equals, md5Sum1hex)
 
 	var bytesBuffer bytes.Buffer
 	length, err := fs.GetObject(&bytesBuffer, "bucket", "dir1/dir2/object", 0, 0)
@@ -438,7 +438,7 @@ func testDefaultContentType(c *check.C, create func() Filesystem) {
 
 	// test empty
 	_, err = fs.CreateObject("bucket", "one", "", int64(len("one")), bytes.NewBufferString("one"), nil)
-	metadata, err := fs.GetObjectMetadata("bucket", "one")
+	metadata, err := fs.GetObjectInfo("bucket", "one")
 	c.Assert(err, check.IsNil)
 	c.Assert(metadata.ContentType, check.Equals, "application/octet-stream")
 }
@@ -75,9 +75,11 @@ func (f byName) Less(i, j int) bool {
 
 // ObjectInfo - object info
 type ObjectInfo struct {
+	Bucket       string
 	Name         string
 	ModifiedTime time.Time
-	Checksum     string
+	ContentType  string
+	MD5Sum       string
 	Size         int64
 	IsDir        bool
 	Err          error
@@ -99,26 +101,54 @@ func readDir(scanDir, namePrefix string) (objInfos []ObjectInfo) {
 		return
 	}
 
+	// Close the directory
+	f.Close()
+
 	// Sort files by Name.
 	sort.Sort(byName(fis))
 
-	// make []ObjectInfo from []FileInfo
+	// Populate []ObjectInfo from []FileInfo
 	for _, fi := range fis {
 		name := fi.Name()
+		size := fi.Size()
+		modTime := fi.ModTime()
+		isDir := fi.Mode().IsDir()
+
 		// Add prefix if name prefix exists.
 		if namePrefix != "" {
 			name = namePrefix + "/" + name
 		}
 
-		if fi.IsDir() {
+		// For directories explicitly end with '/'.
+		if isDir {
 			name += "/"
+			size = 0 // Size is set to '0' for directories explicitly.
 		}
 
+		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+			// Handle symlink by doing an additional stat and follow the link.
+			st, e := os.Stat(filepath.Join(scanDir, name))
+			if e != nil {
+				objInfos = append(objInfos, ObjectInfo{Err: err})
+				return
+			}
+			size = st.Size()
+			modTime = st.ModTime()
+			isDir = st.Mode().IsDir()
+			// For directories explicitly end with '/'.
+			if isDir {
+				name += "/"
+				size = 0 // Size is set to '0' for directories explicitly.
+			}
+		}
+
+		// Populate []ObjectInfo.
 		objInfos = append(objInfos, ObjectInfo{
 			Name:         name,
-			ModifiedTime: fi.ModTime(),
-			Checksum:     "",
-			Size:         fi.Size(),
-			IsDir:        fi.IsDir(),
+			ModifiedTime: modTime,
+			MD5Sum:       "", // TODO
+			Size:         size,
+			IsDir:        isDir,
 		})
 	}
 
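Note: the new symlink branch works because the FileInfo entries from Readdir describe the link itself (Lstat semantics), while os.Stat follows the link to its target. A standalone sketch of that distinction, with illustrative temporary paths:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func main() {
	dir, err := ioutil.TempDir("", "readdir-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	target := filepath.Join(dir, "target")
	if err := os.Mkdir(target, 0755); err != nil {
		panic(err)
	}
	link := filepath.Join(dir, "link")
	if err := os.Symlink(target, link); err != nil {
		panic(err)
	}

	// Lstat (what Readdir reports) sees a symlink, not a directory.
	li, _ := os.Lstat(link)
	fmt.Println(li.Mode()&os.ModeSymlink != 0, li.Mode().IsDir()) // true false

	// Stat follows the link, so the directory bit becomes visible.
	si, _ := os.Stat(link)
	fmt.Println(si.Mode().IsDir()) // true
}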
@@ -106,17 +106,15 @@ func (fs Filesystem) ListObjects(bucket, prefix, marker, delimiter string, maxKe
 			continue
 		}
 
+		// Add the bucket.
+		objInfo.Bucket = bucket
+
 		if strings.HasPrefix(objInfo.Name, prefix) {
 			if objInfo.Name > marker {
 				if objInfo.IsDir {
 					result.Prefixes = append(result.Prefixes, objInfo.Name)
 				} else {
-					result.Objects = append(result.Objects, ObjectMetadata{
-						Bucket:       bucket,
-						Object:       objInfo.Name,
-						LastModified: objInfo.ModifiedTime,
-						Size:         objInfo.Size,
-					})
+					result.Objects = append(result.Objects, objInfo)
 				}
 				nextMarker = objInfo.Name
 				i++
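Note: the listing loop above routes directories (IsDir, names ending in '/') into Prefixes and everything else into Objects, advancing nextMarker as it goes. A reduced sketch of the same prefix/marker filtering over a sorted name list (not the fs package's API, just the shape of the logic):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Already sorted, as readDir guarantees via sort.Sort(byName(fis)).
	names := []string{"a.txt", "b/", "b/c.txt", "d.txt"}
	prefix, marker := "", "a.txt"

	var prefixes, objects []string
	for _, name := range names {
		if strings.HasPrefix(name, prefix) && name > marker {
			if strings.HasSuffix(name, "/") { // directories end with '/'
				prefixes = append(prefixes, name)
			} else {
				objects = append(objects, name)
			}
		}
	}
	fmt.Println(prefixes) // [b/]
	fmt.Println(objects)  // [b/c.txt d.txt]
}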
@@ -21,6 +21,7 @@ import (
 	"os"
 	"path/filepath"
 	"strings"
+	"time"
 
 	"github.com/minio/minio/pkg/disk"
 	"github.com/minio/minio/pkg/probe"
@@ -55,13 +56,19 @@ func (fs Filesystem) DeleteBucket(bucket string) *probe.Error {
 	return nil
 }
 
+// BucketInfo - name and create date
+type BucketInfo struct {
+	Name    string
+	Created time.Time
+}
+
 // ListBuckets - Get service.
-func (fs Filesystem) ListBuckets() ([]BucketMetadata, *probe.Error) {
+func (fs Filesystem) ListBuckets() ([]BucketInfo, *probe.Error) {
 	files, e := ioutil.ReadDir(fs.path)
 	if e != nil {
-		return []BucketMetadata{}, probe.NewError(e)
+		return []BucketInfo{}, probe.NewError(e)
 	}
-	var metadataList []BucketMetadata
+	var metadataList []BucketInfo
 	for _, file := range files {
 		if !file.IsDir() {
 			// If not directory, ignore all file types.
@@ -72,7 +79,7 @@ func (fs Filesystem) ListBuckets() ([]BucketMetadata, *probe.Error) {
 		if !IsValidBucketName(dirName) {
 			continue
 		}
-		metadata := BucketMetadata{
+		metadata := BucketInfo{
 			Name:    dirName,
 			Created: file.ModTime(),
 		}
@@ -84,7 +91,7 @@ func (fs Filesystem) ListBuckets() ([]BucketMetadata, *probe.Error) {
 }
 
 // removeDuplicateBuckets - remove duplicate buckets.
-func removeDuplicateBuckets(buckets []BucketMetadata) []BucketMetadata {
+func removeDuplicateBuckets(buckets []BucketInfo) []BucketInfo {
 	length := len(buckets) - 1
 	for i := 0; i < length; i++ {
 		for j := i + 1; j <= length; j++ {
@@ -153,10 +160,10 @@ func (fs Filesystem) denormalizeBucket(bucket string) string {
 	return bucket
 }
 
-// GetBucketMetadata - get bucket metadata.
-func (fs Filesystem) GetBucketMetadata(bucket string) (BucketMetadata, *probe.Error) {
+// GetBucketInfo - get bucket metadata.
+func (fs Filesystem) GetBucketInfo(bucket string) (BucketInfo, *probe.Error) {
 	if !IsValidBucketName(bucket) {
-		return BucketMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
+		return BucketInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
 	}
 	bucket = fs.denormalizeBucket(bucket)
 	// Get bucket path.
@@ -165,11 +172,11 @@ func (fs Filesystem) GetBucketMetadata(bucket string) (BucketMetadata, *probe.Er
 	if e != nil {
 		// Check if bucket exists.
 		if os.IsNotExist(e) {
-			return BucketMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
+			return BucketInfo{}, probe.NewError(BucketNotFound{Bucket: bucket})
 		}
-		return BucketMetadata{}, probe.NewError(e)
+		return BucketInfo{}, probe.NewError(e)
 	}
-	bucketMetadata := BucketMetadata{}
+	bucketMetadata := BucketInfo{}
 	bucketMetadata.Name = fi.Name()
 	bucketMetadata.Created = fi.ModTime()
 	return bucketMetadata, nil
@@ -153,7 +153,7 @@ func BenchmarkDeleteBucket(b *testing.B) {
 	}
 }
 
-func BenchmarkGetBucketMetadata(b *testing.B) {
+func BenchmarkGetBucketInfo(b *testing.B) {
 	// Make a temporary directory to use as the filesystem.
 	directory, fserr := ioutil.TempDir("", "minio-benchmark")
 	if fserr != nil {
@@ -177,7 +177,7 @@ func BenchmarkGetBucketMetadata(b *testing.B) {
 
 	for i := 0; i < b.N; i++ {
 		// Retrieve the metadata!
-		_, err := filesystem.GetBucketMetadata("bucket")
+		_, err := filesystem.GetBucketInfo("bucket")
 		if err != nil {
 			b.Fatal(err)
 		}
@@ -16,28 +16,7 @@
 
 package fs
 
-import (
-	"os"
-	"time"
-)
-
-// BucketMetadata - name and create date
-type BucketMetadata struct {
-	Name    string
-	Created time.Time
-}
-
-// ObjectMetadata - object key and its relevant metadata
-type ObjectMetadata struct {
-	Bucket string
-	Object string
-
-	ContentType  string
-	LastModified time.Time
-	Mode         os.FileMode
-	MD5          string
-	Size         int64
-}
+import "time"
 
 // PartMetadata - various types of individual part resources
 type PartMetadata struct {
@@ -89,7 +68,7 @@ type BucketMultipartResourcesMetadata struct {
 type ListObjectsResult struct {
 	IsTruncated bool
 	NextMarker  string
-	Objects     []ObjectMetadata
+	Objects     []ObjectInfo
 	Prefixes    []string
 }
@@ -53,7 +53,7 @@ func (fs Filesystem) isValidUploadID(object, uploadID string) (ok bool) {
 	return
 }
 
-// byObjectMetadataKey is a sortable interface for UploadMetadata slice
+// byObjectInfoKey is a sortable interface for UploadMetadata slice
 type byUploadMetadataKey []*UploadMetadata
 
 func (b byUploadMetadataKey) Len() int           { return len(b) }
@@ -456,20 +456,20 @@ func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum s
 }
 
 // CompleteMultipartUpload - complete a multipart upload and persist the data
-func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *signature4.Sign) (ObjectMetadata, *probe.Error) {
+func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *signature4.Sign) (ObjectInfo, *probe.Error) {
 	// Check bucket name is valid.
 	if !IsValidBucketName(bucket) {
-		return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
+		return ObjectInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
 	}
 
 	// Verify object path is legal.
 	if !IsValidObjectName(object) {
-		return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
+		return ObjectInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
 	}
 
 	// Verify if valid upload for incoming object.
 	if !fs.isValidUploadID(object, uploadID) {
-		return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
+		return ObjectInfo{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
 	}
 
 	bucket = fs.denormalizeBucket(bucket)
@@ -477,21 +477,21 @@ func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, da
 	if _, e := os.Stat(bucketPath); e != nil {
 		// Check bucket exists.
 		if os.IsNotExist(e) {
-			return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
+			return ObjectInfo{}, probe.NewError(BucketNotFound{Bucket: bucket})
 		}
-		return ObjectMetadata{}, probe.NewError(InternalError{})
+		return ObjectInfo{}, probe.NewError(InternalError{})
 	}
 
 	objectPath := filepath.Join(bucketPath, object)
 	objectWriter, e := atomic.FileCreateWithPrefix(objectPath, "$tmpobject")
 	if e != nil {
-		return ObjectMetadata{}, probe.NewError(e)
+		return ObjectInfo{}, probe.NewError(e)
 	}
 
 	partBytes, e := ioutil.ReadAll(data)
 	if e != nil {
 		objectWriter.CloseAndPurge()
-		return ObjectMetadata{}, probe.NewError(e)
+		return ObjectInfo{}, probe.NewError(e)
 	}
 	if signature != nil {
 		sh := sha256.New()
@@ -499,21 +499,21 @@ func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, da
 		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
 		if err != nil {
 			objectWriter.CloseAndPurge()
-			return ObjectMetadata{}, err.Trace()
+			return ObjectInfo{}, err.Trace()
 		}
 		if !ok {
 			objectWriter.CloseAndPurge()
-			return ObjectMetadata{}, probe.NewError(SignDoesNotMatch{})
+			return ObjectInfo{}, probe.NewError(SignDoesNotMatch{})
 		}
 	}
 	completeMultipartUpload := &CompleteMultipartUpload{}
 	if e = xml.Unmarshal(partBytes, completeMultipartUpload); e != nil {
 		objectWriter.CloseAndPurge()
-		return ObjectMetadata{}, probe.NewError(MalformedXML{})
+		return ObjectInfo{}, probe.NewError(MalformedXML{})
 	}
 	if !sort.IsSorted(completedParts(completeMultipartUpload.Part)) {
 		objectWriter.CloseAndPurge()
-		return ObjectMetadata{}, probe.NewError(InvalidPartOrder{})
+		return ObjectInfo{}, probe.NewError(InvalidPartOrder{})
 	}
 
 	// Save parts for verification.
@@ -526,14 +526,14 @@ func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, da
 
 	if !doPartsMatch(parts, savedParts) {
 		objectWriter.CloseAndPurge()
-		return ObjectMetadata{}, probe.NewError(InvalidPart{})
+		return ObjectInfo{}, probe.NewError(InvalidPart{})
 	}
 
 	// Parts successfully validated, save all the parts.
 	partPathPrefix := objectPath + uploadID
 	if err := saveParts(partPathPrefix, objectWriter, parts); err != nil {
 		objectWriter.CloseAndPurge()
-		return ObjectMetadata{}, err.Trace(partPathPrefix)
+		return ObjectInfo{}, err.Trace(partPathPrefix)
 	}
 	var md5Strs []string
 	for _, part := range savedParts {
@@ -543,7 +543,7 @@ func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, da
 	s3MD5, err := makeS3MD5(md5Strs...)
 	if err != nil {
 		objectWriter.CloseAndPurge()
-		return ObjectMetadata{}, err.Trace(md5Strs...)
+		return ObjectInfo{}, err.Trace(md5Strs...)
 	}
 
 	// Successfully saved multipart, remove all parts in a routine.
@@ -555,18 +555,18 @@ func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, da
 	if err := saveMultipartsSession(*fs.multiparts); err != nil {
 		fs.rwLock.Unlock()
 		objectWriter.CloseAndPurge()
-		return ObjectMetadata{}, err.Trace(partPathPrefix)
+		return ObjectInfo{}, err.Trace(partPathPrefix)
 	}
 	if e = objectWriter.Close(); e != nil {
 		fs.rwLock.Unlock()
-		return ObjectMetadata{}, probe.NewError(e)
+		return ObjectInfo{}, probe.NewError(e)
 	}
 	fs.rwLock.Unlock()
 
 	// Send stat again to get object metadata.
 	st, e := os.Stat(objectPath)
 	if e != nil {
-		return ObjectMetadata{}, probe.NewError(e)
+		return ObjectInfo{}, probe.NewError(e)
 	}
 
 	contentType := "application/octet-stream"
@@ -576,13 +576,13 @@ func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, da
 			contentType = content.ContentType
 		}
 	}
-	newObject := ObjectMetadata{
+	newObject := ObjectInfo{
 		Bucket:       bucket,
-		Object:       object,
-		LastModified: st.ModTime(),
+		Name:         object,
+		ModifiedTime: st.ModTime(),
 		Size:         st.Size(),
 		ContentType:  contentType,
-		MD5:          s3MD5,
+		MD5Sum:       s3MD5,
 	}
 	return newObject, nil
 }
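Note: makeS3MD5 is not shown in this diff, but the test expectation "9b7d6f13ba00e24d0b02de92e814891b-10" matches the usual S3 multipart ETag convention: MD5 over the concatenated binary part MD5s, suffixed with the part count. A sketch under that assumption (not necessarily minio's exact implementation):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"strconv"
)

// s3MD5 mimics the usual S3 multipart ETag: md5(concat(part MD5 bytes)) + "-" + N.
// Assumption: minio's makeS3MD5 follows this convention; it is not shown in the diff.
func s3MD5(partMD5Hexes ...string) (string, error) {
	h := md5.New()
	for _, p := range partMD5Hexes {
		b, err := hex.DecodeString(p)
		if err != nil {
			return "", err
		}
		h.Write(b)
	}
	return hex.EncodeToString(h.Sum(nil)) + "-" + strconv.Itoa(len(partMD5Hexes)), nil
}

func main() {
	etag, err := s3MD5(
		"5d41402abc4b2a76b9719d911017c592", // md5("hello"), illustrative part sums
		"7d793037a0760186574b0282f2f435e7", // md5("world")
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(etag) // hex digest suffixed with "-2"
}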
@@ -102,15 +102,15 @@ func (fs Filesystem) GetObject(w io.Writer, bucket, object string, start, length
 	return count, nil
 }
 
-// GetObjectMetadata - get object metadata.
-func (fs Filesystem) GetObjectMetadata(bucket, object string) (ObjectMetadata, *probe.Error) {
+// GetObjectInfo - get object info.
+func (fs Filesystem) GetObjectInfo(bucket, object string) (ObjectInfo, *probe.Error) {
 	// Input validation.
 	if !IsValidBucketName(bucket) {
-		return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
+		return ObjectInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
 	}
 
 	if !IsValidObjectName(object) {
-		return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: bucket})
+		return ObjectInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: bucket})
 	}
 
 	// Normalize buckets.
@@ -118,23 +118,20 @@ func (fs Filesystem) GetObjectMetadata(bucket, object string) (ObjectMetadata, *
 	bucketPath := filepath.Join(fs.path, bucket)
 	if _, e := os.Stat(bucketPath); e != nil {
 		if os.IsNotExist(e) {
-			return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
+			return ObjectInfo{}, probe.NewError(BucketNotFound{Bucket: bucket})
 		}
-		return ObjectMetadata{}, probe.NewError(e)
+		return ObjectInfo{}, probe.NewError(e)
 	}
 
-	metadata, err := getMetadata(fs.path, bucket, object)
+	info, err := getObjectInfo(fs.path, bucket, object)
 	if err != nil {
-		return ObjectMetadata{}, err.Trace(bucket, object)
+		return ObjectInfo{}, err.Trace(bucket, object)
 	}
-	if metadata.Mode.IsDir() {
-		return ObjectMetadata{}, probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
-	}
-	return metadata, nil
+	return info, nil
 }
 
-// getMetadata - get object metadata.
-func getMetadata(rootPath, bucket, object string) (ObjectMetadata, *probe.Error) {
+// getObjectInfo - get object stat info.
+func getObjectInfo(rootPath, bucket, object string) (ObjectInfo, *probe.Error) {
 	// Do not use filepath.Join() since filepath.Join strips off any
 	// object names with '/', use them as is in a static manner so
 	// that we can send a proper 'ObjectNotFound' reply back upon
@@ -149,9 +146,9 @@ func getMetadata(rootPath, bucket, object string) (ObjectMetadata, *probe.Error)
 	stat, err := os.Stat(objectPath)
 	if err != nil {
 		if os.IsNotExist(err) {
-			return ObjectMetadata{}, probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
+			return ObjectInfo{}, probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
 		}
-		return ObjectMetadata{}, probe.NewError(err)
+		return ObjectInfo{}, probe.NewError(err)
 	}
 	contentType := "application/octet-stream"
 	if runtime.GOOS == "windows" {
@@ -164,13 +161,13 @@ func getMetadata(rootPath, bucket, object string) (ObjectMetadata, *probe.Error)
 			contentType = content.ContentType
 		}
 	}
-	metadata := ObjectMetadata{
+	metadata := ObjectInfo{
 		Bucket:       bucket,
-		Object:       object,
-		LastModified: stat.ModTime(),
+		Name:         object,
+		ModifiedTime: stat.ModTime(),
 		Size:         stat.Size(),
 		ContentType:  contentType,
-		Mode:         stat.Mode(),
+		IsDir:        stat.Mode().IsDir(),
 	}
 	return metadata, nil
 }
@@ -199,36 +196,36 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) bool {
 }
 
 // CreateObject - create an object.
-func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size int64, data io.Reader, sig *signature4.Sign) (ObjectMetadata, *probe.Error) {
+func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size int64, data io.Reader, sig *signature4.Sign) (ObjectInfo, *probe.Error) {
 	di, e := disk.GetInfo(fs.path)
 	if e != nil {
-		return ObjectMetadata{}, probe.NewError(e)
+		return ObjectInfo{}, probe.NewError(e)
 	}
 
 	// Remove 5% from total space for cumulative disk space used for
 	// journalling, inodes etc.
 	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
 	if int64(availableDiskSpace) <= fs.minFreeDisk {
-		return ObjectMetadata{}, probe.NewError(RootPathFull{Path: fs.path})
+		return ObjectInfo{}, probe.NewError(RootPathFull{Path: fs.path})
 	}
 
 	// Check bucket name valid.
 	if !IsValidBucketName(bucket) {
-		return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
+		return ObjectInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
 	}
 
 	bucket = fs.denormalizeBucket(bucket)
 	bucketPath := filepath.Join(fs.path, bucket)
 	if _, e = os.Stat(bucketPath); e != nil {
 		if os.IsNotExist(e) {
-			return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
+			return ObjectInfo{}, probe.NewError(BucketNotFound{Bucket: bucket})
 		}
-		return ObjectMetadata{}, probe.NewError(e)
+		return ObjectInfo{}, probe.NewError(e)
 	}
 
 	// Verify object path legal.
 	if !IsValidObjectName(object) {
-		return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
+		return ObjectInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
 	}
 
 	// Get object path.
@@ -238,7 +235,7 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
 		expectedMD5SumBytes, e = base64.StdEncoding.DecodeString(expectedMD5Sum)
 		if e != nil {
 			// Pro-actively close the connection.
-			return ObjectMetadata{}, probe.NewError(InvalidDigest{MD5: expectedMD5Sum})
+			return ObjectInfo{}, probe.NewError(InvalidDigest{MD5: expectedMD5Sum})
 		}
 		expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
 	}
@@ -250,12 +247,12 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
 		case *os.PathError:
 			if e.Op == "mkdir" {
 				if strings.Contains(e.Error(), "not a directory") {
-					return ObjectMetadata{}, probe.NewError(ObjectExistsAsPrefix{Bucket: bucket, Prefix: object})
+					return ObjectInfo{}, probe.NewError(ObjectExistsAsPrefix{Bucket: bucket, Prefix: object})
 				}
 			}
-			return ObjectMetadata{}, probe.NewError(e)
+			return ObjectInfo{}, probe.NewError(e)
 		default:
-			return ObjectMetadata{}, probe.NewError(e)
+			return ObjectInfo{}, probe.NewError(e)
 		}
 	}
 
@@ -267,12 +264,12 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
 	if size > 0 {
 		if _, e = io.CopyN(objectWriter, data, size); e != nil {
 			file.CloseAndPurge()
-			return ObjectMetadata{}, probe.NewError(e)
+			return ObjectInfo{}, probe.NewError(e)
 		}
 	} else {
 		if _, e = io.Copy(objectWriter, data); e != nil {
 			file.CloseAndPurge()
-			return ObjectMetadata{}, probe.NewError(e)
+			return ObjectInfo{}, probe.NewError(e)
 		}
 	}
 
@@ -282,7 +279,7 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
 	if expectedMD5Sum != "" {
 		if !isMD5SumEqual(expectedMD5Sum, md5Sum) {
 			file.CloseAndPurge()
-			return ObjectMetadata{}, probe.NewError(BadDigest{MD5: expectedMD5Sum, Bucket: bucket, Object: object})
+			return ObjectInfo{}, probe.NewError(BadDigest{MD5: expectedMD5Sum, Bucket: bucket, Object: object})
 		}
 	}
 	sha256Sum := hex.EncodeToString(sha256Hasher.Sum(nil))
@@ -290,11 +287,11 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
 		ok, err := sig.DoesSignatureMatch(sha256Sum)
 		if err != nil {
 			file.CloseAndPurge()
-			return ObjectMetadata{}, err.Trace()
+			return ObjectInfo{}, err.Trace()
 		}
 		if !ok {
 			file.CloseAndPurge()
-			return ObjectMetadata{}, probe.NewError(SignDoesNotMatch{})
+			return ObjectInfo{}, probe.NewError(SignDoesNotMatch{})
 		}
 	}
 	file.Close()
@@ -302,7 +299,7 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
 	// Set stat again to get the latest metadata.
 	st, e := os.Stat(objectPath)
 	if e != nil {
-		return ObjectMetadata{}, probe.NewError(e)
+		return ObjectInfo{}, probe.NewError(e)
 	}
 	contentType := "application/octet-stream"
 	if objectExt := filepath.Ext(objectPath); objectExt != "" {
@@ -311,13 +308,13 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
 			contentType = content.ContentType
 		}
 	}
-	newObject := ObjectMetadata{
+	newObject := ObjectInfo{
 		Bucket:       bucket,
-		Object:       object,
-		LastModified: st.ModTime(),
+		Name:         object,
+		ModifiedTime: st.ModTime(),
 		Size:         st.Size(),
 		ContentType:  contentType,
-		MD5:          md5Sum,
+		MD5Sum:       md5Sum,
 	}
 	return newObject, nil
 }
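Note: getObjectInfo resolves the content type from the file extension with a fallback of application/octet-stream. The standard library's mime package expresses the same idea; shown here for comparison only (this is not the lookup table minio uses):

package main

import (
	"fmt"
	"mime"
	"path/filepath"
)

func main() {
	contentType := "application/octet-stream" // same default as getObjectInfo
	if ext := filepath.Ext("photo.png"); ext != "" {
		if ct := mime.TypeByExtension(ext); ct != "" {
			contentType = ct
		}
	}
	fmt.Println(contentType) // image/png
}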