Make donut fully integrated back into API handlers

commit 7c37e9d06a (parent 12bde7df30)
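The recurring pattern in this commit is the "Ticket master block": each API handler now opens with a scope that files an Operation ticket on the Minio.OP channel and blocks on the ticket's ProceedCh until the ticket master (startTM) signals it to proceed. A minimal, runnable sketch of that handshake, using only names that appear in this diff (it is a sketch for orientation, not code from the commit — in the real server the channel lives on the Minio struct and startTM is started by StartServices):

package main

import "fmt"

// Operation mirrors the struct in the diff: a ticket carrying a
// channel the ticket master uses to signal "go ahead".
type Operation struct {
    ProceedCh chan struct{}
}

// startTM mirrors the diff's ticket master loop: it admits one
// queued operation at a time by sending on its ProceedCh.
func startTM(ops chan Operation) {
    for op := range ops {
        op.ProceedCh <- struct{}{}
    }
}

func main() {
    ops := make(chan Operation)
    go startTM(ops)

    // Handler side: enqueue a ticket, then block until admitted.
    op := Operation{ProceedCh: make(chan struct{})}
    ops <- op
    <-op.ProceedCh
    fmt.Println("admitted; handler proceeds")
}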
commands.go (12 changed lines)
@@ -8,18 +8,6 @@ import (
 	"github.com/minio/minio/pkg/server/api"
 )
 
-func removeDuplicates(slice []string) []string {
-	newSlice := []string{}
-	seen := make(map[string]struct{})
-	for _, val := range slice {
-		if _, ok := seen[val]; !ok {
-			newSlice = append(newSlice, val)
-			seen[val] = struct{}{}
-		}
-	}
-	return newSlice
-}
-
 var commands = []cli.Command{
 	serverCmd,
 	controlCmd,
@@ -16,6 +16,8 @@
 
 package api
 
+import "github.com/minio/minio/pkg/storage/donut"
+
 // Operation container for individual operations read by Ticket Master
 type Operation struct {
 	ProceedCh chan struct{}
@@ -23,10 +25,16 @@ type Operation struct {
 
 // Minio container for API and also carries OP (operation) channel
 type Minio struct {
 	OP    chan Operation
+	Donut donut.Interface
 }
 
 // New instantiate a new minio API
 func New() Minio {
-	return Minio{OP: make(chan Operation)}
+	// ignore errors for now
+	d, _ := donut.LoadDonut()
+	return Minio{
+		OP:    make(chan Operation),
+		Donut: d,
+	}
 }
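Note that the new New() deliberately discards the LoadDonut error ("ignore errors for now"), so a broken or missing donut config only surfaces later, when a handler calls through api.Donut. A hypothetical in-package variant (not in this commit) that surfaces the failure at construction time instead:

// Hypothetical variant, not part of the commit: return the
// LoadDonut error to the caller rather than dropping it.
func NewWithError() (Minio, error) {
    d, err := donut.LoadDonut()
    if err != nil {
        return Minio{}, err
    }
    return Minio{OP: make(chan Operation), Donut: d}, nil
}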
@@ -17,16 +17,51 @@
 package api
 
 import (
-	"log"
 	"net/http"
 
 	"github.com/gorilla/mux"
+	"github.com/minio/minio/pkg/iodine"
+	"github.com/minio/minio/pkg/storage/donut"
+	"github.com/minio/minio/pkg/utils/log"
 )
 
 func (api Minio) isValidOp(w http.ResponseWriter, req *http.Request, acceptsContentType contentType) bool {
 	vars := mux.Vars(req)
 	bucket := vars["bucket"]
-	log.Println(bucket)
+	bucketMetadata, err := api.Donut.GetBucketMetadata(bucket)
+	switch iodine.ToError(err).(type) {
+	case donut.BucketNotFound:
+		{
+			writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path)
+			return false
+		}
+	case donut.BucketNameInvalid:
+		{
+			writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path)
+			return false
+		}
+	case nil:
+		if _, err := stripAuth(req); err != nil {
+			if bucketMetadata.ACL.IsPrivate() {
+				return true
+				//uncomment this when we have webcli
+				//writeErrorResponse(w, req, AccessDenied, acceptsContentType, req.URL.Path)
+				//return false
+			}
+			if bucketMetadata.ACL.IsPublicRead() && req.Method == "PUT" {
+				return true
+				//uncomment this when we have webcli
+				//writeErrorResponse(w, req, AccessDenied, acceptsContentType, req.URL.Path)
+				//return false
+			}
+		}
+	default:
+		{
+			log.Error.Println(iodine.New(err, nil))
+			writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path)
+		}
+	}
 	return true
 }
 
@@ -38,18 +73,16 @@ func (api Minio) isValidOp(w http.ResponseWriter, req *http.Request, acceptsCont
 // This operation returns at most 1,000 multipart uploads in the response.
 //
 func (api Minio) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Request) {
-	acceptsContentType := getContentType(req)
-	op := Operation{}
-	op.ProceedCh = make(chan struct{})
-	api.OP <- op
-	// block until Ticket master gives us a go
-	<-op.ProceedCh
+	// Ticket master block
 	{
-		// do you operation
+		op := Operation{}
+		op.ProceedCh = make(chan struct{})
+		api.OP <- op
+		// block until ticket master gives us a go
+		<-op.ProceedCh
 	}
-	log.Println(acceptsContentType)
 
+	acceptsContentType := getContentType(req)
 	resources := getBucketMultipartResources(req.URL.Query())
 	if resources.MaxUploads == 0 {
 		resources.MaxUploads = maxObjectList
@@ -57,7 +90,29 @@ func (api Minio) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Re
 
 	vars := mux.Vars(req)
 	bucket := vars["bucket"]
-	log.Println(bucket)
+	resources, err := api.Donut.ListMultipartUploads(bucket, resources)
+	switch iodine.ToError(err).(type) {
+	case nil: // success
+		{
+			// generate response
+			response := generateListMultipartUploadsResult(bucket, resources)
+			encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType)
+			// write headers
+			setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse))
+			// write body
+			w.Write(encodedSuccessResponse)
+		}
+	case donut.BucketNotFound:
+		{
+			writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path)
+		}
+	default:
+		{
+			log.Error.Println(iodine.New(err, nil))
+			writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path)
+		}
+	}
 }
 
 // ListObjectsHandler - GET Bucket (List Objects)
@@ -67,18 +122,16 @@ func (api Minio) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Re
 // criteria to return a subset of the objects in a bucket.
 //
 func (api Minio) ListObjectsHandler(w http.ResponseWriter, req *http.Request) {
-	acceptsContentType := getContentType(req)
-	op := Operation{}
-	op.ProceedCh = make(chan struct{})
-	api.OP <- op
-	// block until Ticket master gives us a go
-	<-op.ProceedCh
+	// Ticket master block
 	{
-		// do you operation
+		op := Operation{}
+		op.ProceedCh = make(chan struct{})
+		api.OP <- op
+		// block until Ticket master gives us a go
+		<-op.ProceedCh
 	}
-	log.Println(acceptsContentType)
 
+	acceptsContentType := getContentType(req)
 	// verify if bucket allows this operation
 	if !api.isValidOp(w, req, acceptsContentType) {
 		return
@@ -96,8 +149,25 @@ func (api Minio) ListObjectsHandler(w http.ResponseWriter, req *http.Request) {
 
 	vars := mux.Vars(req)
 	bucket := vars["bucket"]
-	log.Println(bucket)
 
+	objects, resources, err := api.Donut.ListObjects(bucket, resources)
+	switch iodine.ToError(err).(type) {
+	case nil:
+		// generate response
+		response := generateListObjectsResponse(bucket, objects, resources)
+		encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType)
+		// write headers
+		setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse))
+		// write body
+		w.Write(encodedSuccessResponse)
+	case donut.ObjectNotFound:
+		writeErrorResponse(w, req, NoSuchKey, acceptsContentType, req.URL.Path)
+	case donut.ObjectNameInvalid:
+		writeErrorResponse(w, req, NoSuchKey, acceptsContentType, req.URL.Path)
+	default:
+		log.Error.Println(iodine.New(err, nil))
+		writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path)
+	}
 }
 
 // ListBucketsHandler - GET Service
@@ -105,6 +175,15 @@ func (api Minio) ListObjectsHandler(w http.ResponseWriter, req *http.Request) {
 // This implementation of the GET operation returns a list of all buckets
 // owned by the authenticated sender of the request.
 func (api Minio) ListBucketsHandler(w http.ResponseWriter, req *http.Request) {
+	// Ticket master block
+	{
+		op := Operation{}
+		op.ProceedCh = make(chan struct{})
+		api.OP <- op
+		// block until Ticket master gives us a go
+		<-op.ProceedCh
+	}
+
 	acceptsContentType := getContentType(req)
 	// uncomment this when we have webcli
 	// without access key credentials one cannot list buckets
@@ -112,21 +191,36 @@ func (api Minio) ListBucketsHandler(w http.ResponseWriter, req *http.Request) {
 	// writeErrorResponse(w, req, AccessDenied, acceptsContentType, req.URL.Path)
 	// return
 	// }
-	op := Operation{}
-	op.ProceedCh = make(chan struct{})
-	api.OP <- op
-	// block until Ticket master gives us a go
-	<-op.ProceedCh
-	{
-		// do you operation
+	buckets, err := api.Donut.ListBuckets()
+	switch iodine.ToError(err).(type) {
+	case nil:
+		// generate response
+		response := generateListBucketsResponse(buckets)
+		encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType)
+		// write headers
+		setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse))
+		// write response
+		w.Write(encodedSuccessResponse)
+	default:
+		log.Error.Println(iodine.New(err, nil))
+		writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path)
 	}
-	log.Println(acceptsContentType)
 }
 
 // PutBucketHandler - PUT Bucket
 // ----------
 // This implementation of the PUT operation creates a new bucket for authenticated request
 func (api Minio) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
+	// Ticket master block
+	{
+		op := Operation{}
+		op.ProceedCh = make(chan struct{})
+		api.OP <- op
+		// block until Ticket master gives us a go
+		<-op.ProceedCh
+	}
+
 	acceptsContentType := getContentType(req)
 	// uncomment this when we have webcli
 	// without access key credentials one cannot create a bucket
@@ -134,15 +228,6 @@ func (api Minio) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
 	// writeErrorResponse(w, req, AccessDenied, acceptsContentType, req.URL.Path)
 	// return
 	// }
-	op := Operation{}
-	op.ProceedCh = make(chan struct{})
-	api.OP <- op
-	// block until Ticket master gives us a go
-	<-op.ProceedCh
-	{
-		// do you operation
-	}
-	log.Println(acceptsContentType)
 
 	if isRequestBucketACL(req.URL.Query()) {
 		api.PutBucketACLHandler(w, req)
@@ -157,24 +242,39 @@ func (api Minio) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
 
 	vars := mux.Vars(req)
 	bucket := vars["bucket"]
-	log.Println(bucket)
+	err := api.Donut.MakeBucket(bucket, getACLTypeString(aclType))
+	switch iodine.ToError(err).(type) {
+	case nil:
+		// Make sure to add Location information here only for bucket
+		w.Header().Set("Location", "/"+bucket)
+		writeSuccessResponse(w, acceptsContentType)
+	case donut.TooManyBuckets:
+		writeErrorResponse(w, req, TooManyBuckets, acceptsContentType, req.URL.Path)
+	case donut.BucketNameInvalid:
+		writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path)
+	case donut.BucketExists:
+		writeErrorResponse(w, req, BucketAlreadyExists, acceptsContentType, req.URL.Path)
+	default:
+		log.Error.Println(iodine.New(err, nil))
+		writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path)
+	}
 }
 
 // PutBucketACLHandler - PUT Bucket ACL
 // ----------
 // This implementation of the PUT operation modifies the bucketACL for authenticated request
 func (api Minio) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) {
-	acceptsContentType := getContentType(req)
-	op := Operation{}
-	op.ProceedCh = make(chan struct{})
-	api.OP <- op
-	// block until Ticket master gives us a go
-	<-op.ProceedCh
+	// Ticket master block
 	{
-		// do you operation
+		op := Operation{}
+		op.ProceedCh = make(chan struct{})
+		api.OP <- op
+		// block until Ticket master gives us a go
+		<-op.ProceedCh
 	}
-	log.Println(acceptsContentType)
+	acceptsContentType := getContentType(req)
 
 	// read from 'x-amz-acl'
 	aclType := getACLType(req)
@@ -185,7 +285,19 @@ func (api Minio) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) {
 
 	vars := mux.Vars(req)
 	bucket := vars["bucket"]
-	log.Println(bucket)
+	err := api.Donut.SetBucketMetadata(bucket, map[string]string{"acl": getACLTypeString(aclType)})
+	switch iodine.ToError(err).(type) {
+	case nil:
+		writeSuccessResponse(w, acceptsContentType)
+	case donut.BucketNameInvalid:
+		writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path)
+	case donut.BucketNotFound:
+		writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path)
+	default:
+		log.Error.Println(iodine.New(err, nil))
+		writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path)
+	}
 }
 
 // HeadBucketHandler - HEAD Bucket
@@ -195,19 +307,33 @@ func (api Minio) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) {
 // have permission to access it. Otherwise, the operation might
 // return responses such as 404 Not Found and 403 Forbidden.
 func (api Minio) HeadBucketHandler(w http.ResponseWriter, req *http.Request) {
-	acceptsContentType := getContentType(req)
-	op := Operation{}
-	op.ProceedCh = make(chan struct{})
-	api.OP <- op
-	// block until Ticket master gives us a go
-	<-op.ProceedCh
+	// Ticket master block
 	{
-		// do you operation
+		op := Operation{}
+		op.ProceedCh = make(chan struct{})
+		api.OP <- op
+		// block until Ticket master gives us a go
+		<-op.ProceedCh
 	}
-	log.Println(acceptsContentType)
+	acceptsContentType := getContentType(req)
 
 	vars := mux.Vars(req)
 	bucket := vars["bucket"]
-	log.Println(bucket)
+	_, err := api.Donut.GetBucketMetadata(bucket)
+	switch iodine.ToError(err).(type) {
+	case nil:
+		writeSuccessResponse(w, acceptsContentType)
+	case donut.BucketNotFound:
+		error := getErrorCode(NoSuchBucket)
+		w.WriteHeader(error.HTTPStatusCode)
+	case donut.BucketNameInvalid:
+		error := getErrorCode(InvalidBucketName)
+		w.WriteHeader(error.HTTPStatusCode)
+	default:
+		log.Error.Println(iodine.New(err, nil))
+		error := getErrorCode(InternalError)
+		w.WriteHeader(error.HTTPStatusCode)
+	}
 }
|
@ -9,7 +9,7 @@
|
|||||||
*
|
*
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implieapi.Donut.
|
||||||
* See the License for the specific language governing permissions and
|
* See the License for the specific language governing permissions and
|
||||||
* limitations under the License.
|
* limitations under the License.
|
||||||
*/
|
*/
|
||||||
@ -25,6 +25,7 @@ import (
|
|||||||
|
|
||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
"github.com/minio/minio/pkg/iodine"
|
"github.com/minio/minio/pkg/iodine"
|
||||||
|
"github.com/minio/minio/pkg/storage/donut"
|
||||||
"github.com/minio/minio/pkg/utils/log"
|
"github.com/minio/minio/pkg/utils/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -37,17 +38,16 @@ const (
 // This implementation of the GET operation retrieves object. To use GET,
 // you must have READ access to the object.
 func (api Minio) GetObjectHandler(w http.ResponseWriter, req *http.Request) {
-	acceptsContentType := getContentType(req)
-	op := Operation{}
-	op.ProceedCh = make(chan struct{})
-	api.OP <- op
-	// block until Ticket master gives us a go
-	<-op.ProceedCh
+	// ticket master block
 	{
-		// do you operation
+		op := Operation{}
+		op.ProceedCh = make(chan struct{})
+		api.OP <- op
+		// block until Ticket master gives us a go
+		<-op.ProceedCh
 	}
-	log.Println(acceptsContentType)
+	acceptsContentType := getContentType(req)
 
 	// verify if this operation is allowed
 	if !api.isValidOp(w, req, acceptsContentType) {
@@ -58,25 +58,57 @@ func (api Minio) GetObjectHandler(w http.ResponseWriter, req *http.Request) {
 	vars := mux.Vars(req)
 	bucket = vars["bucket"]
 	object = vars["object"]
-	log.Println(bucket, object)
 
+	metadata, err := api.Donut.GetObjectMetadata(bucket, object)
+	switch iodine.ToError(err).(type) {
+	case nil: // success
+		{
+			httpRange, err := getRequestedRange(req, metadata.Size)
+			if err != nil {
+				writeErrorResponse(w, req, InvalidRange, acceptsContentType, req.URL.Path)
+				return
+			}
+			switch httpRange.start == 0 && httpRange.length == 0 {
+			case true:
+				setObjectHeaders(w, metadata)
+				if _, err := api.Donut.GetObject(w, bucket, object); err != nil {
+					// unable to write headers, we've already printed data. Just close the connection.
+					log.Error.Println(iodine.New(err, nil))
+				}
+			case false:
+				metadata.Size = httpRange.length
+				setRangeObjectHeaders(w, metadata, httpRange)
+				w.WriteHeader(http.StatusPartialContent)
+				if _, err := api.Donut.GetPartialObject(w, bucket, object, httpRange.start, httpRange.length); err != nil {
+					// unable to write headers, we've already printed data. Just close the connection.
+					log.Error.Println(iodine.New(err, nil))
+				}
+			}
+		}
+	case donut.ObjectNotFound:
+		writeErrorResponse(w, req, NoSuchKey, acceptsContentType, req.URL.Path)
+	case donut.ObjectNameInvalid:
+		writeErrorResponse(w, req, NoSuchKey, acceptsContentType, req.URL.Path)
+	default:
+		log.Error.Println(iodine.New(err, nil))
+		writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path)
+	}
 }
 
 // HeadObjectHandler - HEAD Object
 // -----------
 // The HEAD operation retrieves metadata from an object without returning the object itself.
 func (api Minio) HeadObjectHandler(w http.ResponseWriter, req *http.Request) {
-	acceptsContentType := getContentType(req)
-	op := Operation{}
-	op.ProceedCh = make(chan struct{})
-	api.OP <- op
-	// block until Ticket master gives us a go
-	<-op.ProceedCh
+	// ticket master block
 	{
-		// do you operation
+		op := Operation{}
+		op.ProceedCh = make(chan struct{})
+		api.OP <- op
+		// block until Ticket master gives us a go
+		<-op.ProceedCh
 	}
-	log.Println(acceptsContentType)
+	acceptsContentType := getContentType(req)
 
 	// verify if this operation is allowed
 	if !api.isValidOp(w, req, acceptsContentType) {
@@ -87,25 +119,42 @@ func (api Minio) HeadObjectHandler(w http.ResponseWriter, req *http.Request) {
 	vars := mux.Vars(req)
 	bucket = vars["bucket"]
 	object = vars["object"]
-	log.Println(bucket, object)
+	metadata, err := api.Donut.GetObjectMetadata(bucket, object)
+	switch iodine.ToError(err).(type) {
+	case nil:
+		setObjectHeaders(w, metadata)
+		w.WriteHeader(http.StatusOK)
+	case donut.ObjectNotFound:
+		error := getErrorCode(NoSuchKey)
+		w.Header().Set("Server", "Minio")
+		w.WriteHeader(error.HTTPStatusCode)
+	case donut.ObjectNameInvalid:
+		error := getErrorCode(NoSuchKey)
+		w.Header().Set("Server", "Minio")
+		w.WriteHeader(error.HTTPStatusCode)
+	default:
+		log.Error.Println(iodine.New(err, nil))
+		error := getErrorCode(InternalError)
+		w.Header().Set("Server", "Minio")
+		w.WriteHeader(error.HTTPStatusCode)
+	}
 }
 
 // PutObjectHandler - PUT Object
 // ----------
 // This implementation of the PUT operation adds an object to a bucket.
 func (api Minio) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
-	acceptsContentType := getContentType(req)
-	op := Operation{}
-	op.ProceedCh = make(chan struct{})
-	api.OP <- op
-	// block until Ticket master gives us a go
-	<-op.ProceedCh
+	// Ticket master block
 	{
-		// do you operation
+		op := Operation{}
+		op.ProceedCh = make(chan struct{})
+		api.OP <- op
+		// block until Ticket master gives us a go
+		<-op.ProceedCh
 	}
-	log.Println(acceptsContentType)
 
+	acceptsContentType := getContentType(req)
 	// verify if this operation is allowed
 	if !api.isValidOp(w, req, acceptsContentType) {
 		return
@@ -122,7 +171,7 @@ func (api Minio) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
 		writeErrorResponse(w, req, InvalidDigest, acceptsContentType, req.URL.Path)
 		return
 	}
-	/// if Content-Length missing, throw away
+	/// if Content-Length missing, deny the request
 	size := req.Header.Get("Content-Length")
 	if size == "" {
 		writeErrorResponse(w, req, MissingContentLength, acceptsContentType, req.URL.Path)
@@ -148,24 +197,40 @@ func (api Minio) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
 		writeErrorResponse(w, req, InvalidRequest, acceptsContentType, req.URL.Path)
 		return
 	}
-	log.Println(bucket, object, sizeInt64)
+	metadata, err := api.Donut.CreateObject(bucket, object, md5, sizeInt64, req.Body, nil)
+	switch iodine.ToError(err).(type) {
+	case nil:
+		w.Header().Set("ETag", metadata.MD5Sum)
+		writeSuccessResponse(w, acceptsContentType)
+	case donut.ObjectExists:
+		writeErrorResponse(w, req, MethodNotAllowed, acceptsContentType, req.URL.Path)
+	case donut.BadDigest:
+		writeErrorResponse(w, req, BadDigest, acceptsContentType, req.URL.Path)
+	case donut.EntityTooLarge:
+		writeErrorResponse(w, req, EntityTooLarge, acceptsContentType, req.URL.Path)
+	case donut.InvalidDigest:
+		writeErrorResponse(w, req, InvalidDigest, acceptsContentType, req.URL.Path)
+	default:
+		log.Error.Println(iodine.New(err, nil))
+		writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path)
+	}
 }
 
 /// Multipart API
 
 // NewMultipartUploadHandler - New multipart upload
 func (api Minio) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
-	acceptsContentType := getContentType(req)
-	op := Operation{}
-	op.ProceedCh = make(chan struct{})
-	api.OP <- op
-	// block until Ticket master gives us a go
-	<-op.ProceedCh
+	// Ticket master block
 	{
-		// do you operation
+		op := Operation{}
+		op.ProceedCh = make(chan struct{})
+		api.OP <- op
+		// block until Ticket master gives us a go
+		<-op.ProceedCh
 	}
-	log.Println(acceptsContentType)
+	acceptsContentType := getContentType(req)
 
 	// handle ACL's here at bucket level
 	if !api.isValidOp(w, req, acceptsContentType) {
@@ -181,22 +246,38 @@ func (api Minio) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Requ
 	vars := mux.Vars(req)
 	bucket = vars["bucket"]
 	object = vars["object"]
-	log.Println(bucket, object)
+	uploadID, err := api.Donut.NewMultipartUpload(bucket, object, "")
+	switch iodine.ToError(err).(type) {
+	case nil:
+		{
+			response := generateInitiateMultipartUploadResult(bucket, object, uploadID)
+			encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType)
+			// write headers
+			setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse))
+			// write body
+			w.Write(encodedSuccessResponse)
+		}
+	case donut.ObjectExists:
+		writeErrorResponse(w, req, MethodNotAllowed, acceptsContentType, req.URL.Path)
+	default:
+		log.Error.Println(iodine.New(err, nil))
+		writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path)
+	}
 }
 
 // PutObjectPartHandler - Upload part
 func (api Minio) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) {
-	acceptsContentType := getContentType(req)
-	op := Operation{}
-	op.ProceedCh = make(chan struct{})
-	api.OP <- op
-	// block until Ticket master gives us a go
-	<-op.ProceedCh
+	// Ticket master block
 	{
-		// do you operation
+		op := Operation{}
+		op.ProceedCh = make(chan struct{})
+		api.OP <- op
+		// block until Ticket master gives us a go
+		<-op.ProceedCh
 	}
-	log.Println(acceptsContentType)
+	acceptsContentType := getContentType(req)
 
 	// handle ACL's here at bucket level
 	if !api.isValidOp(w, req, acceptsContentType) {
@@ -232,7 +313,6 @@ func (api Minio) PutObjectPartHandler(w http.ResponseWriter, req *http.Request)
 	vars := mux.Vars(req)
 	bucket := vars["bucket"]
 	object := vars["object"]
-	log.Println(bucket, object, sizeInt64)
 
 	uploadID := req.URL.Query().Get("uploadId")
 	partIDString := req.URL.Query().Get("partNumber")
@@ -241,22 +321,40 @@ func (api Minio) PutObjectPartHandler(w http.ResponseWriter, req *http.Request)
 	if err != nil {
 		writeErrorResponse(w, req, InvalidPart, acceptsContentType, req.URL.Path)
 	}
-	log.Println(uploadID, partID)
+	calculatedMD5, err := api.Donut.CreateObjectPart(bucket, object, uploadID, partID, "", md5, sizeInt64, req.Body)
+	switch iodine.ToError(err).(type) {
+	case nil:
+		w.Header().Set("ETag", calculatedMD5)
+		writeSuccessResponse(w, acceptsContentType)
+	case donut.InvalidUploadID:
+		writeErrorResponse(w, req, NoSuchUpload, acceptsContentType, req.URL.Path)
+	case donut.ObjectExists:
+		writeErrorResponse(w, req, MethodNotAllowed, acceptsContentType, req.URL.Path)
+	case donut.BadDigest:
+		writeErrorResponse(w, req, BadDigest, acceptsContentType, req.URL.Path)
+	case donut.EntityTooLarge:
+		writeErrorResponse(w, req, EntityTooLarge, acceptsContentType, req.URL.Path)
+	case donut.InvalidDigest:
+		writeErrorResponse(w, req, InvalidDigest, acceptsContentType, req.URL.Path)
+	default:
+		log.Error.Println(iodine.New(err, nil))
+		writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path)
+	}
 }
 
 // AbortMultipartUploadHandler - Abort multipart upload
 func (api Minio) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
-	acceptsContentType := getContentType(req)
-	op := Operation{}
-	op.ProceedCh = make(chan struct{})
-	api.OP <- op
-	// block until Ticket master gives us a go
-	<-op.ProceedCh
+	// Ticket master block
 	{
-		// do you operation
+		op := Operation{}
+		op.ProceedCh = make(chan struct{})
+		api.OP <- op
+		// block until Ticket master gives us a go
+		<-op.ProceedCh
 	}
-	log.Println(acceptsContentType)
+	acceptsContentType := getContentType(req)
 
 	// handle ACL's here at bucket level
 	if !api.isValidOp(w, req, acceptsContentType) {
@@ -267,23 +365,33 @@ func (api Minio) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Re
 	bucket := vars["bucket"]
 	object := vars["object"]
 
-	//objectResourcesMetadata := getObjectResources(req.URL.Query())
-	log.Println(bucket, object)
+	objectResourcesMetadata := getObjectResources(req.URL.Query())
+	err := api.Donut.AbortMultipartUpload(bucket, object, objectResourcesMetadata.UploadID)
+	switch iodine.ToError(err).(type) {
+	case nil:
+		setCommonHeaders(w, getContentTypeString(acceptsContentType), 0)
+		w.WriteHeader(http.StatusNoContent)
+	case donut.InvalidUploadID:
+		writeErrorResponse(w, req, NoSuchUpload, acceptsContentType, req.URL.Path)
+	default:
+		log.Error.Println(iodine.New(err, nil))
+		writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path)
+	}
 }
 
 // ListObjectPartsHandler - List object parts
 func (api Minio) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request) {
-	acceptsContentType := getContentType(req)
-	op := Operation{}
-	op.ProceedCh = make(chan struct{})
-	api.OP <- op
-	// block until Ticket master gives us a go
-	<-op.ProceedCh
+	// Ticket master block
 	{
-		// do you operation
+		op := Operation{}
+		op.ProceedCh = make(chan struct{})
+		api.OP <- op
+		// block until Ticket master gives us a go
+		<-op.ProceedCh
 	}
-	log.Println(acceptsContentType)
+	acceptsContentType := getContentType(req)
 
 	// handle ACL's here at bucket level
 	if !api.isValidOp(w, req, acceptsContentType) {
@@ -298,22 +406,38 @@ func (api Minio) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request
 	vars := mux.Vars(req)
 	bucket := vars["bucket"]
 	object := vars["object"]
-	log.Println(bucket, object)
+	objectResourcesMetadata, err := api.Donut.ListObjectParts(bucket, object, objectResourcesMetadata)
+	switch iodine.ToError(err).(type) {
+	case nil:
+		{
+			response := generateListPartsResult(objectResourcesMetadata)
+			encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType)
+			// write headers
+			setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse))
+			// write body
+			w.Write(encodedSuccessResponse)
+		}
+	case donut.InvalidUploadID:
+		writeErrorResponse(w, req, NoSuchUpload, acceptsContentType, req.URL.Path)
+	default:
+		log.Error.Println(iodine.New(err, nil))
+		writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path)
+	}
 }
 
 // CompleteMultipartUploadHandler - Complete multipart upload
 func (api Minio) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
-	acceptsContentType := getContentType(req)
-	op := Operation{}
-	op.ProceedCh = make(chan struct{})
-	api.OP <- op
-	// block until Ticket master gives us a go
-	<-op.ProceedCh
+	// Ticket master block
 	{
-		// do you operation
+		op := Operation{}
+		op.ProceedCh = make(chan struct{})
+		api.OP <- op
+		// block until Ticket master gives us a go
+		<-op.ProceedCh
 	}
-	log.Println(acceptsContentType)
+	acceptsContentType := getContentType(req)
 
 	// handle ACL's here at bucket level
 	if !api.isValidOp(w, req, acceptsContentType) {
@@ -336,15 +460,31 @@ func (api Minio) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http
 	vars := mux.Vars(req)
 	bucket := vars["bucket"]
 	object := vars["object"]
-	log.Println(bucket, object)
 
-	//objectResourcesMetadata := getObjectResources(req.URL.Query())
+	objectResourcesMetadata := getObjectResources(req.URL.Query())
 
 	partMap := make(map[int]string)
 	for _, part := range parts.Part {
 		partMap[part.PartNumber] = part.ETag
 	}
+
+	metadata, err := api.Donut.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, partMap)
+	switch iodine.ToError(err).(type) {
+	case nil:
+		{
+			response := generateCompleteMultpartUploadResult(bucket, object, "", metadata.MD5Sum)
+			encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType)
+			// write headers
+			setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse))
+			// write body
+			w.Write(encodedSuccessResponse)
+		}
+	case donut.InvalidUploadID:
+		writeErrorResponse(w, req, NoSuchUpload, acceptsContentType, req.URL.Path)
+	default:
+		log.Error.Println(iodine.New(err, nil))
+		writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path)
+	}
 }
 
 /// Delete API
|
@ -24,9 +24,9 @@ import (
|
|||||||
"github.com/minio/minio/pkg/server/rpc"
|
"github.com/minio/minio/pkg/server/rpc"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Get api
|
||||||
func getAPI() api.Minio {
|
func getAPI() api.Minio {
|
||||||
a := api.New()
|
return api.New()
|
||||||
return a
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// registerAPI - register all the object API handlers to their respective paths
|
// registerAPI - register all the object API handlers to their respective paths
|
||||||
@ -82,15 +82,15 @@ func registerChain(handlers ...handlerFunc) chain {
|
|||||||
return ch
|
return ch
|
||||||
}
|
}
|
||||||
|
|
||||||
// registerOtherMiddleware register all available middleware
|
// registerCustomMiddleware register all available custom middleware
|
||||||
func registerOtherMiddleware(mux http.Handler, conf api.Config) http.Handler {
|
func registerCustomMiddleware(mux http.Handler, conf api.Config) http.Handler {
|
||||||
ch := registerChain(
|
ch := registerChain(
|
||||||
api.ValidContentTypeHandler,
|
api.ValidContentTypeHandler,
|
||||||
api.TimeValidityHandler,
|
api.TimeValidityHandler,
|
||||||
api.IgnoreResourcesHandler,
|
api.IgnoreResourcesHandler,
|
||||||
api.ValidateAuthHeaderHandler,
|
api.ValidateAuthHeaderHandler,
|
||||||
api.LoggingHandler,
|
api.LoggingHandler,
|
||||||
// Add new middleware here
|
// Add new your new middleware here
|
||||||
)
|
)
|
||||||
|
|
||||||
mux = ch.final(mux)
|
mux = ch.final(mux)
|
||||||
@ -109,7 +109,7 @@ func getAPIHandler(conf api.Config) (http.Handler, api.Minio) {
|
|||||||
mux := router.NewRouter()
|
mux := router.NewRouter()
|
||||||
minioAPI := getAPI()
|
minioAPI := getAPI()
|
||||||
apiHandler := registerAPI(mux, minioAPI)
|
apiHandler := registerAPI(mux, minioAPI)
|
||||||
apiHandler = registerOtherMiddleware(apiHandler, conf)
|
apiHandler = registerCustomMiddleware(apiHandler, conf)
|
||||||
return apiHandler, minioAPI
|
return apiHandler, minioAPI
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -120,6 +120,6 @@ func getRPCHandler() http.Handler {
|
|||||||
s.RegisterService(new(rpc.HelloService), "")
|
s.RegisterService(new(rpc.HelloService), "")
|
||||||
s.RegisterService(new(rpc.VersionService), "")
|
s.RegisterService(new(rpc.VersionService), "")
|
||||||
s.RegisterService(new(rpc.GetSysInfoService), "")
|
s.RegisterService(new(rpc.GetSysInfoService), "")
|
||||||
// add more services here
|
// Add new services here
|
||||||
return registerRPC(router.NewRouter(), s)
|
return registerRPC(router.NewRouter(), s)
|
||||||
}
|
}
|
||||||
|
@ -25,6 +25,7 @@ import (
|
|||||||
"github.com/minio/minio/pkg/server/api"
|
"github.com/minio/minio/pkg/server/api"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Start API listener
|
||||||
func startAPI(errCh chan error, conf api.Config, apiHandler http.Handler) {
|
func startAPI(errCh chan error, conf api.Config, apiHandler http.Handler) {
|
||||||
defer close(errCh)
|
defer close(errCh)
|
||||||
|
|
||||||
@ -74,6 +75,7 @@ func startAPI(errCh chan error, conf api.Config, apiHandler http.Handler) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Start RPC listener
|
||||||
func startRPC(errCh chan error, rpcHandler http.Handler) {
|
func startRPC(errCh chan error, rpcHandler http.Handler) {
|
||||||
defer close(errCh)
|
defer close(errCh)
|
||||||
|
|
||||||
@ -86,10 +88,11 @@ func startRPC(errCh chan error, rpcHandler http.Handler) {
|
|||||||
errCh <- httpServer.ListenAndServe()
|
errCh <- httpServer.ListenAndServe()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Start ticket master
|
||||||
func startTM(a api.Minio) {
|
func startTM(a api.Minio) {
|
||||||
for {
|
for {
|
||||||
for op := range a.OP {
|
for op := range a.OP {
|
||||||
close(op.ProceedCh)
|
op.ProceedCh <- struct{}{}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -101,8 +104,7 @@ func StartServices(conf api.Config) error {
|
|||||||
|
|
||||||
apiHandler, minioAPI := getAPIHandler(conf)
|
apiHandler, minioAPI := getAPIHandler(conf)
|
||||||
go startAPI(apiErrCh, conf, apiHandler)
|
go startAPI(apiErrCh, conf, apiHandler)
|
||||||
rpcHandler := getRPCHandler()
|
go startRPC(rpcErrCh, getRPCHandler())
|
||||||
go startRPC(rpcErrCh, rpcHandler)
|
|
||||||
go startTM(minioAPI)
|
go startTM(minioAPI)
|
||||||
|
|
||||||
select {
|
select {
|
||||||
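A semantic note on the startTM change above: close(op.ProceedCh) and op.ProceedCh <- struct{}{} both wake the single handler blocked on <-op.ProceedCh, but a closed channel satisfies every subsequent receive for free, whereas the send form is a one-shot rendezvous that blocks the ticket master until exactly one receiver has taken the value. Since each Operation is only ever received once here, the visible behavior is the same; the send form just makes the one-to-one handshake explicit.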
|
@ -57,6 +57,7 @@ func newBucket(bucketName, aclType, donutName string, nodes map[string]node) (bu
|
|||||||
"donutName": donutName,
|
"donutName": donutName,
|
||||||
"aclType": aclType,
|
"aclType": aclType,
|
||||||
}
|
}
|
||||||
|
|
||||||
if strings.TrimSpace(bucketName) == "" || strings.TrimSpace(donutName) == "" {
|
if strings.TrimSpace(bucketName) == "" || strings.TrimSpace(donutName) == "" {
|
||||||
return bucket{}, BucketMetadata{}, iodine.New(InvalidArgument{}, errParams)
|
return bucket{}, BucketMetadata{}, iodine.New(InvalidArgument{}, errParams)
|
||||||
}
|
}
|
||||||
@ -130,7 +131,7 @@ func (b bucket) GetObjectMetadata(objectName string) (ObjectMetadata, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ListObjects - list all objects
|
// ListObjects - list all objects
|
||||||
func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (ListObjects, error) {
|
func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (ListObjectsResults, error) {
|
||||||
b.lock.RLock()
|
b.lock.RLock()
|
||||||
defer b.lock.RUnlock()
|
defer b.lock.RUnlock()
|
||||||
if maxkeys <= 0 {
|
if maxkeys <= 0 {
|
||||||
@ -140,7 +141,7 @@ func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (List
|
|||||||
var objects []string
|
var objects []string
|
||||||
bucketMetadata, err := b.getBucketMetadata()
|
bucketMetadata, err := b.getBucketMetadata()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ListObjects{}, iodine.New(err, nil)
|
return ListObjectsResults{}, iodine.New(err, nil)
|
||||||
}
|
}
|
||||||
for objectName := range bucketMetadata.Buckets[b.getBucketName()].BucketObjects {
|
for objectName := range bucketMetadata.Buckets[b.getBucketName()].BucketObjects {
|
||||||
if strings.HasPrefix(objectName, strings.TrimSpace(prefix)) {
|
if strings.HasPrefix(objectName, strings.TrimSpace(prefix)) {
|
||||||
@ -181,7 +182,7 @@ func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (List
|
|||||||
commonPrefixes = RemoveDuplicates(commonPrefixes)
|
commonPrefixes = RemoveDuplicates(commonPrefixes)
|
||||||
sort.Strings(commonPrefixes)
|
sort.Strings(commonPrefixes)
|
||||||
|
|
||||||
listObjects := ListObjects{}
|
listObjects := ListObjectsResults{}
|
||||||
listObjects.Objects = make(map[string]ObjectMetadata)
|
listObjects.Objects = make(map[string]ObjectMetadata)
|
||||||
listObjects.CommonPrefixes = commonPrefixes
|
listObjects.CommonPrefixes = commonPrefixes
|
||||||
listObjects.IsTruncated = isTruncated
|
listObjects.IsTruncated = isTruncated
|
||||||
@ -189,7 +190,7 @@ func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (List
|
|||||||
for _, objectName := range results {
|
for _, objectName := range results {
|
||||||
objMetadata, err := b.readObjectMetadata(normalizeObjectName(objectName))
|
objMetadata, err := b.readObjectMetadata(normalizeObjectName(objectName))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ListObjects{}, iodine.New(err, nil)
|
return ListObjectsResults{}, iodine.New(err, nil)
|
||||||
}
|
}
|
||||||
listObjects.Objects[objectName] = objMetadata
|
listObjects.Objects[objectName] = objMetadata
|
||||||
}
|
}
|
||||||
|
@ -19,10 +19,31 @@ package donut
|
|||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"io"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// ProxyWriter implements io.Writer to trap written bytes
|
||||||
|
type ProxyWriter struct {
|
||||||
|
writer io.Writer
|
||||||
|
writtenBytes []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ProxyWriter) Write(p []byte) (n int, err error) {
|
||||||
|
n, err = r.writer.Write(p)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
r.writtenBytes = append(r.writtenBytes, p[0:n]...)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewProxyWriter - wrap around a given writer with ProxyWriter
|
||||||
|
func NewProxyWriter(w io.Writer) *ProxyWriter {
|
||||||
|
return &ProxyWriter{writer: w, writtenBytes: nil}
|
||||||
|
}
|
||||||
|
|
||||||
// Delimiter delims the string at delimiter
|
// Delimiter delims the string at delimiter
|
||||||
func Delimiter(object, delimiter string) string {
|
func Delimiter(object, delimiter string) string {
|
||||||
readBuffer := bytes.NewBufferString(object)
|
readBuffer := bytes.NewBufferString(object)
|
||||||
|
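ProxyWriter tees everything written through it into an in-memory slice, which is useful when bytes have already streamed to a destination but the caller still wants a copy (for example, to compute a checksum afterwards). A small runnable sketch of the idea — a standalone copy of the type for illustration, since writtenBytes is unexported outside the donut package:

package main

import (
    "bytes"
    "fmt"
    "io"
)

// Same shape as the diff's ProxyWriter: every successful Write is
// mirrored into writtenBytes for later inspection.
type ProxyWriter struct {
    writer       io.Writer
    writtenBytes []byte
}

func (r *ProxyWriter) Write(p []byte) (n int, err error) {
    n, err = r.writer.Write(p)
    if err != nil {
        return
    }
    r.writtenBytes = append(r.writtenBytes, p[0:n]...)
    return
}

func main() {
    var sink bytes.Buffer
    pw := &ProxyWriter{writer: &sink}
    io.WriteString(pw, "hello donut")
    fmt.Printf("sink=%q trapped=%q\n", sink.String(), pw.writtenBytes)
}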
pkg/storage/donut/config.go (new file, 87 lines)

@@ -0,0 +1,87 @@
+/*
+ * Minimalist Object Storage, (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package donut
+
+import (
+	"os/user"
+	"path/filepath"
+	"time"
+
+	"github.com/minio/minio/pkg/iodine"
+	"github.com/minio/minio/pkg/quick"
+)
+
+// getDonutConfigPath get donut config file path
+func getDonutConfigPath() (string, error) {
+	u, err := user.Current()
+	if err != nil {
+		return "", iodine.New(err, nil)
+	}
+	donutConfigPath := filepath.Join(u.HomeDir, ".minio", "donut.json")
+	return donutConfigPath, nil
+}
+
+// SaveConfig save donut config
+func SaveConfig(a *Config) error {
+	donutConfigPath, err := getDonutConfigPath()
+	if err != nil {
+		return iodine.New(err, nil)
+	}
+	qc, err := quick.New(a)
+	if err != nil {
+		return iodine.New(err, nil)
+	}
+	if err := qc.Save(donutConfigPath); err != nil {
+		return iodine.New(err, nil)
+	}
+	return nil
+}
+
+// LoadConfig load donut config
+func LoadConfig() (*Config, error) {
+	donutConfigPath, err := getDonutConfigPath()
+	if err != nil {
+		return nil, iodine.New(err, nil)
+	}
+	a := &Config{}
+	a.Version = "0.0.1"
+	qc, err := quick.New(a)
+	if err != nil {
+		return nil, iodine.New(err, nil)
+	}
+	if err := qc.Load(donutConfigPath); err != nil {
+		return nil, iodine.New(err, nil)
+	}
+	return qc.Data().(*Config), nil
+}
+
+// LoadDonut load donut from config
+func LoadDonut() (Interface, error) {
+	conf, err := LoadConfig()
+	if err != nil {
+		conf = &Config{
+			Version:    "0.0.1",
+			MaxSize:    512000000,
+			Expiration: 1 * time.Hour,
+		}
+	}
+	donut, err := New(conf)
+	if err != nil {
+		return nil, iodine.New(err, nil)
+	}
+	return donut, nil
+}
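LoadDonut gives the API a usable donut even when no config file exists: if LoadConfig fails, the hard-coded defaults (version 0.0.1, 512 MB max size, one-hour expiration) are used instead, and only a failure of New(conf) is fatal. A sketch of a hypothetical caller (not part of the commit), assuming only the Config fields and Interface methods shown in this diff:

package main

import (
    "log"

    "github.com/minio/minio/pkg/storage/donut"
)

func main() {
    d, err := donut.LoadDonut()
    if err != nil {
        // only reachable when donut.New(conf) itself fails
        log.Fatalln(err)
    }
    if _, err := d.ListBuckets(); err != nil {
        log.Println(err)
    }
}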
@@ -65,8 +65,8 @@ type BucketMetadata struct {
 	BucketObjects map[string]interface{} `json:"objects"`
 }
 
-// ListObjects container for list objects response
-type ListObjects struct {
+// ListObjectsResults container for list objects response
+type ListObjectsResults struct {
 	Objects        map[string]ObjectMetadata `json:"objects"`
 	CommonPrefixes []string                  `json:"commonPrefixes"`
 	IsTruncated    bool                      `json:"isTruncated"`
@ -24,19 +24,10 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/minio/minio/pkg/iodine"
|
"github.com/minio/minio/pkg/iodine"
|
||||||
)
|
)
|
||||||
|
|
||||||
// donut struct internal data
|
|
||||||
type donut struct {
|
|
||||||
name string
|
|
||||||
buckets map[string]bucket
|
|
||||||
nodes map[string]node
|
|
||||||
lock *sync.RWMutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// config files used inside Donut
|
// config files used inside Donut
|
||||||
const (
|
const (
|
||||||
// donut system config
|
// donut system config
|
||||||
@@ -51,75 +42,41 @@ const (
 	bucketMetadataVersion = "1.0.0"
 )
 
-// attachDonutNode - wrapper function to instantiate a new node for associatedt donut
-// based on the provided configuration
-func (dt donut) attachDonutNode(hostname string, disks []string) error {
-	if err := dt.AttachNode(hostname, disks); err != nil {
-		return iodine.New(err, nil)
-	}
-	return nil
-}
-
-// NewDonut - instantiate a new donut
-func NewDonut(donutName string, nodeDiskMap map[string][]string) (Donut, error) {
-	if donutName == "" || len(nodeDiskMap) == 0 {
-		return nil, iodine.New(InvalidArgument{}, nil)
-	}
-	nodes := make(map[string]node)
-	buckets := make(map[string]bucket)
-	d := donut{
-		name:    donutName,
-		nodes:   nodes,
-		buckets: buckets,
-		lock:    new(sync.RWMutex),
-	}
-	for k, v := range nodeDiskMap {
-		if len(v) == 0 {
-			return nil, iodine.New(InvalidDisksArgument{}, nil)
-		}
-		err := d.attachDonutNode(k, v)
-		if err != nil {
-			return nil, iodine.New(err, nil)
-		}
-	}
-	return d, nil
-}
-
-// MakeBucket - make a new bucket
-func (dt donut) MakeBucket(bucket string, acl BucketACL) error {
-	dt.lock.Lock()
-	defer dt.lock.Unlock()
+// makeBucket - make a new bucket
+func (donut API) makeBucket(bucket string, acl BucketACL) error {
+	donut.lock.Lock()
+	defer donut.lock.Unlock()
 	if bucket == "" || strings.TrimSpace(bucket) == "" {
 		return iodine.New(InvalidArgument{}, nil)
 	}
-	return dt.makeDonutBucket(bucket, acl.String())
+	return donut.makeDonutBucket(bucket, acl.String())
 }
 
-// GetBucketMetadata - get bucket metadata
-func (dt donut) GetBucketMetadata(bucketName string) (BucketMetadata, error) {
-	dt.lock.RLock()
-	defer dt.lock.RUnlock()
-	if err := dt.listDonutBuckets(); err != nil {
+// getBucketMetadata - get bucket metadata
+func (donut API) getBucketMetadata(bucketName string) (BucketMetadata, error) {
+	donut.lock.RLock()
+	defer donut.lock.RUnlock()
+	if err := donut.listDonutBuckets(); err != nil {
 		return BucketMetadata{}, iodine.New(err, nil)
 	}
-	if _, ok := dt.buckets[bucketName]; !ok {
+	if _, ok := donut.buckets[bucketName]; !ok {
 		return BucketMetadata{}, iodine.New(BucketNotFound{Bucket: bucketName}, nil)
 	}
-	metadata, err := dt.getDonutBucketMetadata()
+	metadata, err := donut.getDonutBucketMetadata()
 	if err != nil {
 		return BucketMetadata{}, iodine.New(err, nil)
 	}
 	return metadata.Buckets[bucketName], nil
 }
 
-// SetBucketMetadata - set bucket metadata
-func (dt donut) SetBucketMetadata(bucketName string, bucketMetadata map[string]string) error {
-	dt.lock.Lock()
-	defer dt.lock.Unlock()
-	if err := dt.listDonutBuckets(); err != nil {
+// setBucketMetadata - set bucket metadata
+func (donut API) setBucketMetadata(bucketName string, bucketMetadata map[string]string) error {
+	donut.lock.Lock()
+	defer donut.lock.Unlock()
+	if err := donut.listDonutBuckets(); err != nil {
 		return iodine.New(err, nil)
 	}
-	metadata, err := dt.getDonutBucketMetadata()
+	metadata, err := donut.getDonutBucketMetadata()
 	if err != nil {
 		return iodine.New(err, nil)
 	}
@@ -130,17 +87,17 @@ func (dt donut) SetBucketMetadata(bucketName string, bucketMetadata map[string]s
 	}
 	oldBucketMetadata.ACL = BucketACL(acl)
 	metadata.Buckets[bucketName] = oldBucketMetadata
-	return dt.setDonutBucketMetadata(metadata)
+	return donut.setDonutBucketMetadata(metadata)
 }
 
-// ListBuckets - return list of buckets
-func (dt donut) ListBuckets() (map[string]BucketMetadata, error) {
-	dt.lock.RLock()
-	defer dt.lock.RUnlock()
-	if err := dt.listDonutBuckets(); err != nil {
+// listBuckets - return list of buckets
+func (donut API) listBuckets() (map[string]BucketMetadata, error) {
+	donut.lock.RLock()
+	defer donut.lock.RUnlock()
+	if err := donut.listDonutBuckets(); err != nil {
 		return nil, iodine.New(err, nil)
 	}
-	metadata, err := dt.getDonutBucketMetadata()
+	metadata, err := donut.getDonutBucketMetadata()
 	if err != nil {
 		// intentionally left out the error when Donut is empty
 		// but we need to revisit this area in future - since we need
@@ -150,10 +107,10 @@ func (dt donut) ListBuckets() (map[string]BucketMetadata, error) {
 	return metadata.Buckets, nil
 }
 
-// ListObjects - return list of objects
-func (dt donut) ListObjects(bucket, prefix, marker, delimiter string, maxkeys int) (ListObjects, error) {
-	dt.lock.RLock()
-	defer dt.lock.RUnlock()
+// listObjects - return list of objects
+func (donut API) listObjects(bucket, prefix, marker, delimiter string, maxkeys int) (ListObjectsResults, error) {
+	donut.lock.RLock()
+	defer donut.lock.RUnlock()
 	errParams := map[string]string{
 		"bucket": bucket,
 		"prefix": prefix,
@@ -161,23 +118,23 @@ func (dt donut) ListObjects(bucket, prefix, marker, delimiter string, maxkeys in
 		"delimiter": delimiter,
 		"maxkeys":   strconv.Itoa(maxkeys),
 	}
-	if err := dt.listDonutBuckets(); err != nil {
-		return ListObjects{}, iodine.New(err, errParams)
+	if err := donut.listDonutBuckets(); err != nil {
+		return ListObjectsResults{}, iodine.New(err, errParams)
 	}
-	if _, ok := dt.buckets[bucket]; !ok {
-		return ListObjects{}, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
+	if _, ok := donut.buckets[bucket]; !ok {
+		return ListObjectsResults{}, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
 	}
-	listObjects, err := dt.buckets[bucket].ListObjects(prefix, marker, delimiter, maxkeys)
+	listObjects, err := donut.buckets[bucket].ListObjects(prefix, marker, delimiter, maxkeys)
 	if err != nil {
-		return ListObjects{}, iodine.New(err, errParams)
+		return ListObjectsResults{}, iodine.New(err, errParams)
 	}
 	return listObjects, nil
 }
 
-// PutObject - put object
-func (dt donut) PutObject(bucket, object, expectedMD5Sum string, reader io.Reader, metadata map[string]string) (ObjectMetadata, error) {
-	dt.lock.Lock()
-	defer dt.lock.Unlock()
+// putObject - put object
+func (donut API) putObject(bucket, object, expectedMD5Sum string, reader io.Reader, metadata map[string]string) (ObjectMetadata, error) {
+	donut.lock.Lock()
+	defer donut.lock.Unlock()
 	errParams := map[string]string{
 		"bucket": bucket,
 		"object": object,
@@ -188,34 +145,34 @@ func (dt donut) PutObject(bucket, object, expectedMD5Sum string, reader io.Reade
 	if object == "" || strings.TrimSpace(object) == "" {
 		return ObjectMetadata{}, iodine.New(InvalidArgument{}, errParams)
 	}
-	if err := dt.listDonutBuckets(); err != nil {
+	if err := donut.listDonutBuckets(); err != nil {
 		return ObjectMetadata{}, iodine.New(err, errParams)
 	}
-	if _, ok := dt.buckets[bucket]; !ok {
+	if _, ok := donut.buckets[bucket]; !ok {
 		return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
 	}
-	bucketMeta, err := dt.getDonutBucketMetadata()
+	bucketMeta, err := donut.getDonutBucketMetadata()
 	if err != nil {
 		return ObjectMetadata{}, iodine.New(err, errParams)
 	}
 	if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; ok {
 		return ObjectMetadata{}, iodine.New(ObjectExists{Object: object}, errParams)
 	}
-	objMetadata, err := dt.buckets[bucket].WriteObject(object, reader, expectedMD5Sum, metadata)
+	objMetadata, err := donut.buckets[bucket].WriteObject(object, reader, expectedMD5Sum, metadata)
 	if err != nil {
 		return ObjectMetadata{}, iodine.New(err, errParams)
 	}
 	bucketMeta.Buckets[bucket].BucketObjects[object] = 1
-	if err := dt.setDonutBucketMetadata(bucketMeta); err != nil {
+	if err := donut.setDonutBucketMetadata(bucketMeta); err != nil {
 		return ObjectMetadata{}, iodine.New(err, errParams)
 	}
 	return objMetadata, nil
 }
 
-// GetObject - get object
-func (dt donut) GetObject(bucket, object string) (reader io.ReadCloser, size int64, err error) {
-	dt.lock.RLock()
-	defer dt.lock.RUnlock()
+// getObject - get object
+func (donut API) getObject(bucket, object string) (reader io.ReadCloser, size int64, err error) {
+	donut.lock.RLock()
+	defer donut.lock.RUnlock()
 	errParams := map[string]string{
 		"bucket": bucket,
 		"object": object,
@@ -226,37 +183,37 @@ func (dt donut) GetObject(bucket, object string) (reader io.ReadCloser, size int
 	if object == "" || strings.TrimSpace(object) == "" {
 		return nil, 0, iodine.New(InvalidArgument{}, errParams)
 	}
-	if err := dt.listDonutBuckets(); err != nil {
+	if err := donut.listDonutBuckets(); err != nil {
 		return nil, 0, iodine.New(err, nil)
 	}
-	if _, ok := dt.buckets[bucket]; !ok {
+	if _, ok := donut.buckets[bucket]; !ok {
 		return nil, 0, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
 	}
-	return dt.buckets[bucket].ReadObject(object)
+	return donut.buckets[bucket].ReadObject(object)
 }
 
-// GetObjectMetadata - get object metadata
-func (dt donut) GetObjectMetadata(bucket, object string) (ObjectMetadata, error) {
-	dt.lock.RLock()
-	defer dt.lock.RUnlock()
+// getObjectMetadata - get object metadata
+func (donut API) getObjectMetadata(bucket, object string) (ObjectMetadata, error) {
+	donut.lock.RLock()
+	defer donut.lock.RUnlock()
 	errParams := map[string]string{
 		"bucket": bucket,
 		"object": object,
 	}
-	if err := dt.listDonutBuckets(); err != nil {
+	if err := donut.listDonutBuckets(); err != nil {
 		return ObjectMetadata{}, iodine.New(err, errParams)
 	}
-	if _, ok := dt.buckets[bucket]; !ok {
+	if _, ok := donut.buckets[bucket]; !ok {
 		return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
 	}
-	bucketMeta, err := dt.getDonutBucketMetadata()
+	bucketMeta, err := donut.getDonutBucketMetadata()
 	if err != nil {
 		return ObjectMetadata{}, iodine.New(err, errParams)
 	}
 	if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; !ok {
 		return ObjectMetadata{}, iodine.New(ObjectNotFound{Object: object}, errParams)
 	}
-	objectMetadata, err := dt.buckets[bucket].GetObjectMetadata(object)
+	objectMetadata, err := donut.buckets[bucket].GetObjectMetadata(object)
 	if err != nil {
 		return ObjectMetadata{}, iodine.New(err, nil)
 	}
@@ -264,16 +221,16 @@ func (dt donut) GetObjectMetadata(bucket, object string) (ObjectMetadata, error)
 }
 
 // getDiskWriters -
-func (dt donut) getBucketMetadataWriters() ([]io.WriteCloser, error) {
+func (donut API) getBucketMetadataWriters() ([]io.WriteCloser, error) {
 	var writers []io.WriteCloser
-	for _, node := range dt.nodes {
+	for _, node := range donut.nodes {
 		disks, err := node.ListDisks()
 		if err != nil {
 			return nil, iodine.New(err, nil)
 		}
 		writers = make([]io.WriteCloser, len(disks))
 		for order, d := range disks {
-			bucketMetaDataWriter, err := d.CreateFile(filepath.Join(dt.name, bucketMetadataConfig))
+			bucketMetaDataWriter, err := d.CreateFile(filepath.Join(donut.config.DonutName, bucketMetadataConfig))
 			if err != nil {
 				return nil, iodine.New(err, nil)
 			}
@@ -283,16 +240,16 @@ func (dt donut) getBucketMetadataWriters() ([]io.WriteCloser, error) {
 	return writers, nil
 }
 
-func (dt donut) getBucketMetadataReaders() ([]io.ReadCloser, error) {
+func (donut API) getBucketMetadataReaders() ([]io.ReadCloser, error) {
 	var readers []io.ReadCloser
-	for _, node := range dt.nodes {
+	for _, node := range donut.nodes {
 		disks, err := node.ListDisks()
 		if err != nil {
 			return nil, iodine.New(err, nil)
 		}
 		readers = make([]io.ReadCloser, len(disks))
 		for order, d := range disks {
-			bucketMetaDataReader, err := d.OpenFile(filepath.Join(dt.name, bucketMetadataConfig))
+			bucketMetaDataReader, err := d.OpenFile(filepath.Join(donut.config.DonutName, bucketMetadataConfig))
 			if err != nil {
 				return nil, iodine.New(err, nil)
 			}
@@ -303,8 +260,8 @@ func (dt donut) getBucketMetadataReaders() ([]io.ReadCloser, error) {
 }
 
 //
-func (dt donut) setDonutBucketMetadata(metadata *AllBuckets) error {
-	writers, err := dt.getBucketMetadataWriters()
+func (donut API) setDonutBucketMetadata(metadata *AllBuckets) error {
+	writers, err := donut.getBucketMetadataWriters()
 	if err != nil {
 		return iodine.New(err, nil)
 	}
@@ -320,9 +277,9 @@ func (dt donut) setDonutBucketMetadata(metadata *AllBuckets) error {
 	return nil
 }
 
-func (dt donut) getDonutBucketMetadata() (*AllBuckets, error) {
+func (donut API) getDonutBucketMetadata() (*AllBuckets, error) {
 	metadata := new(AllBuckets)
-	readers, err := dt.getBucketMetadataReaders()
+	readers, err := donut.getBucketMetadataReaders()
 	if err != nil {
 		return nil, iodine.New(err, nil)
 	}
@@ -339,40 +296,40 @@ func (dt donut) getDonutBucketMetadata() (*AllBuckets, error) {
 	return nil, iodine.New(InvalidArgument{}, nil)
 }
 
-func (dt donut) makeDonutBucket(bucketName, acl string) error {
-	if err := dt.listDonutBuckets(); err != nil {
+func (donut API) makeDonutBucket(bucketName, acl string) error {
+	if err := donut.listDonutBuckets(); err != nil {
 		return iodine.New(err, nil)
 	}
-	if _, ok := dt.buckets[bucketName]; ok {
+	if _, ok := donut.buckets[bucketName]; ok {
 		return iodine.New(BucketExists{Bucket: bucketName}, nil)
 	}
-	bucket, bucketMetadata, err := newBucket(bucketName, acl, dt.name, dt.nodes)
+	bucket, bucketMetadata, err := newBucket(bucketName, acl, donut.config.DonutName, donut.nodes)
 	if err != nil {
 		return iodine.New(err, nil)
 	}
 	nodeNumber := 0
-	dt.buckets[bucketName] = bucket
-	for _, node := range dt.nodes {
+	donut.buckets[bucketName] = bucket
+	for _, node := range donut.nodes {
 		disks, err := node.ListDisks()
 		if err != nil {
 			return iodine.New(err, nil)
 		}
 		for order, disk := range disks {
 			bucketSlice := fmt.Sprintf("%s$%d$%d", bucketName, nodeNumber, order)
-			err := disk.MakeDir(filepath.Join(dt.name, bucketSlice))
+			err := disk.MakeDir(filepath.Join(donut.config.DonutName, bucketSlice))
 			if err != nil {
 				return iodine.New(err, nil)
 			}
 		}
 		nodeNumber = nodeNumber + 1
 	}
-	metadata, err := dt.getDonutBucketMetadata()
+	metadata, err := donut.getDonutBucketMetadata()
 	if err != nil {
 		if os.IsNotExist(iodine.ToError(err)) {
 			metadata := new(AllBuckets)
 			metadata.Buckets = make(map[string]BucketMetadata)
 			metadata.Buckets[bucketName] = bucketMetadata
-			err = dt.setDonutBucketMetadata(metadata)
+			err = donut.setDonutBucketMetadata(metadata)
 			if err != nil {
 				return iodine.New(err, nil)
 			}
@@ -381,21 +338,21 @@ func (dt donut) makeDonutBucket(bucketName, acl string) error {
 		return iodine.New(err, nil)
 	}
 	metadata.Buckets[bucketName] = bucketMetadata
-	err = dt.setDonutBucketMetadata(metadata)
+	err = donut.setDonutBucketMetadata(metadata)
 	if err != nil {
 		return iodine.New(err, nil)
 	}
 	return nil
 }
 
-func (dt donut) listDonutBuckets() error {
-	for _, node := range dt.nodes {
+func (donut API) listDonutBuckets() error {
+	for _, node := range donut.nodes {
 		disks, err := node.ListDisks()
 		if err != nil {
 			return iodine.New(err, nil)
 		}
 		for _, disk := range disks {
-			dirs, err := disk.ListDir(dt.name)
+			dirs, err := disk.ListDir(donut.config.DonutName)
 			if err != nil {
 				return iodine.New(err, nil)
 			}
@@ -406,11 +363,11 @@ func (dt donut) listDonutBuckets() error {
 			}
 			bucketName := splitDir[0]
 			// we dont need this once we cache from makeDonutBucket()
-			bucket, _, err := newBucket(bucketName, "private", dt.name, dt.nodes)
+			bucket, _, err := newBucket(bucketName, "private", donut.config.DonutName, donut.nodes)
 			if err != nil {
 				return iodine.New(err, nil)
 			}
-			dt.buckets[bucketName] = bucket
+			donut.buckets[bucketName] = bucket
 		}
 	}
 }
@@ -55,13 +55,22 @@ func createTestNodeDiskMap(p string) map[string][]string {
 	return nodes
 }
 
-var dd Cache
+var dd Interface
 
 func (s *MyDonutSuite) SetUpSuite(c *C) {
 	root, err := ioutil.TempDir(os.TempDir(), "donut-")
 	c.Assert(err, IsNil)
 	s.root = root
-	dd = NewCache(100000, time.Duration(1*time.Hour), "test", createTestNodeDiskMap(root))
+
+	conf := new(Config)
+	conf.DonutName = "test"
+	conf.NodeDiskMap = createTestNodeDiskMap(root)
+	conf.Expiration = time.Duration(1 * time.Hour)
+	conf.MaxSize = 100000
+
+	dd, err = New(conf)
+	c.Assert(err, IsNil)
+
 	// testing empty donut
 	buckets, err := dd.ListBuckets()
 	c.Assert(err, IsNil)
@@ -145,7 +154,7 @@ func (s *MyDonutSuite) TestCreateMultipleBucketsAndList(c *C) {
 
 // test object create without bucket
 func (s *MyDonutSuite) TestNewObjectFailsWithoutBucket(c *C) {
-	_, err := dd.CreateObject("unknown", "obj", "", "", 0, nil)
+	_, err := dd.CreateObject("unknown", "obj", "", 0, nil, nil)
 	c.Assert(err, Not(IsNil))
 }
 
@@ -160,7 +169,7 @@ func (s *MyDonutSuite) TestNewObjectMetadata(c *C) {
 	err := dd.MakeBucket("foo6", "private")
 	c.Assert(err, IsNil)
 
-	objectMetadata, err := dd.CreateObject("foo6", "obj", "application/json", expectedMd5Sum, int64(len(data)), reader)
+	objectMetadata, err := dd.CreateObject("foo6", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/json"})
 	c.Assert(err, IsNil)
 	c.Assert(objectMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil)))
 	c.Assert(objectMetadata.Metadata["contentType"], Equals, "application/json")
@@ -168,7 +177,7 @@ func (s *MyDonutSuite) TestNewObjectMetadata(c *C) {
 
 // test create object fails without name
 func (s *MyDonutSuite) TestNewObjectFailsWithEmptyName(c *C) {
-	_, err := dd.CreateObject("foo", "", "", "", 0, nil)
+	_, err := dd.CreateObject("foo", "", "", 0, nil, nil)
 	c.Assert(err, Not(IsNil))
 }
 
@@ -184,7 +193,7 @@ func (s *MyDonutSuite) TestNewObjectCanBeWritten(c *C) {
 	expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
 	reader := ioutil.NopCloser(bytes.NewReader([]byte(data)))
 
-	actualMetadata, err := dd.CreateObject("foo", "obj", "application/octet-stream", expectedMd5Sum, int64(len(data)), reader)
+	actualMetadata, err := dd.CreateObject("foo", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/octet-stream"})
 	c.Assert(err, IsNil)
 	c.Assert(actualMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil)))
 
@@ -206,11 +215,11 @@ func (s *MyDonutSuite) TestMultipleNewObjects(c *C) {
 
 	one := ioutil.NopCloser(bytes.NewReader([]byte("one")))
 
-	_, err := dd.CreateObject("foo5", "obj1", "", "", int64(len("one")), one)
+	_, err := dd.CreateObject("foo5", "obj1", "", int64(len("one")), one, nil)
 	c.Assert(err, IsNil)
 
 	two := ioutil.NopCloser(bytes.NewReader([]byte("two")))
-	_, err = dd.CreateObject("foo5", "obj2", "", "", int64(len("two")), two)
+	_, err = dd.CreateObject("foo5", "obj2", "", int64(len("two")), two, nil)
 	c.Assert(err, IsNil)
 
 	var buffer1 bytes.Buffer
@@ -259,7 +268,7 @@ func (s *MyDonutSuite) TestMultipleNewObjects(c *C) {
 	c.Assert(objectsMetadata[1].Object, Equals, "obj2")
 
 	three := ioutil.NopCloser(bytes.NewReader([]byte("three")))
-	_, err = dd.CreateObject("foo5", "obj3", "", "", int64(len("three")), three)
+	_, err = dd.CreateObject("foo5", "obj3", "", int64(len("three")), three, nil)
 	c.Assert(err, IsNil)
 
 	var buffer bytes.Buffer
@@ -33,6 +33,7 @@ import (
 	"time"
 
 	"github.com/minio/minio/pkg/iodine"
+	"github.com/minio/minio/pkg/quick"
 	"github.com/minio/minio/pkg/storage/donut/trove"
 )
 
@@ -41,15 +42,24 @@ const (
 	totalBuckets = 100
 )
 
-// Cache - local variables
-type Cache struct {
-	storedBuckets map[string]storedBucket
+// Config donut config
+type Config struct {
+	Version     string              `json:"version"`
+	MaxSize     uint64              `json:"max-size"`
+	Expiration  time.Duration       `json:"expiration"`
+	DonutName   string              `json:"donut-name"`
+	NodeDiskMap map[string][]string `json:"node-disk-map"`
+}
+
+// API - local variables
+type API struct {
+	config           *Config
 	lock             *sync.RWMutex
 	objects          *trove.Cache
 	multiPartObjects *trove.Cache
-	maxSize          uint64
-	expiration       time.Duration
-	donut            Donut
+	storedBuckets    map[string]storedBucket
+	nodes            map[string]node
+	buckets          map[string]bucket
 }
 
 // storedBucket saved bucket
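Note: the old Cache struct is split in two here, a serializable Config carrying the tunables and an API struct carrying runtime state. A minimal in-package usage sketch, mirroring what the updated test suites below do (the disk layout shown is hypothetical):

	conf := new(Config)
	conf.DonutName = "mydonut"
	conf.NodeDiskMap = map[string][]string{
		"localhost": {"/mnt/disk1", "/mnt/disk2"}, // assumed paths
	}
	conf.Expiration = 1 * time.Hour
	conf.MaxSize = 100000

	donut, err := New(conf) // quick.CheckData validates conf before nodes attach
	if err != nil {
		// handle a rejected config or a failed AttachNode
	}
	_ = donut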
@@ -67,79 +77,85 @@ type multiPartSession struct {
 	initiated time.Time
 }
 
-type proxyWriter struct {
-	writer       io.Writer
-	writtenBytes []byte
-}
-
-func (r *proxyWriter) Write(p []byte) (n int, err error) {
-	n, err = r.writer.Write(p)
-	if err != nil {
-		return
+// New instantiate a new donut
+func New(c *Config) (Interface, error) {
+	if err := quick.CheckData(c); err != nil {
+		return nil, iodine.New(err, nil)
 	}
-	r.writtenBytes = append(r.writtenBytes, p[0:n]...)
-	return
-}
-
-func newProxyWriter(w io.Writer) *proxyWriter {
-	return &proxyWriter{writer: w, writtenBytes: nil}
-}
-
-// NewCache new cache
-func NewCache(maxSize uint64, expiration time.Duration, donutName string, nodeDiskMap map[string][]string) Cache {
-	c := Cache{}
-	c.storedBuckets = make(map[string]storedBucket)
-	c.objects = trove.NewCache(maxSize, expiration)
-	c.multiPartObjects = trove.NewCache(0, time.Duration(0))
-	c.objects.OnExpired = c.expiredObject
-	c.multiPartObjects.OnExpired = c.expiredPart
-	c.lock = new(sync.RWMutex)
-	c.maxSize = maxSize
-	c.expiration = expiration
+	a := API{config: c}
+	a.storedBuckets = make(map[string]storedBucket)
+	a.nodes = make(map[string]node)
+	a.buckets = make(map[string]bucket)
+	a.objects = trove.NewCache(a.config.MaxSize, a.config.Expiration)
+	a.multiPartObjects = trove.NewCache(0, time.Duration(0))
+	a.objects.OnExpired = a.expiredObject
+	a.multiPartObjects.OnExpired = a.expiredPart
+	a.lock = new(sync.RWMutex)
 
 	// set up cache expiration
-	c.objects.ExpireObjects(time.Second * 5)
-	c.donut, _ = NewDonut(donutName, nodeDiskMap)
-	return c
+	a.objects.ExpireObjects(time.Second * 5)
+	if len(a.config.NodeDiskMap) > 0 {
+		for k, v := range a.config.NodeDiskMap {
+			if len(v) == 0 {
+				return nil, iodine.New(InvalidDisksArgument{}, nil)
+			}
+			err := a.AttachNode(k, v)
+			if err != nil {
+				return nil, iodine.New(err, nil)
+			}
+		}
+		/// Initialization, populate all buckets into memory
+		buckets, err := a.listBuckets()
+		if err != nil {
+			return nil, iodine.New(err, nil)
+		}
+		for k, v := range buckets {
+			storedBucket := a.storedBuckets[k]
+			storedBucket.bucketMetadata = v
+			a.storedBuckets[k] = storedBucket
+		}
+	}
+	return a, nil
 }
 
 // GetObject - GET object from cache buffer
-func (cache Cache) GetObject(w io.Writer, bucket string, object string) (int64, error) {
-	cache.lock.RLock()
+func (donut API) GetObject(w io.Writer, bucket string, object string) (int64, error) {
+	donut.lock.RLock()
 	if !IsValidBucket(bucket) {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return 0, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
 	}
 	if !IsValidObjectName(object) {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return 0, iodine.New(ObjectNameInvalid{Object: object}, nil)
 	}
-	if _, ok := cache.storedBuckets[bucket]; ok == false {
-		cache.lock.RUnlock()
+	if _, ok := donut.storedBuckets[bucket]; ok == false {
+		donut.lock.RUnlock()
 		return 0, iodine.New(BucketNotFound{Bucket: bucket}, nil)
 	}
 	objectKey := bucket + "/" + object
-	data, ok := cache.objects.Get(objectKey)
+	data, ok := donut.objects.Get(objectKey)
 	if !ok {
-		if cache.donut != nil {
-			reader, size, err := cache.donut.GetObject(bucket, object)
+		if len(donut.config.NodeDiskMap) > 0 {
+			reader, size, err := donut.getObject(bucket, object)
 			if err != nil {
-				cache.lock.RUnlock()
+				donut.lock.RUnlock()
 				return 0, iodine.New(err, nil)
 			}
 			// new proxy writer to capture data read from disk
-			pw := newProxyWriter(w)
+			pw := NewProxyWriter(w)
 			written, err := io.CopyN(pw, reader, size)
 			if err != nil {
-				cache.lock.RUnlock()
+				donut.lock.RUnlock()
 				return 0, iodine.New(err, nil)
 			}
-			cache.lock.RUnlock()
+			donut.lock.RUnlock()
 			/// cache object read from disk
 			{
-				cache.lock.Lock()
-				ok := cache.objects.Set(objectKey, pw.writtenBytes)
-				cache.lock.Unlock()
+				donut.lock.Lock()
+				ok := donut.objects.Set(objectKey, pw.writtenBytes)
+				donut.lock.Unlock()
 				pw.writtenBytes = nil
 				go debug.FreeOSMemory()
 				if !ok {
@@ -148,65 +164,65 @@ func (cache Cache) GetObject(w io.Writer, bucket string, object string) (int64,
 			}
 			return written, nil
 		}
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return 0, iodine.New(ObjectNotFound{Object: object}, nil)
 	}
-	written, err := io.CopyN(w, bytes.NewBuffer(data), int64(cache.objects.Len(objectKey)))
+	written, err := io.CopyN(w, bytes.NewBuffer(data), int64(donut.objects.Len(objectKey)))
 	if err != nil {
 		return 0, iodine.New(err, nil)
 	}
-	cache.lock.RUnlock()
+	donut.lock.RUnlock()
 	return written, nil
 }
 
 // GetPartialObject - GET object from cache buffer range
-func (cache Cache) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) {
+func (donut API) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) {
 	errParams := map[string]string{
 		"bucket": bucket,
 		"object": object,
 		"start":  strconv.FormatInt(start, 10),
 		"length": strconv.FormatInt(length, 10),
 	}
-	cache.lock.RLock()
+	donut.lock.RLock()
 	if !IsValidBucket(bucket) {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return 0, iodine.New(BucketNameInvalid{Bucket: bucket}, errParams)
 	}
 	if !IsValidObjectName(object) {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return 0, iodine.New(ObjectNameInvalid{Object: object}, errParams)
 	}
 	if start < 0 {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return 0, iodine.New(InvalidRange{
 			Start:  start,
 			Length: length,
 		}, errParams)
 	}
 	objectKey := bucket + "/" + object
-	data, ok := cache.objects.Get(objectKey)
+	data, ok := donut.objects.Get(objectKey)
 	if !ok {
-		if cache.donut != nil {
-			reader, _, err := cache.donut.GetObject(bucket, object)
+		if len(donut.config.NodeDiskMap) > 0 {
+			reader, _, err := donut.getObject(bucket, object)
 			if err != nil {
-				cache.lock.RUnlock()
+				donut.lock.RUnlock()
 				return 0, iodine.New(err, nil)
 			}
 			if _, err := io.CopyN(ioutil.Discard, reader, start); err != nil {
-				cache.lock.RUnlock()
+				donut.lock.RUnlock()
 				return 0, iodine.New(err, nil)
 			}
-			pw := newProxyWriter(w)
+			pw := NewProxyWriter(w)
 			written, err := io.CopyN(w, reader, length)
 			if err != nil {
-				cache.lock.RUnlock()
+				donut.lock.RUnlock()
 				return 0, iodine.New(err, nil)
 			}
-			cache.lock.RUnlock()
+			donut.lock.RUnlock()
 			{
-				cache.lock.Lock()
-				ok := cache.objects.Set(objectKey, pw.writtenBytes)
-				cache.lock.Unlock()
+				donut.lock.Lock()
+				ok := donut.objects.Set(objectKey, pw.writtenBytes)
+				donut.lock.Unlock()
 				pw.writtenBytes = nil
 				go debug.FreeOSMemory()
 				if !ok {
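Note: both read paths tee everything they serve through a proxy writer so a disk read can also populate the in-memory cache. The unexported proxyWriter deleted above is replaced by an exported NewProxyWriter defined elsewhere in the package; a sketch consistent with the removed implementation (exported field names are assumed):

	// ProxyWriter forwards writes to an underlying io.Writer while keeping a
	// copy of every byte that was successfully written, so the caller can
	// cache exactly what the client received.
	type ProxyWriter struct {
		writer       io.Writer
		writtenBytes []byte
	}

	func NewProxyWriter(w io.Writer) *ProxyWriter {
		return &ProxyWriter{writer: w}
	}

	func (p *ProxyWriter) Write(b []byte) (n int, err error) {
		n, err = p.writer.Write(b)
		if err != nil {
			return
		}
		p.writtenBytes = append(p.writtenBytes, b[0:n]...)
		return
	}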
@@ -215,72 +231,70 @@ func (cache Cache) GetPartialObject(w io.Writer, bucket, object string, start, l
 			}
 			return written, nil
 		}
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return 0, iodine.New(ObjectNotFound{Object: object}, nil)
 	}
 	written, err := io.CopyN(w, bytes.NewBuffer(data[start:]), length)
 	if err != nil {
 		return 0, iodine.New(err, nil)
 	}
-	cache.lock.RUnlock()
+	donut.lock.RUnlock()
 	return written, nil
 }
 
 // GetBucketMetadata -
-func (cache Cache) GetBucketMetadata(bucket string) (BucketMetadata, error) {
-	cache.lock.RLock()
+func (donut API) GetBucketMetadata(bucket string) (BucketMetadata, error) {
+	donut.lock.RLock()
 	if !IsValidBucket(bucket) {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return BucketMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
 	}
-	if _, ok := cache.storedBuckets[bucket]; ok == false {
-		if cache.donut == nil {
-			cache.lock.RUnlock()
-			return BucketMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
+	if _, ok := donut.storedBuckets[bucket]; ok == false {
+		if len(donut.config.NodeDiskMap) > 0 {
+			bucketMetadata, err := donut.getBucketMetadata(bucket)
+			if err != nil {
+				donut.lock.RUnlock()
+				return BucketMetadata{}, iodine.New(err, nil)
+			}
+			storedBucket := donut.storedBuckets[bucket]
+			donut.lock.RUnlock()
+			{
+				donut.lock.Lock()
+				storedBucket.bucketMetadata = bucketMetadata
+				donut.storedBuckets[bucket] = storedBucket
+				donut.lock.Unlock()
+			}
 		}
-		bucketMetadata, err := cache.donut.GetBucketMetadata(bucket)
-		if err != nil {
-			cache.lock.RUnlock()
-			return BucketMetadata{}, iodine.New(err, nil)
-		}
-		storedBucket := cache.storedBuckets[bucket]
-		cache.lock.RUnlock()
-		cache.lock.Lock()
-		storedBucket.bucketMetadata = bucketMetadata
-		cache.storedBuckets[bucket] = storedBucket
-		cache.lock.Unlock()
+		return BucketMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
 	}
-	cache.lock.RUnlock()
-	return cache.storedBuckets[bucket].bucketMetadata, nil
+	donut.lock.RUnlock()
+	return donut.storedBuckets[bucket].bucketMetadata, nil
 }
 
 // SetBucketMetadata -
-func (cache Cache) SetBucketMetadata(bucket, acl string) error {
-	cache.lock.RLock()
+func (donut API) SetBucketMetadata(bucket string, metadata map[string]string) error {
+	donut.lock.RLock()
 	if !IsValidBucket(bucket) {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
 	}
-	if _, ok := cache.storedBuckets[bucket]; ok == false {
-		cache.lock.RUnlock()
+	if _, ok := donut.storedBuckets[bucket]; ok == false {
+		donut.lock.RUnlock()
 		return iodine.New(BucketNotFound{Bucket: bucket}, nil)
 	}
-	if strings.TrimSpace(acl) == "" {
-		acl = "private"
-	}
-	cache.lock.RUnlock()
-	cache.lock.Lock()
-	m := make(map[string]string)
-	m["acl"] = acl
-	if cache.donut != nil {
-		if err := cache.donut.SetBucketMetadata(bucket, m); err != nil {
-			return iodine.New(err, nil)
+	donut.lock.RUnlock()
+	donut.lock.Lock()
+	{
+		if len(donut.config.NodeDiskMap) > 0 {
+			if err := donut.setBucketMetadata(bucket, metadata); err != nil {
+				return iodine.New(err, nil)
+			}
 		}
+		storedBucket := donut.storedBuckets[bucket]
+		storedBucket.bucketMetadata.ACL = BucketACL(metadata["acl"])
+		donut.storedBuckets[bucket] = storedBucket
 	}
-	storedBucket := cache.storedBuckets[bucket]
-	storedBucket.bucketMetadata.ACL = BucketACL(acl)
-	cache.storedBuckets[bucket] = storedBucket
-	cache.lock.Unlock()
+	donut.lock.Unlock()
 	return nil
 }
 
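Note: SetBucketMetadata releases the read lock before taking the write lock rather than upgrading in place, so another writer may interleave between validation and mutation. A stripped-down in-package sketch of the same handoff (the method name is illustrative, not part of this commit):

	func (donut API) setACLExample(bucket, acl string) error {
		donut.lock.RLock()
		_, ok := donut.storedBuckets[bucket]
		donut.lock.RUnlock()
		if !ok {
			return iodine.New(BucketNotFound{Bucket: bucket}, nil)
		}
		donut.lock.Lock() // another writer may have run since RUnlock
		storedBucket := donut.storedBuckets[bucket]
		storedBucket.bucketMetadata.ACL = BucketACL(acl)
		donut.storedBuckets[bucket] = storedBucket
		donut.lock.Unlock()
		return nil
	}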
@@ -304,44 +318,45 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error {
 }
 
 // CreateObject -
-func (cache Cache) CreateObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (ObjectMetadata, error) {
-	if size > int64(cache.maxSize) {
+func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, data io.Reader, metadata map[string]string) (ObjectMetadata, error) {
+	if size > int64(donut.config.MaxSize) {
 		generic := GenericObjectError{Bucket: bucket, Object: key}
 		return ObjectMetadata{}, iodine.New(EntityTooLarge{
 			GenericObjectError: generic,
 			Size:               strconv.FormatInt(size, 10),
-			MaxSize:            strconv.FormatUint(cache.maxSize, 10),
+			MaxSize:            strconv.FormatUint(donut.config.MaxSize, 10),
 		}, nil)
 	}
-	objectMetadata, err := cache.createObject(bucket, key, contentType, expectedMD5Sum, size, data)
+	contentType := metadata["contentType"]
+	objectMetadata, err := donut.createObject(bucket, key, contentType, expectedMD5Sum, size, data)
 	// free
 	debug.FreeOSMemory()
 	return objectMetadata, iodine.New(err, nil)
 }
 
 // createObject - PUT object to cache buffer
-func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (ObjectMetadata, error) {
-	cache.lock.RLock()
+func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (ObjectMetadata, error) {
+	donut.lock.RLock()
 	if !IsValidBucket(bucket) {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
 	}
 	if !IsValidObjectName(key) {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil)
 	}
-	if _, ok := cache.storedBuckets[bucket]; ok == false {
-		cache.lock.RUnlock()
+	if _, ok := donut.storedBuckets[bucket]; ok == false {
+		donut.lock.RUnlock()
 		return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
 	}
-	storedBucket := cache.storedBuckets[bucket]
+	storedBucket := donut.storedBuckets[bucket]
 	// get object key
 	objectKey := bucket + "/" + key
 	if _, ok := storedBucket.objectMetadata[objectKey]; ok == true {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return ObjectMetadata{}, iodine.New(ObjectExists{Object: key}, nil)
 	}
-	cache.lock.RUnlock()
+	donut.lock.RUnlock()
 
 	if contentType == "" {
 		contentType = "application/octet-stream"
@@ -356,15 +371,15 @@ func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string,
 		expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
 	}
 
-	if cache.donut != nil {
-		objMetadata, err := cache.donut.PutObject(bucket, key, expectedMD5Sum, data, map[string]string{"contentType": contentType})
+	if len(donut.config.NodeDiskMap) > 0 {
+		objMetadata, err := donut.putObject(bucket, key, expectedMD5Sum, data, map[string]string{"contentType": contentType})
 		if err != nil {
 			return ObjectMetadata{}, iodine.New(err, nil)
 		}
-		cache.lock.Lock()
+		donut.lock.Lock()
 		storedBucket.objectMetadata[objectKey] = objMetadata
-		cache.storedBuckets[bucket] = storedBucket
-		cache.lock.Unlock()
+		donut.storedBuckets[bucket] = storedBucket
+		donut.lock.Unlock()
 		return objMetadata, nil
 	}
 	// calculate md5
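Note: when no disks are attached, createObject falls back to the in-memory path that follows: the body is consumed in chunks, with each chunk fed to the MD5 hash and appended to the cache exactly once, so the object is never buffered twice. The essence of that loop as an in-package sketch (store stands in for donut.objects.Append and must copy the chunk if it retains it, since buf is reused between reads):

	func hashWhileStoring(r io.Reader, store func([]byte) bool) (string, error) {
		hash := md5.New()
		buf := make([]byte, 1024*1024)
		for {
			n, err := r.Read(buf)
			if n > 0 {
				hash.Write(buf[0:n])
				if !store(buf[0:n]) {
					return "", iodine.New(InternalError{}, nil)
				}
			}
			if err == io.EOF {
				break
			}
			if err != nil {
				return "", iodine.New(err, nil)
			}
		}
		return hex.EncodeToString(hash.Sum(nil)), nil
	}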
@@ -382,9 +397,9 @@ func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string,
 			break
 		}
 		hash.Write(byteBuffer[0:length])
-		cache.lock.Lock()
-		ok := cache.objects.Append(objectKey, byteBuffer[0:length])
-		cache.lock.Unlock()
+		donut.lock.Lock()
+		ok := donut.objects.Append(objectKey, byteBuffer[0:length])
+		donut.lock.Unlock()
 		if !ok {
 			return ObjectMetadata{}, iodine.New(InternalError{}, nil)
 		}
@@ -416,40 +431,40 @@ func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string,
 		Size: int64(totalLength),
 	}
 
-	cache.lock.Lock()
+	donut.lock.Lock()
 	storedBucket.objectMetadata[objectKey] = newObject
-	cache.storedBuckets[bucket] = storedBucket
-	cache.lock.Unlock()
+	donut.storedBuckets[bucket] = storedBucket
+	donut.lock.Unlock()
 	return newObject, nil
 }
 
 // MakeBucket - create bucket in cache
-func (cache Cache) MakeBucket(bucketName, acl string) error {
-	cache.lock.RLock()
-	if len(cache.storedBuckets) == totalBuckets {
-		cache.lock.RUnlock()
+func (donut API) MakeBucket(bucketName, acl string) error {
+	donut.lock.RLock()
+	if len(donut.storedBuckets) == totalBuckets {
+		donut.lock.RUnlock()
 		return iodine.New(TooManyBuckets{Bucket: bucketName}, nil)
 	}
 	if !IsValidBucket(bucketName) {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return iodine.New(BucketNameInvalid{Bucket: bucketName}, nil)
 	}
 	if !IsValidBucketACL(acl) {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return iodine.New(InvalidACL{ACL: acl}, nil)
 	}
-	if _, ok := cache.storedBuckets[bucketName]; ok == true {
-		cache.lock.RUnlock()
+	if _, ok := donut.storedBuckets[bucketName]; ok == true {
+		donut.lock.RUnlock()
 		return iodine.New(BucketExists{Bucket: bucketName}, nil)
 	}
-	cache.lock.RUnlock()
+	donut.lock.RUnlock()
 
 	if strings.TrimSpace(acl) == "" {
 		// default is private
 		acl = "private"
 	}
-	if cache.donut != nil {
-		if err := cache.donut.MakeBucket(bucketName, BucketACL(acl)); err != nil {
+	if len(donut.config.NodeDiskMap) > 0 {
+		if err := donut.makeBucket(bucketName, BucketACL(acl)); err != nil {
 			return iodine.New(err, nil)
 		}
 	}
@@ -461,29 +476,29 @@ func (cache Cache) MakeBucket(bucketName, acl string) error {
 	newBucket.bucketMetadata.Name = bucketName
 	newBucket.bucketMetadata.Created = time.Now().UTC()
 	newBucket.bucketMetadata.ACL = BucketACL(acl)
-	cache.lock.Lock()
-	cache.storedBuckets[bucketName] = newBucket
-	cache.lock.Unlock()
+	donut.lock.Lock()
+	donut.storedBuckets[bucketName] = newBucket
+	donut.lock.Unlock()
 	return nil
 }
 
 // ListObjects - list objects from cache
-func (cache Cache) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error) {
-	cache.lock.RLock()
-	defer cache.lock.RUnlock()
+func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error) {
+	donut.lock.RLock()
+	defer donut.lock.RUnlock()
 	if !IsValidBucket(bucket) {
 		return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
 	}
 	if !IsValidPrefix(resources.Prefix) {
 		return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(ObjectNameInvalid{Object: resources.Prefix}, nil)
 	}
-	if _, ok := cache.storedBuckets[bucket]; ok == false {
+	if _, ok := donut.storedBuckets[bucket]; ok == false {
 		return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
 	}
 	var results []ObjectMetadata
 	var keys []string
-	if cache.donut != nil {
-		listObjects, err := cache.donut.ListObjects(
+	if len(donut.config.NodeDiskMap) > 0 {
+		listObjects, err := donut.listObjects(
 			bucket,
 			resources.Prefix,
 			resources.Marker,
@@ -507,7 +522,7 @@
 		}
 		return results, resources, nil
 	}
-	storedBucket := cache.storedBuckets[bucket]
+	storedBucket := donut.storedBuckets[bucket]
 	for key := range storedBucket.objectMetadata {
 		if strings.HasPrefix(key, bucket+"/") {
 			key = key[len(bucket)+1:]
@@ -561,11 +576,11 @@ func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
 func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }
 
 // ListBuckets - List buckets from cache
-func (cache Cache) ListBuckets() ([]BucketMetadata, error) {
-	cache.lock.RLock()
-	defer cache.lock.RUnlock()
+func (donut API) ListBuckets() ([]BucketMetadata, error) {
+	donut.lock.RLock()
+	defer donut.lock.RUnlock()
 	var results []BucketMetadata
-	for _, bucket := range cache.storedBuckets {
+	for _, bucket := range donut.storedBuckets {
 		results = append(results, bucket.bucketMetadata)
 	}
 	sort.Sort(byBucketName(results))
@ -573,50 +588,50 @@ func (cache Cache) ListBuckets() ([]BucketMetadata, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetObjectMetadata - get object metadata from cache
|
// GetObjectMetadata - get object metadata from cache
|
||||||
func (cache Cache) GetObjectMetadata(bucket, key string) (ObjectMetadata, error) {
|
func (donut API) GetObjectMetadata(bucket, key string) (ObjectMetadata, error) {
|
||||||
cache.lock.RLock()
|
donut.lock.RLock()
|
||||||
// check if bucket exists
|
// check if bucket exists
|
||||||
if !IsValidBucket(bucket) {
|
if !IsValidBucket(bucket) {
|
||||||
cache.lock.RUnlock()
|
donut.lock.RUnlock()
|
||||||
return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
|
return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
|
||||||
}
|
}
|
||||||
if !IsValidObjectName(key) {
|
if !IsValidObjectName(key) {
|
||||||
cache.lock.RUnlock()
|
donut.lock.RUnlock()
|
||||||
return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil)
|
return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil)
|
||||||
}
|
}
|
||||||
if _, ok := cache.storedBuckets[bucket]; ok == false {
|
if _, ok := donut.storedBuckets[bucket]; ok == false {
|
||||||
cache.lock.RUnlock()
|
donut.lock.RUnlock()
|
||||||
return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
|
return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
|
||||||
}
|
}
|
||||||
storedBucket := cache.storedBuckets[bucket]
|
storedBucket := donut.storedBuckets[bucket]
|
||||||
objectKey := bucket + "/" + key
|
objectKey := bucket + "/" + key
|
||||||
if objMetadata, ok := storedBucket.objectMetadata[objectKey]; ok == true {
|
if objMetadata, ok := storedBucket.objectMetadata[objectKey]; ok == true {
|
||||||
cache.lock.RUnlock()
|
donut.lock.RUnlock()
|
||||||
return objMetadata, nil
|
return objMetadata, nil
|
||||||
}
|
}
|
||||||
if cache.donut != nil {
|
if len(donut.config.NodeDiskMap) > 0 {
|
||||||
objMetadata, err := cache.donut.GetObjectMetadata(bucket, key)
|
objMetadata, err := donut.getObjectMetadata(bucket, key)
|
||||||
cache.lock.RUnlock()
|
donut.lock.RUnlock()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ObjectMetadata{}, iodine.New(err, nil)
|
return ObjectMetadata{}, iodine.New(err, nil)
|
||||||
}
|
}
|
||||||
// update
|
// update
|
||||||
cache.lock.Lock()
|
donut.lock.Lock()
|
||||||
storedBucket.objectMetadata[objectKey] = objMetadata
|
storedBucket.objectMetadata[objectKey] = objMetadata
|
||||||
cache.lock.Unlock()
|
donut.lock.Unlock()
|
||||||
return objMetadata, nil
|
return objMetadata, nil
|
||||||
}
|
}
|
||||||
cache.lock.RUnlock()
|
donut.lock.RUnlock()
|
||||||
return ObjectMetadata{}, iodine.New(ObjectNotFound{Object: key}, nil)
|
return ObjectMetadata{}, iodine.New(ObjectNotFound{Object: key}, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cache Cache) expiredObject(a ...interface{}) {
|
func (donut API) expiredObject(a ...interface{}) {
|
||||||
cacheStats := cache.objects.Stats()
|
cacheStats := donut.objects.Stats()
|
||||||
log.Printf("CurrentSize: %d, CurrentItems: %d, TotalExpirations: %d",
|
log.Printf("CurrentSize: %d, CurrentItems: %d, TotalExpirations: %d",
|
||||||
cacheStats.Bytes, cacheStats.Items, cacheStats.Expired)
|
cacheStats.Bytes, cacheStats.Items, cacheStats.Expired)
|
||||||
key := a[0].(string)
|
key := a[0].(string)
|
||||||
// loop through all buckets
|
// loop through all buckets
|
||||||
for _, storedBucket := range cache.storedBuckets {
|
for _, storedBucket := range donut.storedBuckets {
|
||||||
delete(storedBucket.objectMetadata, key)
|
delete(storedBucket.objectMetadata, key)
|
||||||
}
|
}
|
||||||
debug.FreeOSMemory()
|
debug.FreeOSMemory()
|
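The GetObjectMetadata change above turns the in-memory metadata map into a read-through cache over the donut disk layout: serve from memory when the key is present, otherwise fetch from disk and backfill the map under a write lock. A minimal sketch of that pattern, with hypothetical names (memory, fetchFromDisk) standing in for donut's actual fields:

// Read-through lookup: a sketch of the pattern, not donut's actual code.
// memory and fetchFromDisk are illustrative stand-ins.
func readThrough(memory map[string]ObjectMetadata, fetchFromDisk func(string) (ObjectMetadata, error), key string) (ObjectMetadata, error) {
	if meta, ok := memory[key]; ok {
		return meta, nil // cache hit, no disk access
	}
	meta, err := fetchFromDisk(key) // cache miss, read the donut layout
	if err != nil {
		return ObjectMetadata{}, err
	}
	memory[key] = meta // backfill so the next lookup is a hit
	return meta, nil
}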
@@ -34,11 +34,20 @@ type MyCacheSuite struct{}

 var _ = Suite(&MyCacheSuite{})

-var dc Cache
+var dc Interface

 func (s *MyCacheSuite) SetUpSuite(c *C) {
-	// no donut this time
-	dc = NewCache(100000, time.Duration(1*time.Hour), "", nil)
+	// test only cache
+	conf := new(Config)
+	conf.DonutName = ""
+	conf.NodeDiskMap = nil
+	conf.Expiration = time.Duration(1 * time.Hour)
+	conf.MaxSize = 100000
+
+	var err error
+	dc, err = New(conf)
+	c.Assert(err, IsNil)

 	// testing empty cache
 	buckets, err := dc.ListBuckets()
 	c.Assert(err, IsNil)
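With the constructor now taking a Config, the same New entry point covers both modes: an empty DonutName and nil NodeDiskMap (as in the test above) yield the cache-only path, while a populated NodeDiskMap enables the disk-backed branches guarded by len(donut.config.NodeDiskMap) > 0. A hypothetical disk-backed setup might look like this (the donut name and disk paths are invented):

// Hedged sketch of a disk-backed configuration; only the Config fields
// visible in this diff are used.
conf := new(Config)
conf.DonutName = "mydonut"
conf.NodeDiskMap = map[string][]string{
	"localhost": {"/mnt/disk1", "/mnt/disk2"},
}
conf.Expiration = time.Duration(1 * time.Hour)
conf.MaxSize = 100000
d, err := New(conf) // d satisfies the same Interface as the cache-only test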
@@ -118,7 +127,7 @@ func (s *MyCacheSuite) TestCreateMultipleBucketsAndList(c *C) {

 // test object create without bucket
 func (s *MyCacheSuite) TestNewObjectFailsWithoutBucket(c *C) {
-	_, err := dc.CreateObject("unknown", "obj", "", "", 0, nil)
+	_, err := dc.CreateObject("unknown", "obj", "", 0, nil, nil)
 	c.Assert(err, Not(IsNil))
 }

@@ -133,7 +142,7 @@ func (s *MyCacheSuite) TestNewObjectMetadata(c *C) {
 	err := dc.MakeBucket("foo6", "private")
 	c.Assert(err, IsNil)

-	objectMetadata, err := dc.CreateObject("foo6", "obj", "application/json", expectedMd5Sum, int64(len(data)), reader)
+	objectMetadata, err := dc.CreateObject("foo6", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/json"})
 	c.Assert(err, IsNil)
 	c.Assert(objectMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil)))
 	c.Assert(objectMetadata.Metadata["contentType"], Equals, "application/json")
@@ -141,7 +150,7 @@ func (s *MyCacheSuite) TestNewObjectMetadata(c *C) {

 // test create object fails without name
 func (s *MyCacheSuite) TestNewObjectFailsWithEmptyName(c *C) {
-	_, err := dc.CreateObject("foo", "", "", "", 0, nil)
+	_, err := dc.CreateObject("foo", "", "", 0, nil, nil)
 	c.Assert(err, Not(IsNil))
 }

@@ -157,7 +166,7 @@ func (s *MyCacheSuite) TestNewObjectCanBeWritten(c *C) {
 	expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
 	reader := ioutil.NopCloser(bytes.NewReader([]byte(data)))

-	actualMetadata, err := dc.CreateObject("foo", "obj", "application/octet-stream", expectedMd5Sum, int64(len(data)), reader)
+	actualMetadata, err := dc.CreateObject("foo", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/octet-stream"})
 	c.Assert(err, IsNil)
 	c.Assert(actualMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil)))

@@ -179,11 +188,11 @@ func (s *MyCacheSuite) TestMultipleNewObjects(c *C) {

 	one := ioutil.NopCloser(bytes.NewReader([]byte("one")))

-	_, err := dc.CreateObject("foo5", "obj1", "", "", int64(len("one")), one)
+	_, err := dc.CreateObject("foo5", "obj1", "", int64(len("one")), one, nil)
 	c.Assert(err, IsNil)

 	two := ioutil.NopCloser(bytes.NewReader([]byte("two")))
-	_, err = dc.CreateObject("foo5", "obj2", "", "", int64(len("two")), two)
+	_, err = dc.CreateObject("foo5", "obj2", "", int64(len("two")), two, nil)
 	c.Assert(err, IsNil)

 	var buffer1 bytes.Buffer
@@ -232,7 +241,7 @@ func (s *MyCacheSuite) TestMultipleNewObjects(c *C) {
 	c.Assert(objectsMetadata[1].Object, Equals, "obj2")

 	three := ioutil.NopCloser(bytes.NewReader([]byte("three")))
-	_, err = dc.CreateObject("foo5", "obj3", "", "", int64(len("three")), three)
+	_, err = dc.CreateObject("foo5", "obj3", "", int64(len("three")), three, nil)
 	c.Assert(err, IsNil)

 	var buffer bytes.Buffer
@@ -20,8 +20,8 @@ import "io"

 // Collection of Donut specification interfaces

-// Donut is a collection of object storage and management interface
-type Donut interface {
+// Interface is a collection of object storage and management interface
+type Interface interface {
 	ObjectStorage
 	Management
 }
@@ -31,16 +31,29 @@ type ObjectStorage interface {
 	// Storage service operations
 	GetBucketMetadata(bucket string) (BucketMetadata, error)
 	SetBucketMetadata(bucket string, metadata map[string]string) error
-	ListBuckets() (map[string]BucketMetadata, error)
-	MakeBucket(bucket string, acl BucketACL) error
+	ListBuckets() ([]BucketMetadata, error)
+	MakeBucket(bucket string, ACL string) error

 	// Bucket operations
-	ListObjects(bucket, prefix, marker, delim string, maxKeys int) (ListObjects, error)
+	ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error)

 	// Object operations
-	GetObject(bucket, object string) (io.ReadCloser, int64, error)
+	GetObject(w io.Writer, bucket, object string) (int64, error)
+	GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error)
 	GetObjectMetadata(bucket, object string) (ObjectMetadata, error)
-	PutObject(bucket, object, expectedMD5Sum string, reader io.Reader, metadata map[string]string) (ObjectMetadata, error)
+	CreateObject(bucket, object, expectedMD5Sum string, size int64, reader io.Reader, metadata map[string]string) (ObjectMetadata, error)
+
+	Multipart
+}
+
+// Multipart API
+type Multipart interface {
+	NewMultipartUpload(bucket, key, contentType string) (string, error)
+	AbortMultipartUpload(bucket, key, uploadID string) error
+	CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error)
+	CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (ObjectMetadata, error)
+	ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, error)
+	ListObjectParts(bucket, key string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, error)
 }

 // Management is a donut management system interface
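Since the concrete type now has to satisfy the renamed Interface, including the embedded Multipart methods, a compile-time assertion is a cheap way to catch a drifting signature. A sketch, assuming API is the concrete receiver type used throughout this change and the assertion lives in the same package:

// Fails to compile if API stops implementing Interface.
var _ Interface = API{}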
@@ -25,14 +25,14 @@ import (
 )

 // Heal - heal a donut and fix bad data blocks
-func (dt donut) Heal() error {
+func (donut API) Heal() error {
 	return iodine.New(NotImplemented{Function: "Heal"}, nil)
 }

 // Info - return info about donut configuration
-func (dt donut) Info() (nodeDiskMap map[string][]string, err error) {
+func (donut API) Info() (nodeDiskMap map[string][]string, err error) {
 	nodeDiskMap = make(map[string][]string)
-	for nodeName, node := range dt.nodes {
+	for nodeName, node := range donut.nodes {
 		disks, err := node.ListDisks()
 		if err != nil {
 			return nil, iodine.New(err, nil)
@@ -47,7 +47,7 @@ func (dt donut) Info() (nodeDiskMap map[string][]string, err error) {
 }

 // AttachNode - attach node
-func (dt donut) AttachNode(hostname string, disks []string) error {
+func (donut API) AttachNode(hostname string, disks []string) error {
 	if hostname == "" || len(disks) == 0 {
 		return iodine.New(InvalidArgument{}, nil)
 	}
@@ -55,13 +55,13 @@ func (dt donut) AttachNode(hostname string, disks []string) error {
 	if err != nil {
 		return iodine.New(err, nil)
 	}
-	dt.nodes[hostname] = node
+	donut.nodes[hostname] = node
 	for i, d := range disks {
 		newDisk, err := disk.New(d)
 		if err != nil {
 			return iodine.New(err, nil)
 		}
-		if err := newDisk.MakeDir(dt.name); err != nil {
+		if err := newDisk.MakeDir(donut.config.DonutName); err != nil {
 			return iodine.New(err, nil)
 		}
 		if err := node.AttachDisk(newDisk, i); err != nil {
@@ -72,21 +72,21 @@ func (dt donut) AttachNode(hostname string, disks []string) error {
 }

 // DetachNode - detach node
-func (dt donut) DetachNode(hostname string) error {
-	delete(dt.nodes, hostname)
+func (donut API) DetachNode(hostname string) error {
+	delete(donut.nodes, hostname)
 	return nil
 }

 // SaveConfig - save donut configuration
-func (dt donut) SaveConfig() error {
+func (donut API) SaveConfig() error {
 	nodeDiskMap := make(map[string][]string)
-	for hostname, node := range dt.nodes {
+	for hostname, node := range donut.nodes {
 		disks, err := node.ListDisks()
 		if err != nil {
 			return iodine.New(err, nil)
 		}
 		for order, disk := range disks {
-			donutConfigPath := filepath.Join(dt.name, donutConfig)
+			donutConfigPath := filepath.Join(donut.config.DonutName, donutConfig)
 			donutConfigWriter, err := disk.CreateFile(donutConfigPath)
 			defer donutConfigWriter.Close()
 			if err != nil {
@@ -103,6 +103,6 @@ func (dt donut) SaveConfig() error {
 }

 // LoadConfig - load configuration
-func (dt donut) LoadConfig() error {
+func (donut API) LoadConfig() error {
 	return iodine.New(NotImplemented{Function: "LoadConfig"}, nil)
 }

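The management methods keep their behavior under the new receiver; only the receiver name and the config-backed donut name change. A hedged usage sketch of the attach/inspect pair, assuming d exposes the Management methods shown here (the hostname and disk paths are invented):

// Attach two disks to a node, then read back the node/disk map.
if err := d.AttachNode("localhost", []string{"/mnt/disk1", "/mnt/disk2"}); err != nil {
	log.Fatalln(err) // AttachNode rejects an empty hostname or empty disk list
}
nodeDiskMap, err := d.Info()
if err != nil {
	log.Fatalln(err)
}
fmt.Println(nodeDiskMap) // e.g. map[localhost:[/mnt/disk1 /mnt/disk2]]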
@@ -35,55 +35,55 @@ import (
 )

 // NewMultipartUpload -
-func (cache Cache) NewMultipartUpload(bucket, key, contentType string) (string, error) {
-	cache.lock.RLock()
+func (donut API) NewMultipartUpload(bucket, key, contentType string) (string, error) {
+	donut.lock.RLock()
 	if !IsValidBucket(bucket) {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
 	}
 	if !IsValidObjectName(key) {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
 	}
-	if _, ok := cache.storedBuckets[bucket]; ok == false {
-		cache.lock.RUnlock()
+	if _, ok := donut.storedBuckets[bucket]; ok == false {
+		donut.lock.RUnlock()
 		return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
 	}
-	storedBucket := cache.storedBuckets[bucket]
+	storedBucket := donut.storedBuckets[bucket]
 	objectKey := bucket + "/" + key
 	if _, ok := storedBucket.objectMetadata[objectKey]; ok == true {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return "", iodine.New(ObjectExists{Object: key}, nil)
 	}
-	cache.lock.RUnlock()
+	donut.lock.RUnlock()

-	cache.lock.Lock()
+	donut.lock.Lock()
 	id := []byte(strconv.FormatInt(rand.Int63(), 10) + bucket + key + time.Now().String())
 	uploadIDSum := sha512.Sum512(id)
 	uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47]

-	cache.storedBuckets[bucket].multiPartSession[key] = multiPartSession{
+	donut.storedBuckets[bucket].multiPartSession[key] = multiPartSession{
 		uploadID:   uploadID,
 		initiated:  time.Now(),
 		totalParts: 0,
 	}
-	cache.lock.Unlock()
+	donut.lock.Unlock()

 	return uploadID, nil
 }

 // AbortMultipartUpload -
-func (cache Cache) AbortMultipartUpload(bucket, key, uploadID string) error {
-	cache.lock.RLock()
-	storedBucket := cache.storedBuckets[bucket]
+func (donut API) AbortMultipartUpload(bucket, key, uploadID string) error {
+	donut.lock.RLock()
+	storedBucket := donut.storedBuckets[bucket]
 	if storedBucket.multiPartSession[key].uploadID != uploadID {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
 	}
-	cache.lock.RUnlock()
+	donut.lock.RUnlock()

-	cache.cleanupMultiparts(bucket, key, uploadID)
-	cache.cleanupMultipartSession(bucket, key, uploadID)
+	donut.cleanupMultiparts(bucket, key, uploadID)
+	donut.cleanupMultipartSession(bucket, key, uploadID)
 	return nil
 }

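The upload ID above is derived rather than purely random: a pseudo-random integer, the bucket, the key, and the current time are concatenated, hashed with SHA-512, and the URL-safe base64 form is truncated to 47 characters. The same derivation, lifted out as a standalone sketch with its imports:

import (
	"crypto/sha512"
	"encoding/base64"
	"math/rand"
	"strconv"
	"time"
)

// newUploadID reproduces the upload-ID derivation shown in the diff.
func newUploadID(bucket, key string) string {
	id := []byte(strconv.FormatInt(rand.Int63(), 10) + bucket + key + time.Now().String())
	sum := sha512.Sum512(id) // 64 bytes
	// URL-safe base64 of 64 bytes is 88 characters; keep the first 47.
	return base64.URLEncoding.EncodeToString(sum[:])[:47]
}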
@@ -92,17 +92,17 @@ func getMultipartKey(key string, uploadID string, partNumber int) string {
 }

 // CreateObjectPart -
-func (cache Cache) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
+func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
 	// Verify upload id
-	cache.lock.RLock()
-	storedBucket := cache.storedBuckets[bucket]
+	donut.lock.RLock()
+	storedBucket := donut.storedBuckets[bucket]
 	if storedBucket.multiPartSession[key].uploadID != uploadID {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return "", iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
 	}
-	cache.lock.RUnlock()
+	donut.lock.RUnlock()

-	etag, err := cache.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data)
+	etag, err := donut.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data)
 	if err != nil {
 		return "", iodine.New(err, nil)
 	}
@@ -112,28 +112,28 @@ func (cache Cache) CreateObjectPart(bucket, key, uploadID string, partID int, co
 }

 // createObject - PUT object to cache buffer
-func (cache Cache) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
-	cache.lock.RLock()
+func (donut API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
+	donut.lock.RLock()
 	if !IsValidBucket(bucket) {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
 	}
 	if !IsValidObjectName(key) {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
 	}
-	if _, ok := cache.storedBuckets[bucket]; ok == false {
-		cache.lock.RUnlock()
+	if _, ok := donut.storedBuckets[bucket]; ok == false {
+		donut.lock.RUnlock()
 		return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
 	}
-	storedBucket := cache.storedBuckets[bucket]
+	storedBucket := donut.storedBuckets[bucket]
 	// get object key
 	partKey := bucket + "/" + getMultipartKey(key, uploadID, partID)
 	if _, ok := storedBucket.partMetadata[partKey]; ok == true {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return storedBucket.partMetadata[partKey].ETag, nil
 	}
-	cache.lock.RUnlock()
+	donut.lock.RUnlock()

 	if contentType == "" {
 		contentType = "application/octet-stream"
@@ -172,9 +172,9 @@ func (cache Cache) createObjectPart(bucket, key, uploadID string, partID int, co
 	md5SumBytes := hash.Sum(nil)
 	totalLength := int64(len(readBytes))

-	cache.lock.Lock()
-	cache.multiPartObjects.Set(partKey, readBytes)
-	cache.lock.Unlock()
+	donut.lock.Lock()
+	donut.multiPartObjects.Set(partKey, readBytes)
+	donut.lock.Unlock()
 	// setting up for de-allocation
 	readBytes = nil

@@ -192,32 +192,32 @@ func (cache Cache) createObjectPart(bucket, key, uploadID string, partID int, co
 		Size: totalLength,
 	}

-	cache.lock.Lock()
+	donut.lock.Lock()
 	storedBucket.partMetadata[partKey] = newPart
 	multiPartSession := storedBucket.multiPartSession[key]
 	multiPartSession.totalParts++
 	storedBucket.multiPartSession[key] = multiPartSession
-	cache.storedBuckets[bucket] = storedBucket
-	cache.lock.Unlock()
+	donut.storedBuckets[bucket] = storedBucket
+	donut.lock.Unlock()

 	return md5Sum, nil
 }

-func (cache Cache) cleanupMultipartSession(bucket, key, uploadID string) {
-	cache.lock.Lock()
-	defer cache.lock.Unlock()
-	delete(cache.storedBuckets[bucket].multiPartSession, key)
+func (donut API) cleanupMultipartSession(bucket, key, uploadID string) {
+	donut.lock.Lock()
+	defer donut.lock.Unlock()
+	delete(donut.storedBuckets[bucket].multiPartSession, key)
 }

-func (cache Cache) cleanupMultiparts(bucket, key, uploadID string) {
-	for i := 1; i <= cache.storedBuckets[bucket].multiPartSession[key].totalParts; i++ {
+func (donut API) cleanupMultiparts(bucket, key, uploadID string) {
+	for i := 1; i <= donut.storedBuckets[bucket].multiPartSession[key].totalParts; i++ {
 		objectKey := bucket + "/" + getMultipartKey(key, uploadID, i)
-		cache.multiPartObjects.Delete(objectKey)
+		donut.multiPartObjects.Delete(objectKey)
 	}
 }

 // CompleteMultipartUpload -
-func (cache Cache) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (ObjectMetadata, error) {
+func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (ObjectMetadata, error) {
 	if !IsValidBucket(bucket) {
 		return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
 	}
@@ -225,26 +225,26 @@ func (cache Cache) CompleteMultipartUpload(bucket, key, uploadID string, parts m
 		return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil)
 	}
 	// Verify upload id
-	cache.lock.RLock()
-	if _, ok := cache.storedBuckets[bucket]; ok == false {
-		cache.lock.RUnlock()
+	donut.lock.RLock()
+	if _, ok := donut.storedBuckets[bucket]; ok == false {
+		donut.lock.RUnlock()
 		return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
 	}
-	storedBucket := cache.storedBuckets[bucket]
+	storedBucket := donut.storedBuckets[bucket]
 	if storedBucket.multiPartSession[key].uploadID != uploadID {
-		cache.lock.RUnlock()
+		donut.lock.RUnlock()
 		return ObjectMetadata{}, iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
 	}
-	cache.lock.RUnlock()
+	donut.lock.RUnlock()

-	cache.lock.Lock()
+	donut.lock.Lock()
 	var size int64
 	var fullObject bytes.Buffer
 	for i := 1; i <= len(parts); i++ {
 		recvMD5 := parts[i]
-		object, ok := cache.multiPartObjects.Get(bucket + "/" + getMultipartKey(key, uploadID, i))
+		object, ok := donut.multiPartObjects.Get(bucket + "/" + getMultipartKey(key, uploadID, i))
 		if ok == false {
-			cache.lock.Unlock()
+			donut.lock.Unlock()
 			return ObjectMetadata{}, iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil)
 		}
 		size += int64(len(object))
@@ -264,20 +264,20 @@ func (cache Cache) CompleteMultipartUpload(bucket, key, uploadID string, parts m
 		object = nil
 		go debug.FreeOSMemory()
 	}
-	cache.lock.Unlock()
+	donut.lock.Unlock()

 	md5sumSlice := md5.Sum(fullObject.Bytes())
 	// this is needed for final verification inside CreateObject, do not convert this to hex
 	md5sum := base64.StdEncoding.EncodeToString(md5sumSlice[:])
-	objectMetadata, err := cache.CreateObject(bucket, key, "", md5sum, size, &fullObject)
+	objectMetadata, err := donut.CreateObject(bucket, key, md5sum, size, &fullObject, nil)
 	if err != nil {
 		// No need to call internal cleanup functions here, caller will call AbortMultipartUpload()
 		// which would in-turn cleanup properly in accordance with S3 Spec
 		return ObjectMetadata{}, iodine.New(err, nil)
 	}
 	fullObject.Reset()
-	cache.cleanupMultiparts(bucket, key, uploadID)
-	cache.cleanupMultipartSession(bucket, key, uploadID)
+	donut.cleanupMultiparts(bucket, key, uploadID)
+	donut.cleanupMultipartSession(bucket, key, uploadID)
 	return objectMetadata, nil
 }

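Taken together, these methods define the caller-side multipart flow: initiate, upload parts keyed by part number, then complete with the part-number-to-ETag map (or abort, which runs the cleanup helpers). A hedged end-to-end sketch, assuming d is any value satisfying the Interface above, using strings.NewReader for the illustrative payloads and eliding most error handling:

uploadID, _ := d.NewMultipartUpload("bucket", "key", "application/octet-stream")

parts := make(map[int]string)
for i, payload := range []string{"part-one-data", "part-two-data"} {
	// Empty contentType and expectedMD5Sum, as the exported wrapper allows.
	etag, _ := d.CreateObjectPart("bucket", "key", uploadID, i+1, "", "",
		int64(len(payload)), strings.NewReader(payload))
	parts[i+1] = etag // CompleteMultipartUpload stitches parts in part-number order
}

if _, err := d.CompleteMultipartUpload("bucket", "key", uploadID, parts); err != nil {
	// Per the comment in the diff, the caller aborts and cleanup happens there.
	_ = d.AbortMultipartUpload("bucket", "key", uploadID)
}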
@@ -289,14 +289,14 @@ func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
 func (a byKey) Less(i, j int) bool { return a[i].Key < a[j].Key }

 // ListMultipartUploads -
-func (cache Cache) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, error) {
+func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, error) {
 	// TODO handle delimiter
-	cache.lock.RLock()
-	defer cache.lock.RUnlock()
-	if _, ok := cache.storedBuckets[bucket]; ok == false {
+	donut.lock.RLock()
+	defer donut.lock.RUnlock()
+	if _, ok := donut.storedBuckets[bucket]; ok == false {
 		return BucketMultipartResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
 	}
-	storedBucket := cache.storedBuckets[bucket]
+	storedBucket := donut.storedBuckets[bucket]
 	var uploads []*UploadMetadata

 	for key, session := range storedBucket.multiPartSession {
@@ -351,14 +351,14 @@ func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
 func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }

 // ListObjectParts -
-func (cache Cache) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, error) {
+func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, error) {
 	// Verify upload id
-	cache.lock.RLock()
-	defer cache.lock.RUnlock()
-	if _, ok := cache.storedBuckets[bucket]; ok == false {
+	donut.lock.RLock()
+	defer donut.lock.RUnlock()
+	if _, ok := donut.storedBuckets[bucket]; ok == false {
 		return ObjectResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
 	}
-	storedBucket := cache.storedBuckets[bucket]
+	storedBucket := donut.storedBuckets[bucket]
 	if _, ok := storedBucket.multiPartSession[key]; ok == false {
 		return ObjectResourcesMetadata{}, iodine.New(ObjectNotFound{Object: key}, nil)
 	}
@@ -395,10 +395,10 @@ func (cache Cache) ListObjectParts(bucket, key string, resources ObjectResources
 	return objectResourcesMetadata, nil
 }

-func (cache Cache) expiredPart(a ...interface{}) {
+func (donut API) expiredPart(a ...interface{}) {
 	key := a[0].(string)
 	// loop through all buckets
-	for _, storedBucket := range cache.storedBuckets {
+	for _, storedBucket := range donut.storedBuckets {
 		delete(storedBucket.partMetadata, key)
 	}
 	debug.FreeOSMemory()
@@ -26,11 +26,11 @@ import (
 )

 // Rebalance -
-func (d donut) Rebalance() error {
+func (donut API) Rebalance() error {
 	var totalOffSetLength int
 	var newDisks []disk.Disk
 	var existingDirs []os.FileInfo
-	for _, node := range d.nodes {
+	for _, node := range donut.nodes {
 		disks, err := node.ListDisks()
 		if err != nil {
 			return iodine.New(err, nil)
@@ -38,7 +38,7 @@ func (d donut) Rebalance() error {
 		totalOffSetLength = len(disks)
 		fmt.Println(totalOffSetLength)
 		for _, disk := range disks {
-			dirs, err := disk.ListDir(d.name)
+			dirs, err := disk.ListDir(donut.config.DonutName)
 			if err != nil {
 				return iodine.New(err, nil)
 			}
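One convention worth noting across all of these hunks: every error path funnels through iodine.New, which wraps the underlying error before returning it; the second argument, nil at every call site shown here, appears to carry optional context. A hedged illustration of the call shape, assuming iodine.New(err error, data map[string]string) as the visible call sites suggest:

// Hedged illustration only; the context map and its keys are invented.
if err := donutConfigWriter.Close(); err != nil {
	return iodine.New(err, map[string]string{"configPath": donutConfigPath})
}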