Merge pull request #707 from harshavardhana/pr_out_fix_another_deadlock_inside_createobjectpart_code_premature_return_without_unlocking_

Harshavardhana 2015-07-06 03:29:25 +00:00
commit a74a2db8f0
7 changed files with 86 additions and 53 deletions
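The deadlock named in the title is a small, reproducible bug class: CreateObjectPart() acquired donut.lock and, on its error path, returned before calling Unlock(), so the mutex stayed held and every later API call blocked forever. A minimal self-contained Go sketch of the broken and fixed shapes (the api type, work(), and the names here are illustrative, not the actual donut code):

package main

import (
	"errors"
	"fmt"
	"sync"
)

type api struct{ lock *sync.Mutex }

func work(fail bool) error {
	if fail {
		return errors.New("part write failed")
	}
	return nil
}

// Broken shape (before this commit): the error path returns while the
// mutex is still held, so the next caller blocks on Lock() forever.
func (a api) brokenCreatePart(fail bool) error {
	a.lock.Lock()
	if err := work(fail); err != nil {
		return err // BUG: missing a.lock.Unlock()
	}
	a.lock.Unlock()
	return nil
}

// Fixed shape (this commit): unlock unconditionally, then return the
// (possibly nil) error on a single exit path.
func (a api) fixedCreatePart(fail bool) error {
	a.lock.Lock()
	err := work(fail)
	a.lock.Unlock()
	return err
}

func main() {
	a := api{lock: new(sync.Mutex)}
	fmt.Println(a.fixedCreatePart(true))  // error returned, lock released
	fmt.Println(a.fixedCreatePart(false)) // <nil>, no deadlock either way
}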

View File

@@ -36,7 +36,7 @@ type Cache struct {
 items *list.List
 // reverseItems holds the time that related item's updated at
-reverseItems map[string]*list.Element
+reverseItems map[interface{}]*list.Element
 // maxSize is a total size for overall cache
 maxSize uint64
@@ -59,7 +59,7 @@ type Stats struct {
 }
 type element struct {
-key string
+key interface{}
 value []byte
 }
@@ -70,7 +70,7 @@ type element struct {
 func NewCache(maxSize uint64) *Cache {
 return &Cache{
 items: list.New(),
-reverseItems: make(map[string]*list.Element),
+reverseItems: make(map[interface{}]*list.Element),
 maxSize: maxSize,
 }
 }
@@ -85,7 +85,7 @@ func (r *Cache) Stats() Stats {
 }
 // Get returns a value of a given key if it exists
-func (r *Cache) Get(key string) ([]byte, bool) {
+func (r *Cache) Get(key interface{}) ([]byte, bool) {
 r.Lock()
 defer r.Unlock()
 ele, hit := r.reverseItems[key]
@@ -97,7 +97,7 @@ func (r *Cache) Get(key string) ([]byte, bool) {
 }
 // Len returns length of the value of a given key, returns zero if key doesn't exist
-func (r *Cache) Len(key string) int {
+func (r *Cache) Len(key interface{}) int {
 r.Lock()
 defer r.Unlock()
 _, ok := r.reverseItems[key]
@@ -109,7 +109,7 @@ func (r *Cache) Len(key string) int {
 // Append will append new data to an existing key,
 // if key doesn't exist it behaves like Set()
-func (r *Cache) Append(key string, value []byte) bool {
+func (r *Cache) Append(key interface{}, value []byte) bool {
 r.Lock()
 defer r.Unlock()
 valueLen := uint64(len(value))
@@ -139,7 +139,7 @@ func (r *Cache) Append(key string, value []byte) bool {
 }
 // Set will persist a value to the cache
-func (r *Cache) Set(key string, value []byte) bool {
+func (r *Cache) Set(key interface{}, value []byte) bool {
 r.Lock()
 defer r.Unlock()
 valueLen := uint64(len(value))
@@ -164,7 +164,7 @@ func (r *Cache) Set(key string, value []byte) bool {
 }
 // Delete deletes a given key if exists
-func (r *Cache) Delete(key string) {
+func (r *Cache) Delete(key interface{}) {
 r.Lock()
 defer r.Unlock()
 ele, ok := r.reverseItems[key]
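The effect of widening the key type from string to interface{} is that the same cache can now be keyed by multipart part numbers directly. A usage sketch relying only on the signatures visible in the hunks above; the trove import path and alias are assumptions for illustration:

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/storage/trove" // assumed import path for this Cache
)

func main() {
	c := trove.NewCache(512000000) // max cache size in bytes, as in LoadDonut below
	c.Set(5, []byte("part-5-payload"))            // int key, as the multipart code now uses
	c.Set("bucket/object", []byte("full object")) // string keys keep working
	if data, ok := c.Get(5); ok {
		fmt.Printf("part 5: %d bytes\n", len(data))
	}
	fmt.Println(c.Len("bucket/object")) // length of the stored value, 0 if missing
	c.Delete(5)
}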

View File

@@ -19,7 +19,6 @@ package donut
 import (
 "os/user"
 "path/filepath"
-"time"
 "github.com/minio/minio/pkg/iodine"
 "github.com/minio/minio/pkg/quick"
@@ -76,7 +75,6 @@ func LoadDonut() (Interface, error) {
 conf = &Config{
 Version: "0.0.1",
 MaxSize: 512000000,
-Expiration: 1 * time.Hour,
 }
 }
 donut, err := New(conf)

View File

@@ -26,7 +26,6 @@ import (
 "path/filepath"
 "strconv"
 "testing"
-"time"
 . "github.com/minio/check"
 )
@@ -65,7 +64,6 @@ func (s *MyDonutSuite) SetUpSuite(c *C) {
 conf := new(Config)
 conf.DonutName = "test"
 conf.NodeDiskMap = createTestNodeDiskMap(root)
-conf.Expiration = time.Duration(1 * time.Hour)
 conf.MaxSize = 100000
 dd, err = New(conf)

View File

@@ -47,7 +47,6 @@ const (
 type Config struct {
 Version string `json:"version"`
 MaxSize uint64 `json:"max-size"`
-Expiration time.Duration `json:"expiration"`
 DonutName string `json:"donut-name"`
 NodeDiskMap map[string][]string `json:"node-disk-map"`
 }
@@ -67,7 +66,7 @@ type API struct {
 type storedBucket struct {
 bucketMetadata BucketMetadata
 objectMetadata map[string]ObjectMetadata
-partMetadata map[string]PartMetadata
+partMetadata map[int]PartMetadata
 multiPartSession map[string]MultiPartSession
 }
@@ -270,7 +269,7 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error {
 return iodine.New(errors.New("invalid argument"), nil)
 }
-// CreateObject -
+// CreateObject - create an object
 func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, data io.Reader, metadata map[string]string) (ObjectMetadata, error) {
 if size > int64(donut.config.MaxSize) {
 generic := GenericObjectError{Bucket: bucket, Object: key}
@@ -410,7 +409,7 @@ func (donut API) MakeBucket(bucketName, acl string) error {
 var newBucket = storedBucket{}
 newBucket.objectMetadata = make(map[string]ObjectMetadata)
 newBucket.multiPartSession = make(map[string]MultiPartSession)
-newBucket.partMetadata = make(map[string]PartMetadata)
+newBucket.partMetadata = make(map[int]PartMetadata)
 newBucket.bucketMetadata = BucketMetadata{}
 newBucket.bucketMetadata.Name = bucketName
 newBucket.bucketMetadata.Created = time.Now().UTC()
@@ -567,6 +566,7 @@ func (donut API) GetObjectMetadata(bucket, key string) (ObjectMetadata, error) {
 return ObjectMetadata{}, iodine.New(ObjectNotFound{Object: key}, nil)
 }
+// evictedObject callback function called when an item is evicted from memory
 func (donut API) evictedObject(a ...interface{}) {
 cacheStats := donut.objects.Stats()
 log.Printf("CurrentSize: %d, CurrentItems: %d, TotalEvicted: %d",
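Re-keying partMetadata from composite strings to the part number itself turns every lookup into a direct map access. A small sketch of the before/after access pattern; the PartMetadata fields are limited to those visible in this commit and the ETag value is made up:

package main

import "fmt"

// PartMetadata, abbreviated to the fields visible in this diff.
type PartMetadata struct {
	PartNumber int
	ETag       string
	Size       int64
}

func main() {
	// before: partMetadata was map[string]PartMetadata and every access
	// rebuilt a composite key such as
	//   bucket + "/" + key + "?uploadId=" + uploadID + "&partNumber=5"
	// after: the part number is the key
	partMetadata := make(map[int]PartMetadata)
	partMetadata[5] = PartMetadata{PartNumber: 5, ETag: "9a0364b9e99bb480dd25e1f0284c8555", Size: 1024}
	if part, ok := partMetadata[5]; ok {
		fmt.Println(part.ETag, part.Size)
	}
}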

View File

@@ -23,7 +23,6 @@ import (
 "encoding/hex"
 "io/ioutil"
 "testing"
-"time"
 . "github.com/minio/check"
 )
@@ -41,7 +40,6 @@ func (s *MyCacheSuite) SetUpSuite(c *C) {
 conf := new(Config)
 conf.DonutName = ""
 conf.NodeDiskMap = nil
-conf.Expiration = time.Duration(1 * time.Hour)
 conf.MaxSize = 100000
 var err error

View File

@@ -34,7 +34,7 @@ import (
 "github.com/minio/minio/pkg/iodine"
 )
-// NewMultipartUpload -
+// NewMultipartUpload - initiate a new multipart session
 func (donut API) NewMultipartUpload(bucket, key, contentType string) (string, error) {
 donut.lock.Lock()
 defer donut.lock.Unlock()
@@ -65,7 +65,7 @@ func (donut API) NewMultipartUpload(bucket, key, contentType string) (string, er
 return uploadID, nil
 }
-// AbortMultipartUpload -
+// AbortMultipartUpload - abort an incomplete multipart session
 func (donut API) AbortMultipartUpload(bucket, key, uploadID string) error {
 donut.lock.Lock()
 defer donut.lock.Unlock()
@@ -83,11 +83,7 @@ func (donut API) AbortMultipartUpload(bucket, key, uploadID string) error {
 return nil
 }
-func getMultipartKey(key string, uploadID string, partNumber int) string {
-return key + "?uploadId=" + uploadID + "&partNumber=" + strconv.Itoa(partNumber)
-}
-// CreateObjectPart -
+// CreateObjectPart - create a part in a multipart session
 func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
 if !IsValidBucket(bucket) {
 return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
@@ -98,22 +94,15 @@ func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, cont
 if !donut.storedBuckets.Exists(bucket) {
 return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
 }
-var etag string
-var err error
-{
 donut.lock.Lock()
-etag, err = donut.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data)
-if err != nil {
-return "", iodine.New(err, nil)
-}
+etag, err := donut.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data)
 donut.lock.Unlock()
-}
 // possible free
 debug.FreeOSMemory()
-return etag, nil
+return etag, iodine.New(err, nil)
 }
-// createObject - PUT object to cache buffer
+// createObject - internal wrapper function called by CreateObjectPart
 func (donut API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
 storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
 // Verify upload id
@@ -122,9 +111,8 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont
 }
 // get object key
-partKey := bucket + "/" + getMultipartKey(key, uploadID, partID)
-if _, ok := storedBucket.partMetadata[partKey]; ok == true {
-return storedBucket.partMetadata[partKey].ETag, nil
+if _, ok := storedBucket.partMetadata[partID]; ok {
+return storedBucket.partMetadata[partID].ETag, nil
 }
 if contentType == "" {
@@ -164,7 +152,7 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont
 md5SumBytes := hash.Sum(nil)
 totalLength := int64(len(readBytes))
-donut.multiPartObjects.Set(partKey, readBytes)
+donut.multiPartObjects.Set(partID, readBytes)
 // setting up for de-allocation
 readBytes = nil
@@ -182,7 +170,7 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont
 Size: totalLength,
 }
-storedBucket.partMetadata[partKey] = newPart
+storedBucket.partMetadata[partID] = newPart
 multiPartSession := storedBucket.multiPartSession[key]
 multiPartSession.totalParts++
 storedBucket.multiPartSession[key] = multiPartSession
@@ -190,38 +178,42 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont
 return md5Sum, nil
 }
+// cleanupMultipartSession invoked during an abort or complete multipart session to cleanup session from memory
 func (donut API) cleanupMultipartSession(bucket, key, uploadID string) {
 storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
 for i := 1; i <= storedBucket.multiPartSession[key].totalParts; i++ {
-objectKey := bucket + "/" + getMultipartKey(key, uploadID, i)
-donut.multiPartObjects.Delete(objectKey)
+donut.multiPartObjects.Delete(i)
 }
 delete(storedBucket.multiPartSession, key)
 donut.storedBuckets.Set(bucket, storedBucket)
 }
-// CompleteMultipartUpload -
+// CompleteMultipartUpload - complete a multipart upload and persist the data
 func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (ObjectMetadata, error) {
 donut.lock.Lock()
 if !IsValidBucket(bucket) {
+donut.lock.Unlock()
 return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
 }
 if !IsValidObjectName(key) {
+donut.lock.Unlock()
 return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil)
 }
-// Verify upload id
 if !donut.storedBuckets.Exists(bucket) {
+donut.lock.Unlock()
 return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
 }
 storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
 // Verify upload id
 if storedBucket.multiPartSession[key].uploadID != uploadID {
+donut.lock.Unlock()
 return ObjectMetadata{}, iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
 }
 var size int64
 var fullObject bytes.Buffer
 for i := 1; i <= len(parts); i++ {
 recvMD5 := parts[i]
-object, ok := donut.multiPartObjects.Get(bucket + "/" + getMultipartKey(key, uploadID, i))
+object, ok := donut.multiPartObjects.Get(i)
 if ok == false {
+donut.lock.Unlock()
 return ObjectMetadata{}, iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil)
@@ -231,13 +223,16 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, parts map
 // complete multi part request header md5sum per part is hex encoded
 recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\""))
 if err != nil {
+donut.lock.Unlock()
 return ObjectMetadata{}, iodine.New(InvalidDigest{Md5: recvMD5}, nil)
 }
 if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) {
+donut.lock.Unlock()
 return ObjectMetadata{}, iodine.New(BadDigest{}, nil)
 }
 _, err = io.Copy(&fullObject, bytes.NewBuffer(object))
 if err != nil {
+donut.lock.Unlock()
 return ObjectMetadata{}, iodine.New(err, nil)
 }
 object = nil
@@ -269,7 +264,7 @@ func (a byKey) Len() int { return len(a) }
 func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
 func (a byKey) Less(i, j int) bool { return a[i].Key < a[j].Key }
-// ListMultipartUploads -
+// ListMultipartUploads - list incomplete multipart sessions for a given bucket
 func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, error) {
 // TODO handle delimiter
 donut.lock.Lock()
@@ -331,7 +326,7 @@ func (a partNumber) Len() int { return len(a) }
 func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
 func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
-// ListObjectParts -
+// ListObjectParts - list parts from incomplete multipart session for a given object
 func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, error) {
 // Verify upload id
 donut.lock.Lock()
@@ -365,7 +360,7 @@ func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMe
 objectResourcesMetadata.NextPartNumberMarker = i
 return objectResourcesMetadata, nil
 }
-part, ok := storedBucket.partMetadata[bucket+"/"+getMultipartKey(key, resources.UploadID, i)]
+part, ok := storedBucket.partMetadata[i]
 if !ok {
 return ObjectResourcesMetadata{}, iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil)
 }
@@ -376,8 +371,9 @@ func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMe
 return objectResourcesMetadata, nil
 }
+// evictedPart - call back function called by caching module during individual cache evictions
 func (donut API) evictedPart(a ...interface{}) {
-key := a[0].(string)
+key := a[0].(int)
 // loop through all buckets
 buckets := donut.storedBuckets.GetAll()
 for bucketName, bucket := range buckets {
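A contrast worth noting: CompleteMultipartUpload() above releases donut.lock explicitly before each error return, which is exactly where the original bug crept in. The usual Go idiom, defer, rules out premature-return deadlocks by construction, at the cost of holding the lock until the function exits. This sketch shows the defer shape only as a comparison; it is not what this commit does:

package main

import (
	"errors"
	"fmt"
	"sync"
)

type session struct{ lock sync.Mutex }

func verify(fail bool) error {
	if fail {
		return errors.New("bad digest")
	}
	return nil
}

// completeUpload uses defer: the unlock runs on every return path,
// so no early return can leave the mutex held.
func (s *session) completeUpload(fail bool) error {
	s.lock.Lock()
	defer s.lock.Unlock()
	if err := verify(fail); err != nil {
		return err // deferred Unlock still fires here
	}
	return nil
}

func main() {
	var s session
	fmt.Println(s.completeUpload(true))  // bad digest
	fmt.Println(s.completeUpload(false)) // <nil>
}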

View File

@@ -0,0 +1,43 @@
/*
 * Minimalist Object Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package rpc

import "net/http"

// DonutService donut service
type DonutService struct{}

// DonutArgs collections of disks and name to initialize donut
type DonutArgs struct {
	Name  string
	Disks []string
}

// Reply reply for successful or failed Set operation
type Reply struct {
	Message string `json:"message"`
	Error   error  `json:"error"`
}

func setDonutArgs(args *DonutArgs, reply *Reply) error {
	return nil
}

// Set method
func (s *DonutService) Set(r *http.Request, args *DonutArgs, reply *Reply) error {
	return setDonutArgs(args, reply)
}
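The Set method's (r *http.Request, args, reply) signature matches the gorilla/rpc service convention, so the service could plausibly be mounted as a JSON-RPC endpoint as below. The wiring, the port, and the donutrpc import path are all assumptions for illustration; the diff itself only defines the service:

package main

import (
	"net/http"

	gorillarpc "github.com/gorilla/rpc"
	"github.com/gorilla/rpc/json"

	// assumed import path for the package "rpc" defined above
	donutrpc "github.com/minio/minio/pkg/rpc"
)

func main() {
	s := gorillarpc.NewServer()
	s.RegisterCodec(json.NewCodec(), "application/json")
	// exposes the method as "DonutService.Set" over JSON-RPC
	s.RegisterService(new(donutrpc.DonutService), "")
	http.Handle("/rpc", s)
	http.ListenAndServe(":9001", nil) // error intentionally ignored in this sketch
}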