Breakaway from driver model, move cache into donut
pkg/storage/donut/cache-multipart.go (new file, 399 lines)
@@ -0,0 +1,399 @@
/*
 * Minimalist Object Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package donut

import (
    "bytes"
    "crypto/md5"
    "crypto/sha512"
    "encoding/base64"
    "encoding/hex"
    "errors"
    "io"
    "math/rand"
    "runtime/debug"
    "sort"
    "strconv"
    "strings"
    "time"

    "github.com/minio/minio/pkg/iodine"
)

// NewMultipartUpload - initiate a new multipart session
func (cache donut) NewMultipartUpload(bucket, key, contentType string) (string, error) {
    cache.lock.RLock()
    if !IsValidBucket(bucket) {
        cache.lock.RUnlock()
        return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
    }
    if !IsValidObjectName(key) {
        cache.lock.RUnlock()
        return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
    }
    if _, ok := cache.storedBuckets[bucket]; !ok {
        cache.lock.RUnlock()
        return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
    }
    storedBucket := cache.storedBuckets[bucket]
    objectKey := bucket + "/" + key
    if _, ok := storedBucket.objectMetadata[objectKey]; ok {
        cache.lock.RUnlock()
        return "", iodine.New(ObjectExists{Bucket: bucket, Object: key}, nil)
    }
    cache.lock.RUnlock()

    cache.lock.Lock()
    // upload id: sha512 over randomness, bucket, key and the current time,
    // base64-url encoded and truncated to 47 characters
    id := []byte(strconv.FormatInt(rand.Int63(), 10) + bucket + key + time.Now().String())
    uploadIDSum := sha512.Sum512(id)
    uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47]

    cache.storedBuckets[bucket].multiPartSession[key] = multiPartSession{
        uploadID:   uploadID,
        initiated:  time.Now(),
        totalParts: 0,
    }
    cache.lock.Unlock()

    return uploadID, nil
}

// AbortMultipartUpload - abort an incomplete multipart session
func (cache donut) AbortMultipartUpload(bucket, key, uploadID string) error {
    cache.lock.RLock()
    storedBucket := cache.storedBuckets[bucket]
    if storedBucket.multiPartSession[key].uploadID != uploadID {
        cache.lock.RUnlock()
        return iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
    }
    cache.lock.RUnlock()

    cache.cleanupMultiparts(bucket, key, uploadID)
    cache.cleanupMultipartSession(bucket, key, uploadID)
    return nil
}

// getMultipartKey - construct the flat cache key for an individual part
func getMultipartKey(key string, uploadID string, partNumber int) string {
    return key + "?uploadId=" + uploadID + "&partNumber=" + strconv.Itoa(partNumber)
}
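
// For illustration (not part of this commit): getMultipartKey("movie.mp4", "3.baW", 2)
// yields "movie.mp4?uploadId=3.baW&partNumber=2", so every part lives under its own
// flat key in the multipart cache and can be addressed or deleted individually.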

// CreateObjectPart - create an object part in a multipart session
func (cache donut) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
    // Verify upload id
    cache.lock.RLock()
    storedBucket := cache.storedBuckets[bucket]
    if storedBucket.multiPartSession[key].uploadID != uploadID {
        cache.lock.RUnlock()
        return "", iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
    }
    cache.lock.RUnlock()

    etag, err := cache.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data)
    if err != nil {
        return "", iodine.New(err, nil)
    }
    // free
    debug.FreeOSMemory()
    return etag, nil
}

// createObjectPart - PUT an object part into the cache buffer
func (cache donut) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
    cache.lock.RLock()
    if !IsValidBucket(bucket) {
        cache.lock.RUnlock()
        return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
    }
    if !IsValidObjectName(key) {
        cache.lock.RUnlock()
        return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
    }
    if _, ok := cache.storedBuckets[bucket]; !ok {
        cache.lock.RUnlock()
        return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
    }
    storedBucket := cache.storedBuckets[bucket]
    // get object key
    partKey := bucket + "/" + getMultipartKey(key, uploadID, partID)
    if _, ok := storedBucket.partMetadata[partKey]; ok {
        cache.lock.RUnlock()
        return storedBucket.partMetadata[partKey].ETag, nil
    }
    cache.lock.RUnlock()

    if contentType == "" {
        contentType = "application/octet-stream"
    }
    contentType = strings.TrimSpace(contentType)
    if strings.TrimSpace(expectedMD5Sum) != "" {
        expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
        if err != nil {
            // pro-actively close the connection
            return "", iodine.New(InvalidDigest{Md5: expectedMD5Sum}, nil)
        }
        expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
    }

    // calculate md5 while buffering the part in 1MiB chunks
    hash := md5.New()
    var readBytes []byte

    var err error
    var length int
    for err == nil {
        byteBuffer := make([]byte, 1024*1024)
        length, err = data.Read(byteBuffer)
        // hash.Write() accepts an empty slice, but a zero-length read
        // still has to terminate the loop
        if length == 0 {
            break
        }
        hash.Write(byteBuffer[0:length])
        readBytes = append(readBytes, byteBuffer[0:length]...)
    }
    if err != io.EOF {
        return "", iodine.New(err, nil)
    }
    go debug.FreeOSMemory()
    md5SumBytes := hash.Sum(nil)
    totalLength := int64(len(readBytes))

    cache.lock.Lock()
    cache.multiPartObjects.Set(partKey, readBytes)
    cache.lock.Unlock()
    // setting up for de-allocation
    readBytes = nil

    md5Sum := hex.EncodeToString(md5SumBytes)
    // Verify if the written object is equal to what is expected, only if it is requested as such
    if strings.TrimSpace(expectedMD5Sum) != "" {
        if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil {
            return "", iodine.New(BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil)
        }
    }
    newPart := PartMetadata{
        PartNumber:   partID,
        LastModified: time.Now().UTC(),
        ETag:         md5Sum,
        Size:         totalLength,
    }

    cache.lock.Lock()
    storedBucket.partMetadata[partKey] = newPart
    multiPartSession := storedBucket.multiPartSession[key]
    multiPartSession.totalParts++
    storedBucket.multiPartSession[key] = multiPartSession
    cache.storedBuckets[bucket] = storedBucket
    cache.lock.Unlock()

    return md5Sum, nil
}
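
// createObjectPart buffers each part fully in memory, reading in 1MiB chunks;
// the debug.FreeOSMemory() calls aggressively hand freed buffers back to the
// OS at the cost of extra GC work.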

// cleanupMultipartSession - remove a multipart session from the bucket
func (cache donut) cleanupMultipartSession(bucket, key, uploadID string) {
    cache.lock.Lock()
    defer cache.lock.Unlock()
    delete(cache.storedBuckets[bucket].multiPartSession, key)
}

// cleanupMultiparts - delete all cached parts of an upload
func (cache donut) cleanupMultiparts(bucket, key, uploadID string) {
    for i := 1; i <= cache.storedBuckets[bucket].multiPartSession[key].totalParts; i++ {
        objectKey := bucket + "/" + getMultipartKey(key, uploadID, i)
        cache.multiPartObjects.Delete(objectKey)
    }
}

// CompleteMultipartUpload - complete a multipart session, verifying and concatenating all parts
func (cache donut) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) {
    if !IsValidBucket(bucket) {
        return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
    }
    if !IsValidObjectName(key) {
        return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
    }
    // Verify upload id
    cache.lock.RLock()
    if _, ok := cache.storedBuckets[bucket]; !ok {
        cache.lock.RUnlock()
        return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
    }
    storedBucket := cache.storedBuckets[bucket]
    if storedBucket.multiPartSession[key].uploadID != uploadID {
        cache.lock.RUnlock()
        return "", iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
    }
    cache.lock.RUnlock()

    cache.lock.Lock()
    var size int64
    var fullObject bytes.Buffer
    for i := 1; i <= len(parts); i++ {
        recvMD5 := parts[i]
        object, ok := cache.multiPartObjects.Get(bucket + "/" + getMultipartKey(key, uploadID, i))
        if !ok {
            cache.lock.Unlock()
            return "", iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil)
        }
        size += int64(len(object))
        calcMD5Bytes := md5.Sum(object)
        // complete multi part request header md5sum per part is hex encoded
        recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\""))
        if err != nil {
            cache.lock.Unlock()
            return "", iodine.New(InvalidDigest{Md5: recvMD5}, nil)
        }
        if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) {
            cache.lock.Unlock()
            return "", iodine.New(BadDigest{Md5: recvMD5, Bucket: bucket, Key: getMultipartKey(key, uploadID, i)}, nil)
        }
        _, err = io.Copy(&fullObject, bytes.NewBuffer(object))
        if err != nil {
            cache.lock.Unlock()
            return "", iodine.New(err, nil)
        }
        object = nil
        go debug.FreeOSMemory()
    }
    cache.lock.Unlock()

    md5sumSlice := md5.Sum(fullObject.Bytes())
    // this is needed for final verification inside CreateObject, do not convert this to hex
    md5sum := base64.StdEncoding.EncodeToString(md5sumSlice[:])
    etag, err := cache.CreateObject(bucket, key, "", md5sum, size, &fullObject)
    if err != nil {
        // No need to call internal cleanup functions here, caller will call AbortMultipartUpload()
        // which would in-turn cleanup properly in accordance with S3 Spec
        return "", iodine.New(err, nil)
    }
    fullObject.Reset()
    cache.cleanupMultiparts(bucket, key, uploadID)
    cache.cleanupMultipartSession(bucket, key, uploadID)
    return etag, nil
}
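
// Per-part checksums arrive hex-encoded in the complete-multipart request,
// while CreateObject expects the whole-object md5 base64-encoded (as in a
// Content-MD5 header); hence the two different encodings above.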

// byKey is a sortable interface for UploadMetadata slice
type byKey []*UploadMetadata

func (a byKey) Len() int           { return len(a) }
func (a byKey) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byKey) Less(i, j int) bool { return a[i].Key < a[j].Key }

// ListMultipartUploads - list in-progress multipart sessions of a bucket
func (cache donut) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, error) {
    // TODO handle delimiter
    cache.lock.RLock()
    defer cache.lock.RUnlock()
    if _, ok := cache.storedBuckets[bucket]; !ok {
        return BucketMultipartResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
    }
    storedBucket := cache.storedBuckets[bucket]
    var uploads []*UploadMetadata

    for key, session := range storedBucket.multiPartSession {
        if strings.HasPrefix(key, resources.Prefix) {
            if len(uploads) > resources.MaxUploads {
                sort.Sort(byKey(uploads))
                resources.Upload = uploads
                resources.NextKeyMarker = key
                resources.NextUploadIDMarker = session.uploadID
                resources.IsTruncated = true
                return resources, nil
            }
            // uploadIDMarker is ignored if KeyMarker is empty
            switch {
            case resources.KeyMarker != "" && resources.UploadIDMarker == "":
                if key > resources.KeyMarker {
                    upload := new(UploadMetadata)
                    upload.Key = key
                    upload.UploadID = session.uploadID
                    upload.Initiated = session.initiated
                    uploads = append(uploads, upload)
                }
            case resources.KeyMarker != "" && resources.UploadIDMarker != "":
                if session.uploadID > resources.UploadIDMarker {
                    if key >= resources.KeyMarker {
                        upload := new(UploadMetadata)
                        upload.Key = key
                        upload.UploadID = session.uploadID
                        upload.Initiated = session.initiated
                        uploads = append(uploads, upload)
                    }
                }
            default:
                upload := new(UploadMetadata)
                upload.Key = key
                upload.UploadID = session.uploadID
                upload.Initiated = session.initiated
                uploads = append(uploads, upload)
            }
        }
    }
    sort.Sort(byKey(uploads))
    resources.Upload = uploads
    return resources, nil
}

// partNumber is a sortable interface for Part slice
type partNumber []*PartMetadata

func (a partNumber) Len() int           { return len(a) }
func (a partNumber) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }

// ListObjectParts - list parts uploaded so far in a multipart session
func (cache donut) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, error) {
    // Verify upload id
    cache.lock.RLock()
    defer cache.lock.RUnlock()
    if _, ok := cache.storedBuckets[bucket]; !ok {
        return ObjectResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
    }
    storedBucket := cache.storedBuckets[bucket]
    if _, ok := storedBucket.multiPartSession[key]; !ok {
        return ObjectResourcesMetadata{}, iodine.New(ObjectNotFound{Bucket: bucket, Object: key}, nil)
    }
    if storedBucket.multiPartSession[key].uploadID != resources.UploadID {
        return ObjectResourcesMetadata{}, iodine.New(InvalidUploadID{UploadID: resources.UploadID}, nil)
    }
    objectResourcesMetadata := resources
    objectResourcesMetadata.Bucket = bucket
    objectResourcesMetadata.Key = key
    var parts []*PartMetadata
    var startPartNumber int
    switch {
    case objectResourcesMetadata.PartNumberMarker == 0:
        startPartNumber = 1
    default:
        startPartNumber = objectResourcesMetadata.PartNumberMarker
    }
    for i := startPartNumber; i <= storedBucket.multiPartSession[key].totalParts; i++ {
        if len(parts) > objectResourcesMetadata.MaxParts {
            sort.Sort(partNumber(parts))
            objectResourcesMetadata.IsTruncated = true
            objectResourcesMetadata.Part = parts
            objectResourcesMetadata.NextPartNumberMarker = i
            return objectResourcesMetadata, nil
        }
        part, ok := storedBucket.partMetadata[bucket+"/"+getMultipartKey(key, resources.UploadID, i)]
        if !ok {
            return ObjectResourcesMetadata{}, iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil)
        }
        parts = append(parts, &part)
    }
    sort.Sort(partNumber(parts))
    objectResourcesMetadata.Part = parts
    return objectResourcesMetadata, nil
}

// expiredPart - trove callback invoked when a cached part expires
func (cache donut) expiredPart(a ...interface{}) {
    key := a[0].(string)
    // loop through all buckets
    for _, storedBucket := range cache.storedBuckets {
        delete(storedBucket.partMetadata, key)
    }
    debug.FreeOSMemory()
}
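
// expiredPart (and expiredObject in cache.go) are wired up as trove OnExpired
// callbacks in NewDonut below; trove invokes them with the expired key so the
// donut cache can drop the matching metadata.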
pkg/storage/donut/cache.go (new file, 534 lines)
@@ -0,0 +1,534 @@
/*
 * Minimalist Object Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package donut

import (
    "bufio"
    "bytes"
    "crypto/md5"
    "encoding/base64"
    "encoding/hex"
    "errors"
    "io"
    "log"
    "runtime/debug"
    "sort"
    "strconv"
    "strings"
    "time"

    "github.com/minio/minio/pkg/iodine"
)

// total number of buckets allowed
const (
    totalBuckets = 100
)

// proxyWriter wraps an io.Writer and keeps a copy of everything written through it
type proxyWriter struct {
    writer       io.Writer
    writtenBytes []byte
}

func (r *proxyWriter) Write(p []byte) (n int, err error) {
    n, err = r.writer.Write(p)
    if err != nil {
        return
    }
    r.writtenBytes = append(r.writtenBytes, p[0:n]...)
    return
}

func newProxyWriter(w io.Writer) *proxyWriter {
    return &proxyWriter{writer: w, writtenBytes: nil}
}
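
// proxyWriter is not referenced elsewhere in this change; it tees written bytes
// into memory, presumably so a response streamed to a client can also be
// captured for caching.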

// GetObject - GET object from cache buffer
func (cache donut) GetObject(w io.Writer, bucket string, object string) (int64, error) {
    cache.lock.RLock()
    defer cache.lock.RUnlock()
    if !IsValidBucket(bucket) {
        return 0, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
    }
    if !IsValidObjectName(object) {
        return 0, iodine.New(ObjectNameInvalid{Object: object}, nil)
    }
    if _, ok := cache.storedBuckets[bucket]; !ok {
        return 0, iodine.New(BucketNotFound{Bucket: bucket}, nil)
    }
    objectKey := bucket + "/" + object
    data, ok := cache.objects.Get(objectKey)
    if !ok {
        // cache miss: fall through to the backing driver, if any
        if cache.driver != nil {
            return cache.driver.GetObject(w, bucket, object)
        }
        return 0, iodine.New(ObjectNotFound{Bucket: bucket, Object: object}, nil)
    }
    written, err := io.Copy(w, bytes.NewBuffer(data))
    if err != nil {
        return 0, iodine.New(err, nil)
    }
    return written, nil
}

// GetPartialObject - GET object from cache buffer range
func (cache donut) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) {
    errParams := map[string]string{
        "bucket": bucket,
        "object": object,
        "start":  strconv.FormatInt(start, 10),
        "length": strconv.FormatInt(length, 10),
    }
    cache.lock.RLock()
    defer cache.lock.RUnlock()
    if !IsValidBucket(bucket) {
        return 0, iodine.New(BucketNameInvalid{Bucket: bucket}, errParams)
    }
    if !IsValidObjectName(object) {
        return 0, iodine.New(ObjectNameInvalid{Object: object}, errParams)
    }
    if start < 0 {
        return 0, iodine.New(InvalidRange{
            Start:  start,
            Length: length,
        }, errParams)
    }
    objectKey := bucket + "/" + object
    data, ok := cache.objects.Get(objectKey)
    if !ok {
        if cache.driver != nil {
            return cache.driver.GetPartialObject(w, bucket, object, start, length)
        }
        return 0, iodine.New(ObjectNotFound{Bucket: bucket, Object: object}, nil)
    }
    written, err := io.CopyN(w, bytes.NewBuffer(data[start:]), length)
    if err != nil {
        return 0, iodine.New(err, nil)
    }
    return written, nil
}
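
// Only start < 0 is rejected above; start and length are not bounds-checked
// against len(data), so data[start:] would panic on an out-of-range request
// against a cached object. Callers are presumably expected to validate the
// range before calling in.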

// GetBucketMetadata - get bucket metadata from cache, fetching from the backing driver on a miss
func (cache donut) GetBucketMetadata(bucket string) (BucketMetadata, error) {
    cache.lock.RLock()
    if !IsValidBucket(bucket) {
        cache.lock.RUnlock()
        return BucketMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
    }
    if _, ok := cache.storedBuckets[bucket]; !ok {
        if cache.driver == nil {
            cache.lock.RUnlock()
            return BucketMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
        }
        bucketMetadata, err := cache.driver.GetBucketMetadata(bucket)
        if err != nil {
            cache.lock.RUnlock()
            return BucketMetadata{}, iodine.New(err, nil)
        }
        storedBucket := cache.storedBuckets[bucket]
        cache.lock.RUnlock()
        cache.lock.Lock()
        storedBucket.bucketMetadata = bucketMetadata
        cache.storedBuckets[bucket] = storedBucket
        cache.lock.Unlock()
        // return here: the read lock was already released on this path
        return bucketMetadata, nil
    }
    cache.lock.RUnlock()
    return cache.storedBuckets[bucket].bucketMetadata, nil
}

// SetBucketMetadata - set bucket ACL in cache and on the backing driver
func (cache donut) SetBucketMetadata(bucket, acl string) error {
    cache.lock.RLock()
    if !IsValidBucket(bucket) {
        cache.lock.RUnlock()
        return iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
    }
    if _, ok := cache.storedBuckets[bucket]; !ok {
        cache.lock.RUnlock()
        return iodine.New(BucketNotFound{Bucket: bucket}, nil)
    }
    if strings.TrimSpace(acl) == "" {
        acl = "private"
    }
    cache.lock.RUnlock()
    cache.lock.Lock()
    if cache.driver != nil {
        if err := cache.driver.SetBucketMetadata(bucket, acl); err != nil {
            cache.lock.Unlock()
            return iodine.New(err, nil)
        }
    }
    storedBucket := cache.storedBuckets[bucket]
    storedBucket.bucketMetadata.ACL = BucketACL(acl)
    cache.storedBuckets[bucket] = storedBucket
    cache.lock.Unlock()
    return nil
}

// isMD5SumEqual - returns an error on md5sum mismatch, nil on success
func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error {
    if strings.TrimSpace(expectedMD5Sum) != "" && strings.TrimSpace(actualMD5Sum) != "" {
        expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum)
        if err != nil {
            return iodine.New(err, nil)
        }
        actualMD5SumBytes, err := hex.DecodeString(actualMD5Sum)
        if err != nil {
            return iodine.New(err, nil)
        }
        if !bytes.Equal(expectedMD5SumBytes, actualMD5SumBytes) {
            return iodine.New(errors.New("bad digest, md5sum mismatch"), nil)
        }
        return nil
    }
    return iodine.New(errors.New("invalid argument"), nil)
}

// CreateObject - PUT object into cache, rejecting anything larger than the cache capacity
func (cache donut) CreateObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
    if size > int64(cache.maxSize) {
        generic := GenericObjectError{Bucket: bucket, Object: key}
        return "", iodine.New(EntityTooLarge{
            GenericObjectError: generic,
            Size:               strconv.FormatInt(size, 10),
            MaxSize:            strconv.FormatUint(cache.maxSize, 10),
        }, nil)
    }
    md5sum, err := cache.createObject(bucket, key, contentType, expectedMD5Sum, size, data)
    // free
    debug.FreeOSMemory()
    return md5sum, iodine.New(err, nil)
}

// createObject - PUT object to cache buffer
func (cache donut) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
    cache.lock.RLock()
    if !IsValidBucket(bucket) {
        cache.lock.RUnlock()
        return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
    }
    if !IsValidObjectName(key) {
        cache.lock.RUnlock()
        return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
    }
    if _, ok := cache.storedBuckets[bucket]; !ok {
        cache.lock.RUnlock()
        return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
    }
    storedBucket := cache.storedBuckets[bucket]
    // get object key
    objectKey := bucket + "/" + key
    if _, ok := storedBucket.objectMetadata[objectKey]; ok {
        cache.lock.RUnlock()
        return "", iodine.New(ObjectExists{Bucket: bucket, Object: key}, nil)
    }
    cache.lock.RUnlock()

    if contentType == "" {
        contentType = "application/octet-stream"
    }
    contentType = strings.TrimSpace(contentType)
    if strings.TrimSpace(expectedMD5Sum) != "" {
        expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
        if err != nil {
            // pro-actively close the connection
            return "", iodine.New(InvalidDigest{Md5: expectedMD5Sum}, nil)
        }
        expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
    }

    // calculate md5 while buffering the object in 1MiB chunks
    hash := md5.New()
    var readBytes []byte

    var err error
    var length int
    for err == nil {
        byteBuffer := make([]byte, 1024*1024)
        length, err = data.Read(byteBuffer)
        // hash.Write() accepts an empty slice, but a zero-length read
        // still has to terminate the loop
        if length == 0 {
            break
        }
        hash.Write(byteBuffer[0:length])
        readBytes = append(readBytes, byteBuffer[0:length]...)
    }
    if err != io.EOF {
        return "", iodine.New(err, nil)
    }
    md5SumBytes := hash.Sum(nil)
    totalLength := len(readBytes)

    cache.lock.Lock()
    ok := cache.objects.Set(objectKey, readBytes)
    // setting up for de-allocation
    readBytes = nil
    go debug.FreeOSMemory()
    cache.lock.Unlock()
    if !ok {
        return "", iodine.New(InternalError{}, nil)
    }

    md5Sum := hex.EncodeToString(md5SumBytes)
    // Verify if the written object is equal to what is expected, only if it is requested as such
    if strings.TrimSpace(expectedMD5Sum) != "" {
        if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil {
            return "", iodine.New(BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil)
        }
    }

    newObject := ObjectMetadata{
        Bucket: bucket,
        Key:    key,

        ContentType: contentType,
        Created:     time.Now().UTC(),
        Md5:         md5Sum,
        Size:        int64(totalLength),
    }

    cache.lock.Lock()
    storedBucket.objectMetadata[objectKey] = newObject
    cache.storedBuckets[bucket] = storedBucket
    cache.lock.Unlock()
    return newObject.Md5, nil
}

// CreateBucket - create bucket in cache
func (cache donut) CreateBucket(bucketName, acl string) error {
    cache.lock.RLock()
    if len(cache.storedBuckets) == totalBuckets {
        cache.lock.RUnlock()
        return iodine.New(TooManyBuckets{Bucket: bucketName}, nil)
    }
    if !IsValidBucket(bucketName) {
        cache.lock.RUnlock()
        return iodine.New(BucketNameInvalid{Bucket: bucketName}, nil)
    }
    if !IsValidBucketACL(acl) {
        cache.lock.RUnlock()
        return iodine.New(InvalidACL{ACL: acl}, nil)
    }
    if _, ok := cache.storedBuckets[bucketName]; ok {
        cache.lock.RUnlock()
        return iodine.New(BucketExists{Bucket: bucketName}, nil)
    }
    cache.lock.RUnlock()

    if strings.TrimSpace(acl) == "" {
        // default is private
        acl = "private"
    }
    if cache.driver != nil {
        if err := cache.driver.CreateBucket(bucketName, acl); err != nil {
            return iodine.New(err, nil)
        }
    }
    var newBucket = storedBucket{}
    newBucket.objectMetadata = make(map[string]ObjectMetadata)
    newBucket.multiPartSession = make(map[string]multiPartSession)
    newBucket.partMetadata = make(map[string]PartMetadata)
    newBucket.bucketMetadata = BucketMetadata{}
    newBucket.bucketMetadata.Name = bucketName
    newBucket.bucketMetadata.Created = time.Now().UTC()
    newBucket.bucketMetadata.ACL = BucketACL(acl)
    cache.lock.Lock()
    cache.storedBuckets[bucketName] = newBucket
    cache.lock.Unlock()
    return nil
}

// delimiter - returns the prefix of object up to and including the first
// occurrence of the delimiter's first byte; if the delimiter never occurs,
// the whole string is returned
func delimiter(object, delimiter string) string {
    readBuffer := bytes.NewBufferString(object)
    reader := bufio.NewReader(readBuffer)
    stringReader := strings.NewReader(delimiter)
    delimited, _ := stringReader.ReadByte()
    delimitedStr, _ := reader.ReadString(delimited)
    return delimitedStr
}
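
// For illustration (not part of this commit): delimiter("photos/2015/may.jpg", "/")
// returns "photos/", while delimiter("may.jpg", "/") returns "may.jpg" unchanged.
// Only the first byte of a multi-byte delimiter is honored.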

// appendUniq - append a string to the slice only if it is not already present
func appendUniq(slice []string, i string) []string {
    for _, ele := range slice {
        if ele == i {
            return slice
        }
    }
    return append(slice, i)
}

func (cache donut) filterDelimiterPrefix(keys []string, key, delim string, r BucketResourcesMetadata) ([]string, BucketResourcesMetadata) {
    switch {
    case key == r.Prefix:
        keys = appendUniq(keys, key)
    // delim - requires r.Prefix as it was trimmed off earlier
    case key == r.Prefix+delim:
        keys = appendUniq(keys, key)
    case delim != "":
        r.CommonPrefixes = appendUniq(r.CommonPrefixes, r.Prefix+delim)
    }
    return keys, r
}

func (cache donut) listObjects(keys []string, key string, r BucketResourcesMetadata) ([]string, BucketResourcesMetadata) {
    switch {
    // Delimiter present without prefix, delimit object key based on delimiter
    case r.IsDelimiterSet():
        delim := delimiter(key, r.Delimiter)
        switch {
        case delim == "" || delim == key:
            keys = appendUniq(keys, key)
        case delim != "":
            r.CommonPrefixes = appendUniq(r.CommonPrefixes, delim)
        }
    // Prefix present, delimit object key with prefix key based on delimiter
    case r.IsDelimiterPrefixSet():
        if strings.HasPrefix(key, r.Prefix) {
            trimmedName := strings.TrimPrefix(key, r.Prefix)
            delim := delimiter(trimmedName, r.Delimiter)
            keys, r = cache.filterDelimiterPrefix(keys, key, delim, r)
        }
    // Prefix present, nothing to delimit
    case r.IsPrefixSet():
        keys = appendUniq(keys, key)
    // Prefix and delimiter absent
    case r.IsDefault():
        keys = appendUniq(keys, key)
    }
    return keys, r
}
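
// The four cases map one-to-one onto FilterMode in utils.go: DelimiterMode and
// DelimiterPrefixMode fold keys sharing a common prefix into CommonPrefixes
// (S3-style directory listing), while PrefixMode and DefaultMode return keys as-is.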

// ListObjects - list objects from cache
func (cache donut) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error) {
    cache.lock.RLock()
    defer cache.lock.RUnlock()
    if !IsValidBucket(bucket) {
        return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
    }
    if !IsValidObjectName(resources.Prefix) {
        return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(ObjectNameInvalid{Object: resources.Prefix}, nil)
    }
    if _, ok := cache.storedBuckets[bucket]; !ok {
        return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
    }
    var results []ObjectMetadata
    var keys []string
    storedBucket := cache.storedBuckets[bucket]
    for key := range storedBucket.objectMetadata {
        if strings.HasPrefix(key, bucket+"/") {
            key = key[len(bucket)+1:]
            keys, resources = cache.listObjects(keys, key, resources)
        }
    }
    var newKeys []string
    switch {
    case resources.Marker != "":
        for _, key := range keys {
            if key > resources.Marker {
                newKeys = appendUniq(newKeys, key)
            }
        }
    default:
        newKeys = keys
    }
    sort.Strings(newKeys)
    for _, key := range newKeys {
        if len(results) == resources.Maxkeys {
            resources.IsTruncated = true
            if resources.IsTruncated && resources.IsDelimiterSet() {
                resources.NextMarker = results[len(results)-1].Key
            }
            return results, resources, nil
        }
        object := storedBucket.objectMetadata[bucket+"/"+key]
        results = append(results, object)
    }
    return results, resources, nil
}

// byBucketName is a type for sorting bucket metadata by bucket name
type byBucketName []BucketMetadata

func (b byBucketName) Len() int           { return len(b) }
func (b byBucketName) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }

// ListBuckets - List buckets from cache
func (cache donut) ListBuckets() ([]BucketMetadata, error) {
    cache.lock.RLock()
    defer cache.lock.RUnlock()
    var results []BucketMetadata
    for _, bucket := range cache.storedBuckets {
        results = append(results, bucket.bucketMetadata)
    }
    sort.Sort(byBucketName(results))
    return results, nil
}

// GetObjectMetadata - get object metadata from cache, falling back to the driver on a miss
func (cache donut) GetObjectMetadata(bucket, key string) (ObjectMetadata, error) {
    cache.lock.RLock()
    // check if bucket exists
    if !IsValidBucket(bucket) {
        cache.lock.RUnlock()
        return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
    }
    if !IsValidObjectName(key) {
        cache.lock.RUnlock()
        return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil)
    }
    if _, ok := cache.storedBuckets[bucket]; !ok {
        cache.lock.RUnlock()
        return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
    }
    storedBucket := cache.storedBuckets[bucket]
    objectKey := bucket + "/" + key
    if objMetadata, ok := storedBucket.objectMetadata[objectKey]; ok {
        cache.lock.RUnlock()
        return objMetadata, nil
    }
    if cache.driver != nil {
        objMetadata, err := cache.driver.GetObjectMetadata(bucket, key)
        cache.lock.RUnlock()
        if err != nil {
            return ObjectMetadata{}, iodine.New(err, nil)
        }
        // update
        cache.lock.Lock()
        storedBucket.objectMetadata[objectKey] = objMetadata
        cache.lock.Unlock()
        return objMetadata, nil
    }
    cache.lock.RUnlock()
    return ObjectMetadata{}, iodine.New(ObjectNotFound{Bucket: bucket, Object: key}, nil)
}

// expiredObject - trove callback invoked when a cached object expires
func (cache donut) expiredObject(a ...interface{}) {
    cacheStats := cache.objects.Stats()
    log.Printf("CurrentSize: %d, CurrentItems: %d, TotalExpirations: %d",
        cacheStats.Bytes, cacheStats.Items, cacheStats.Expired)
    key := a[0].(string)
    // loop through all buckets
    for bucket, storedBucket := range cache.storedBuckets {
        delete(storedBucket.objectMetadata, key)
        // remove bucket if no objects found anymore
        if len(storedBucket.objectMetadata) == 0 {
            if time.Since(cache.storedBuckets[bucket].bucketMetadata.Created) > cache.expiration {
                delete(cache.storedBuckets, bucket)
            }
        }
    }
    debug.FreeOSMemory()
}
pkg/storage/donut/date.go (new file, 78 lines)
@@ -0,0 +1,78 @@
/*
 * Minimalist Object Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package donut

import (
    "errors"
    "fmt"
    "strconv"
    "strings"
)

// Date - [0000-00-00]
type Date struct {
    Year  int16
    Month byte
    Day   byte
}

// String output in yyyy-mm-dd format
func (d Date) String() string {
    return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
}

// IsZero true if date is 0000-00-00
func (d Date) IsZero() bool {
    return d.Day == 0 && d.Month == 0 && d.Year == 0
}

// parseDate - convert a string date in YYYY-MM-DD format to Date.
// Leading and trailing spaces are ignored. If the format is invalid,
// a zero Date and an error are returned.
func parseDate(str string) (d Date, err error) {
    str = strings.TrimSpace(str)
    if str == "0000-00-00" {
        return
    }
    var (
        y, m, n int
    )
    if len(str) != 10 || str[4] != '-' || str[7] != '-' {
        err = errors.New("Invalid 0000-00-00 style DATE string: " + str)
        return
    }
    if y, err = strconv.Atoi(str[0:4]); err != nil {
        return
    }
    if m, err = strconv.Atoi(str[5:7]); err != nil {
        return
    }
    if m < 1 || m > 12 {
        err = errors.New("Invalid 0000-00-00 style DATE string: " + str)
        return
    }
    if n, err = strconv.Atoi(str[8:10]); err != nil {
        return
    }
    if n < 1 || n > 31 {
        err = errors.New("Invalid 0000-00-00 style DATE string: " + str)
        return
    }
    d.Year = int16(y)
    d.Month = byte(m)
    d.Day = byte(n)
    return
}
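
// For illustration (not part of this commit): parseDate("2015-05-20") yields
// Date{Year: 2015, Month: 5, Day: 20}, while parseDate("0000-00-00") yields the
// zero Date with no error (IsZero() == true).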

pkg/storage/donut/donut.go
@@ -25,8 +25,10 @@ import (
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/minio/minio/pkg/iodine"
    "github.com/minio/minio/pkg/storage/donut/trove"
)

// donut struct internal data

@@ -35,6 +37,32 @@ type donut struct {
    buckets map[string]bucket
    nodes   map[string]node
    lock    *sync.RWMutex
    cache   cache
}

// cache - local variables
type cache struct {
    storedBuckets    map[string]storedBucket
    lock             *sync.RWMutex
    objects          *trove.Cache
    multiPartObjects *trove.Cache
    maxSize          uint64
    expiration       time.Duration
}

// storedBucket saved bucket
type storedBucket struct {
    bucketMetadata   BucketMetadata
    objectMetadata   map[string]ObjectMetadata
    partMetadata     map[string]PartMetadata
    multiPartSession map[string]multiPartSession
}

// multiPartSession multipart session
type multiPartSession struct {
    totalParts int
    uploadID   string
    initiated  time.Time
}

// config files used inside Donut

@@ -82,6 +110,15 @@ func NewDonut(donutName string, nodeDiskMap map[string][]string) (Donut, error)
            return nil, iodine.New(err, nil)
        }
    }
    d.cache.storedBuckets = make(map[string]storedBucket)
    d.cache.objects = trove.NewCache(maxSize, expiration)
    d.cache.multiPartObjects = trove.NewCache(0, time.Duration(0))

    d.cache.objects.OnExpired = d.expiredObject
    d.cache.multiPartObjects.OnExpired = d.expiredPart

    // set up cache expiration
    d.cache.objects.ExpireObjects(time.Second * 5)
    return d, nil
}

pkg/storage/donut/errors.go
@@ -16,6 +16,8 @@

package donut

import "fmt"

// InvalidArgument invalid argument
type InvalidArgument struct{}

@@ -138,3 +140,172 @@ type InvalidErasureTechnique struct {
func (e InvalidErasureTechnique) Error() string {
    return "Invalid erasure technique: " + e.Technique
}

// InternalError - generic internal error
type InternalError struct{}

// BackendError - generic disk backend error
type BackendError struct {
    Path string
}

// BackendCorrupted - path has corrupted data
type BackendCorrupted BackendError

// APINotImplemented - generic API not implemented error
type APINotImplemented struct {
    API string
}

// GenericBucketError - generic bucket error
type GenericBucketError struct {
    Bucket string
}

// GenericObjectError - generic object error
type GenericObjectError struct {
    Bucket string
    Object string
}

// ImplementationError - generic implementation error
type ImplementationError struct {
    Bucket string
    Object string
    Err    error
}

// DigestError - generic Md5 error
type DigestError struct {
    Bucket string
    Key    string
    Md5    string
}

/// ACL related errors

// InvalidACL - acl invalid
type InvalidACL struct {
    ACL string
}

func (e InvalidACL) Error() string {
    return "Requested ACL " + e.ACL + " is invalid"
}

/// Bucket related errors

// BucketNameInvalid - bucketname provided is invalid
type BucketNameInvalid GenericBucketError

// TooManyBuckets - total buckets exceeded
type TooManyBuckets GenericBucketError

/// Object related errors

// EntityTooLarge - object size exceeds maximum limit
type EntityTooLarge struct {
    GenericObjectError
    Size    string
    MaxSize string
}

// ObjectNameInvalid - object name provided is invalid
type ObjectNameInvalid GenericObjectError

// InvalidDigest - md5 in request header invalid
type InvalidDigest DigestError

// Error - returns the error formatted as the given text
func (e ImplementationError) Error() string {
    msg := ""
    if e.Bucket != "" {
        msg = msg + "Bucket: " + e.Bucket + " "
    }
    if e.Object != "" {
        msg = msg + "Object: " + e.Object + " "
    }
    msg = msg + "Error: " + e.Err.Error()
    return msg
}

// EmbedError - wrapper function for error object
func EmbedError(bucket, object string, err error) ImplementationError {
    return ImplementationError{
        Bucket: bucket,
        Object: object,
        Err:    err,
    }
}

// Error - returns the error formatted as the given text
func (e InternalError) Error() string {
    return "Internal error occurred"
}

// Error - returns the error formatted as the given text
func (e APINotImplemented) Error() string {
    return "API not implemented: " + e.API
}

// Error - returns the error formatted as the given text
func (e BucketNameInvalid) Error() string {
    return "Bucket name invalid: " + e.Bucket
}

// Error - returns the error formatted as the given text
func (e TooManyBuckets) Error() string {
    return "Bucket limit exceeded beyond 100, cannot create bucket: " + e.Bucket
}

// Error - returns the error formatted as the given text
func (e ObjectNameInvalid) Error() string {
    return "Object name invalid: " + e.Bucket + "#" + e.Object
}

// Error - returns the error formatted as the given text
func (e EntityTooLarge) Error() string {
    return e.Bucket + "#" + e.Object + " with " + e.Size + " reached maximum allowed size limit " + e.MaxSize
}

// Error - returns the error formatted as the given text
func (e BackendCorrupted) Error() string {
    return "Backend corrupted: " + e.Path
}

// Error - returns the error formatted as the given text
func (e InvalidDigest) Error() string {
    return "Md5 provided " + e.Md5 + " is invalid"
}

// OperationNotPermitted - operation not permitted
type OperationNotPermitted struct {
    Op     string
    Reason string
}

func (e OperationNotPermitted) Error() string {
    return "Operation " + e.Op + " not permitted for reason: " + e.Reason
}

// InvalidRange - invalid range
type InvalidRange struct {
    Start  int64
    Length int64
}

func (e InvalidRange) Error() string {
    return fmt.Sprintf("Invalid range start:%d length:%d", e.Start, e.Length)
}

/// Multipart related errors

// InvalidUploadID invalid upload id
type InvalidUploadID struct {
    UploadID string
}

func (e InvalidUploadID) Error() string {
    return "Invalid upload id " + e.UploadID
}
pkg/storage/donut/trove/trove.go (new file, 215 lines)
@@ -0,0 +1,215 @@
/*
 * Minimalist Object Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Package trove implements in memory caching methods
package trove

import (
    "sync"
    "time"
)

var noExpiration = time.Duration(0)

// Cache holds the required variables to compose an in memory cache system
// which also provides an expiring key mechanism and a maxSize cap
type Cache struct {
    // Mutex guards concurrent read/write access to the cache
    sync.Mutex

    // items hold the cached objects
    items map[string][]byte

    // updatedAt holds the time at which the related item was last updated
    updatedAt map[string]time.Time

    // expiration is the duration after which an untouched cache key expires
    expiration time.Duration

    // stopExpireTimer channel to quit the timer thread
    stopExpireTimer chan struct{}

    // maxSize is the total size cap for the overall cache
    maxSize uint64

    // currentSize is the current size in memory
    currentSize uint64

    // OnExpired - callback function for eviction
    OnExpired func(a ...interface{})

    // totalExpired counter to keep track of total expirations
    totalExpired uint64
}

// Stats current cache statistics
type Stats struct {
    Bytes   uint64
    Items   uint64
    Expired uint64
}

// NewCache creates an in-memory cache
//
// maxSize is used for evicting objects before we run out of memory;
// expiration is how long a key may sit untouched before it expires
// (0 means keys never expire)
func NewCache(maxSize uint64, expiration time.Duration) *Cache {
    return &Cache{
        items:      make(map[string][]byte),
        updatedAt:  map[string]time.Time{},
        expiration: expiration,
        maxSize:    maxSize,
    }
}
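
// Sketch of typical usage (illustrative only; the donut cache above wires this
// up with its own sizes and callbacks):
//
//	c := NewCache(512*1024*1024, time.Hour) // 512 MiB cap, 1h idle expiry
//	c.OnExpired = func(a ...interface{}) { /* drop metadata for a[0].(string) */ }
//	c.ExpireObjects(5 * time.Second)        // background sweep every 5s
//	c.Set("bucket/key", []byte("hello"))
//	data, ok := c.Get("bucket/key")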

// Stats get current cache statistics
func (r *Cache) Stats() Stats {
    return Stats{
        Bytes:   r.currentSize,
        Items:   uint64(len(r.items)),
        Expired: r.totalExpired,
    }
}

// ExpireObjects expire objects in a background go routine
func (r *Cache) ExpireObjects(gcInterval time.Duration) {
    r.stopExpireTimer = make(chan struct{})
    ticker := time.NewTicker(gcInterval)
    go func() {
        for {
            select {
            case <-ticker.C:
                r.Expire()
            case <-r.stopExpireTimer:
                ticker.Stop()
                return
            }
        }
    }()
}

// Get returns the value of a given key if it exists;
// a successful Get also refreshes the key's updatedAt timestamp
func (r *Cache) Get(key string) ([]byte, bool) {
    r.Lock()
    defer r.Unlock()
    value, ok := r.items[key]
    if !ok {
        return nil, false
    }
    r.updatedAt[key] = time.Now()
    return value, true
}

// Append appends new data to an existing key;
// if the key doesn't exist it behaves like Set()
func (r *Cache) Append(key string, value []byte) bool {
    r.Lock()
    defer r.Unlock()
    valueLen := uint64(len(value))
    if r.maxSize > 0 {
        // check if the size of the object is not bigger than the
        // capacity of the cache
        if valueLen > r.maxSize {
            return false
        }
        // evict random keys until the new value fits under maxSize
        for (r.currentSize + valueLen) > r.maxSize {
            for randomKey := range r.items {
                r.doDelete(randomKey)
                break
            }
        }
    }
    _, ok := r.items[key]
    if !ok {
        r.items[key] = value
        r.currentSize += valueLen
        r.updatedAt[key] = time.Now()
        return true
    }
    r.items[key] = append(r.items[key], value...)
    r.currentSize += valueLen
    r.updatedAt[key] = time.Now()
    return true
}

// Set will persist a value to the cache
func (r *Cache) Set(key string, value []byte) bool {
    r.Lock()
    defer r.Unlock()
    valueLen := uint64(len(value))
    if r.maxSize > 0 {
        // check if the size of the object is not bigger than the
        // capacity of the cache
        if valueLen > r.maxSize {
            return false
        }
        // evict random keys until the new value fits under maxSize
        for (r.currentSize + valueLen) > r.maxSize {
            for randomKey := range r.items {
                r.doDelete(randomKey)
                break
            }
        }
    }
    r.items[key] = value
    r.currentSize += valueLen
    r.updatedAt[key] = time.Now()
    return true
}
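
// Eviction picks an arbitrary victim: Go randomizes map iteration order, so
// taking the first key of a range loop amounts to random replacement. Note
// that Set does not subtract the old value's size when overwriting an existing
// key; donut never overwrites (createObject rejects existing keys), so
// currentSize stays accurate in practice.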

// Expire expires keys which have expired
func (r *Cache) Expire() {
    r.Lock()
    defer r.Unlock()
    for key := range r.items {
        if !r.isValid(key) {
            r.doDelete(key)
        }
    }
}

// Delete deletes a given key if it exists
func (r *Cache) Delete(key string) {
    r.Lock()
    defer r.Unlock()
    r.doDelete(key)
}

// doDelete - delete a key unconditionally; the caller must hold the lock.
// Every removal, including an explicit Delete, bumps totalExpired and fires OnExpired.
func (r *Cache) doDelete(key string) {
    if _, ok := r.items[key]; ok {
        r.currentSize -= uint64(len(r.items[key]))
        delete(r.items, key)
        delete(r.updatedAt, key)
        r.totalExpired++
        if r.OnExpired != nil {
            r.OnExpired(key)
        }
    }
}

// isValid - true if the key has not outlived the configured expiration
func (r *Cache) isValid(key string) bool {
    updatedAt, ok := r.updatedAt[key]
    if !ok {
        return false
    }
    if r.expiration == noExpiration {
        return true
    }
    return updatedAt.Add(r.expiration).After(time.Now())
}
pkg/storage/donut/trove/trove_test.go (new file, 45 lines)
@@ -0,0 +1,45 @@
/*
 * Minimalist Object Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package trove

import (
    "testing"

    . "github.com/minio/check"
)

func Test(t *testing.T) { TestingT(t) }

type MySuite struct{}

var _ = Suite(&MySuite{})

func (s *MySuite) TestCache(c *C) {
    cache := NewCache(1000, 0)
    data := []byte("Hello, world!")
    ok := cache.Set("filename", data)

    c.Assert(ok, Equals, true)
    storedata, ok := cache.Get("filename")

    c.Assert(ok, Equals, true)
    c.Assert(data, DeepEquals, storedata)

    cache.Delete("filename")
    _, ok = cache.Get("filename")
    c.Assert(ok, Equals, false)
}
pkg/storage/donut/utils.go (new file, 211 lines)
@@ -0,0 +1,211 @@
package donut

import (
    "regexp"
    "strings"
    "time"
    "unicode/utf8"
)

// BucketACL - bucket level access control
type BucketACL string

// different types of ACLs currently supported for buckets
const (
    BucketPrivate         = BucketACL("private")
    BucketPublicRead      = BucketACL("public-read")
    BucketPublicReadWrite = BucketACL("public-read-write")
)

func (b BucketACL) String() string {
    return string(b)
}

// IsPrivate - is acl Private
func (b BucketACL) IsPrivate() bool {
    return b == BucketACL("private")
}

// IsPublicRead - is acl PublicRead
func (b BucketACL) IsPublicRead() bool {
    return b == BucketACL("public-read")
}

// IsPublicReadWrite - is acl PublicReadWrite
func (b BucketACL) IsPublicReadWrite() bool {
    return b == BucketACL("public-read-write")
}

// BucketMetadata - name and create date
type BucketMetadata struct {
    Name    string
    Created time.Time
    ACL     BucketACL
}

// ObjectMetadata - object key and its relevant metadata
type ObjectMetadata struct {
    Bucket string
    Key    string

    ContentType string
    Created     time.Time
    Md5         string
    Size        int64
}

// FilterMode type
type FilterMode int

// FilterMode list
const (
    DelimiterPrefixMode FilterMode = iota
    DelimiterMode
    PrefixMode
    DefaultMode
)

// PartMetadata - various types of individual part resources
type PartMetadata struct {
    PartNumber   int
    LastModified time.Time
    ETag         string
    Size         int64
}

// ObjectResourcesMetadata - various types of object resources
type ObjectResourcesMetadata struct {
    Bucket               string
    EncodingType         string
    Key                  string
    UploadID             string
    StorageClass         string
    PartNumberMarker     int
    NextPartNumberMarker int
    MaxParts             int
    IsTruncated          bool

    Part []*PartMetadata
}

// UploadMetadata - metadata for an in-progress multipart upload in a given bucket
type UploadMetadata struct {
    Key          string
    UploadID     string
    StorageClass string
    Initiated    time.Time
}

// BucketMultipartResourcesMetadata - various types of bucket resources for in-progress multipart uploads
type BucketMultipartResourcesMetadata struct {
    KeyMarker          string
    UploadIDMarker     string
    NextKeyMarker      string
    NextUploadIDMarker string
    EncodingType       string
    MaxUploads         int
    IsTruncated        bool
    Upload             []*UploadMetadata
    Prefix             string
    Delimiter          string
    CommonPrefixes     []string
}

// BucketResourcesMetadata - various types of bucket resources
type BucketResourcesMetadata struct {
    Prefix         string
    Marker         string
    NextMarker     string
    Maxkeys        int
    EncodingType   string
    Delimiter      string
    IsTruncated    bool
    CommonPrefixes []string
    Mode           FilterMode
}

// GetMode - populate filter mode from the requested prefix and delimiter
func GetMode(resources BucketResourcesMetadata) FilterMode {
    var f FilterMode
    switch {
    case resources.Delimiter != "" && resources.Prefix != "":
        f = DelimiterPrefixMode
    case resources.Delimiter != "" && resources.Prefix == "":
        f = DelimiterMode
    case resources.Delimiter == "" && resources.Prefix != "":
        f = PrefixMode
    case resources.Delimiter == "" && resources.Prefix == "":
        f = DefaultMode
    }

    return f
}

// IsValidBucketACL - is the provided acl string supported
func IsValidBucketACL(acl string) bool {
    switch acl {
    case "private":
        fallthrough
    case "public-read":
        fallthrough
    case "public-read-write":
        return true
    case "":
        // by default it is "private"
        return true
    default:
        return false
    }
}

// IsDelimiterPrefixSet - delimiter and prefix are both set
func (b BucketResourcesMetadata) IsDelimiterPrefixSet() bool {
    return b.Mode == DelimiterPrefixMode
}

// IsDelimiterSet - only the delimiter is set
func (b BucketResourcesMetadata) IsDelimiterSet() bool {
    return b.Mode == DelimiterMode
}

// IsPrefixSet - only the prefix is set
func (b BucketResourcesMetadata) IsPrefixSet() bool {
    return b.Mode == PrefixMode
}

// IsDefault - no query values set
func (b BucketResourcesMetadata) IsDefault() bool {
    return b.Mode == DefaultMode
}

// IsValidBucket - verify bucket name in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func IsValidBucket(bucket string) bool {
    if len(bucket) < 3 || len(bucket) > 63 {
        return false
    }
    if bucket[0] == '.' || bucket[len(bucket)-1] == '.' {
        return false
    }
    if match, _ := regexp.MatchString("\\.\\.", bucket); match {
        return false
    }
    // We don't support buckets with '.' in them
    match, _ := regexp.MatchString("^[a-zA-Z][a-zA-Z0-9\\-]+[a-zA-Z0-9]$", bucket)
    return match
}
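
// For illustration (not part of this commit): "my-bucket" and "Logs2015" pass
// the regexp above, while "my.bucket" (contains '.'), "ab" (too short) and
// "-bucket" (must start with a letter) are all rejected.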

// IsValidObjectName - verify object name in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
func IsValidObjectName(object string) bool {
    if strings.TrimSpace(object) == "" {
        // empty names are treated as valid here; ListObjects passes an
        // empty prefix through this check
        return true
    }
    if len(object) > 1024 || len(object) == 0 {
        return false
    }
    if !utf8.ValidString(object) {
        return false
    }
    return true
}