minio/minio (https://github.com/minio/minio.git)
Integrate cache with donut, add tests

commit bce93c1b3a
parent 0533abf6a8
@@ -217,23 +217,23 @@ func (cache Cache) cleanupMultiparts(bucket, key, uploadID string) {
 }

 // CompleteMultipartUpload -
-func (cache Cache) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) {
+func (cache Cache) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (ObjectMetadata, error) {
     if !IsValidBucket(bucket) {
-        return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
+        return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
     }
     if !IsValidObjectName(key) {
-        return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
+        return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil)
     }
     // Verify upload id
     cache.lock.RLock()
     if _, ok := cache.storedBuckets[bucket]; ok == false {
         cache.lock.RUnlock()
-        return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
+        return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
     }
     storedBucket := cache.storedBuckets[bucket]
     if storedBucket.multiPartSession[key].uploadID != uploadID {
         cache.lock.RUnlock()
-        return "", iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
+        return ObjectMetadata{}, iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
     }
     cache.lock.RUnlock()

@@ -245,21 +245,21 @@ func (cache Cache) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) {
         object, ok := cache.multiPartObjects.Get(bucket + "/" + getMultipartKey(key, uploadID, i))
         if ok == false {
             cache.lock.Unlock()
-            return "", iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil)
+            return ObjectMetadata{}, iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil)
         }
         size += int64(len(object))
         calcMD5Bytes := md5.Sum(object)
         // complete multi part request header md5sum per part is hex encoded
         recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\""))
         if err != nil {
-            return "", iodine.New(InvalidDigest{Md5: recvMD5}, nil)
+            return ObjectMetadata{}, iodine.New(InvalidDigest{Md5: recvMD5}, nil)
         }
         if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) {
-            return "", iodine.New(BadDigest{}, nil)
+            return ObjectMetadata{}, iodine.New(BadDigest{}, nil)
         }
         _, err = io.Copy(&fullObject, bytes.NewBuffer(object))
         if err != nil {
-            return "", iodine.New(err, nil)
+            return ObjectMetadata{}, iodine.New(err, nil)
         }
         object = nil
         go debug.FreeOSMemory()
@@ -269,16 +269,16 @@ func (cache Cache) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) {
     md5sumSlice := md5.Sum(fullObject.Bytes())
     // this is needed for final verification inside CreateObject, do not convert this to hex
     md5sum := base64.StdEncoding.EncodeToString(md5sumSlice[:])
-    etag, err := cache.CreateObject(bucket, key, "", md5sum, size, &fullObject)
+    objectMetadata, err := cache.CreateObject(bucket, key, "", md5sum, size, &fullObject)
     if err != nil {
         // No need to call internal cleanup functions here, caller will call AbortMultipartUpload()
         // which would in-turn cleanup properly in accordance with S3 Spec
-        return "", iodine.New(err, nil)
+        return ObjectMetadata{}, iodine.New(err, nil)
     }
     fullObject.Reset()
     cache.cleanupMultiparts(bucket, key, uploadID)
     cache.cleanupMultipartSession(bucket, key, uploadID)
-    return etag, nil
+    return objectMetadata, nil
 }

 // byKey is a sortable interface for UploadMetadata slice
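With this hunk, CompleteMultipartUpload hands back the assembled object's ObjectMetadata instead of a bare etag string, so callers get the etag and the size in a single call. A minimal caller sketch under that assumption (finishUpload is a hypothetical helper; MD5Sum and Size are ObjectMetadata fields used elsewhere in this commit, and fmt is assumed imported):

// hypothetical caller inside package donut, sketching only the new signature
func finishUpload(cache Cache, bucket, key, uploadID string, parts map[int]string) error {
    objectMetadata, err := cache.CompleteMultipartUpload(bucket, key, uploadID, parts)
    if err != nil {
        // per the in-diff comment, the caller cleans up with AbortMultipartUpload()
        // in accordance with the S3 spec
        return iodine.New(err, nil)
    }
    // etag and size arrive together now, no follow-up metadata lookup needed
    fmt.Printf("completed %s/%s etag=%s size=%d\n", bucket, key, objectMetadata.MD5Sum, objectMetadata.Size)
    return nil
}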
@@ -93,6 +93,9 @@ func NewCache(maxSize uint64, expiration time.Duration, donutName string, nodeDi
     c.multiPartObjects = trove.NewCache(0, time.Duration(0))
     c.objects.OnExpired = c.expiredObject
     c.multiPartObjects.OnExpired = c.expiredPart
+    c.lock = new(sync.RWMutex)
+    c.maxSize = maxSize
+    c.expiration = expiration

     // set up cache expiration
     c.objects.ExpireObjects(time.Second * 5)
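NewCache now wires up the shared RWMutex, the size cap, and the expiration window before arming the expiry loop. Construction exactly as the tests in this commit exercise it (root comes from ioutil.TempDir in the test suite):

// 100000-byte cap, one-hour expiration, donut name "test"
d := NewCache(100000, time.Duration(1*time.Hour), "test", createTestNodeDiskMap(root))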
@@ -262,42 +265,42 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error {
 }

 // CreateObject -
-func (cache Cache) CreateObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
+func (cache Cache) CreateObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (ObjectMetadata, error) {
     if size > int64(cache.maxSize) {
         generic := GenericObjectError{Bucket: bucket, Object: key}
-        return "", iodine.New(EntityTooLarge{
+        return ObjectMetadata{}, iodine.New(EntityTooLarge{
             GenericObjectError: generic,
             Size:               strconv.FormatInt(size, 10),
             MaxSize:            strconv.FormatUint(cache.maxSize, 10),
         }, nil)
     }
-    md5sum, err := cache.createObject(bucket, key, contentType, expectedMD5Sum, size, data)
+    objectMetadata, err := cache.createObject(bucket, key, contentType, expectedMD5Sum, size, data)
     // free
     debug.FreeOSMemory()
-    return md5sum, iodine.New(err, nil)
+    return objectMetadata, iodine.New(err, nil)
 }

 // createObject - PUT object to cache buffer
-func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
+func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (ObjectMetadata, error) {
     cache.lock.RLock()
     if !IsValidBucket(bucket) {
         cache.lock.RUnlock()
-        return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
+        return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
     }
     if !IsValidObjectName(key) {
         cache.lock.RUnlock()
-        return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
+        return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil)
     }
     if _, ok := cache.storedBuckets[bucket]; ok == false {
         cache.lock.RUnlock()
-        return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
+        return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
     }
     storedBucket := cache.storedBuckets[bucket]
     // get object key
     objectKey := bucket + "/" + key
     if _, ok := storedBucket.objectMetadata[objectKey]; ok == true {
         cache.lock.RUnlock()
-        return "", iodine.New(ObjectExists{Object: key}, nil)
+        return ObjectMetadata{}, iodine.New(ObjectExists{Object: key}, nil)
     }
     cache.lock.RUnlock()

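CreateObject and the internal createObject now return ObjectMetadata as well, which is why every error path above swaps "" for the zero value ObjectMetadata{}. Usage as the tests later in this commit exercise it (identifiers copied from the test diff):

// expectedMd5Sum goes in base64-encoded; the returned metadata
// carries the hex etag, the size, and the content type
objectMetadata, err := d.CreateObject("foo6", "obj", "application/json", expectedMd5Sum, int64(len(data)), reader)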
@@ -309,7 +312,7 @@ func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
         expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
         if err != nil {
             // pro-actively close the connection
-            return "", iodine.New(InvalidDigest{Md5: expectedMD5Sum}, nil)
+            return ObjectMetadata{}, iodine.New(InvalidDigest{Md5: expectedMD5Sum}, nil)
         }
         expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
     }
@@ -332,7 +335,7 @@ func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
         readBytes = append(readBytes, byteBuffer[0:length]...)
     }
     if err != io.EOF {
-        return "", iodine.New(err, nil)
+        return ObjectMetadata{}, iodine.New(err, nil)
     }
     md5SumBytes := hash.Sum(nil)
     totalLength := len(readBytes)
@@ -344,14 +347,14 @@ func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
     go debug.FreeOSMemory()
     cache.lock.Unlock()
     if !ok {
-        return "", iodine.New(InternalError{}, nil)
+        return ObjectMetadata{}, iodine.New(InternalError{}, nil)
     }

     md5Sum := hex.EncodeToString(md5SumBytes)
     // Verify if the written object is equal to what is expected, only if it is requested as such
     if strings.TrimSpace(expectedMD5Sum) != "" {
         if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil {
-            return "", iodine.New(BadDigest{}, nil)
+            return ObjectMetadata{}, iodine.New(BadDigest{}, nil)
         }
     }

@@ -371,11 +374,11 @@ func (cache Cache) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
     storedBucket.objectMetadata[objectKey] = newObject
     cache.storedBuckets[bucket] = storedBucket
     cache.lock.Unlock()
-    return newObject.MD5Sum, nil
+    return newObject, nil
 }

-// CreateBucket - create bucket in cache
-func (cache Cache) CreateBucket(bucketName, acl string) error {
+// MakeBucket - create bucket in cache
+func (cache Cache) MakeBucket(bucketName, acl string) error {
     cache.lock.RLock()
     if len(cache.storedBuckets) == totalBuckets {
         cache.lock.RUnlock()
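createObject now returns the whole newObject record rather than just its MD5Sum, and CreateBucket is renamed MakeBucket, matching the donut-level name the old tests already called. A small hypothetical wrapper over the renamed API (ensureBucket is not part of this commit; "private" is the ACL used throughout its tests):

// hypothetical helper around the renamed API
func ensureBucket(cache Cache, name string) error {
    if err := cache.MakeBucket(name, "private"); err != nil {
        // empty names and duplicates still fail, as the tests below assert
        return iodine.New(err, nil)
    }
    return nil
}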
@@ -418,22 +421,21 @@ func (cache Cache) CreateBucket(bucketName, acl string) error {
     return nil
 }

-func (cache Cache) filterDelimiterPrefix(keys []string, key, prefix, delim string) ([]string, []string) {
-    var commonPrefixes []string
+func (cache Cache) filterDelimiterPrefix(keys []string, commonPrefixes []string, key, prefix, delim string) ([]string, []string) {
     switch true {
     case key == prefix:
         keys = append(keys, key)
     // delim - requires r.Prefix as it was trimmed off earlier
     case key == prefix+delim:
         keys = append(keys, key)
+        fallthrough
     case delim != "":
         commonPrefixes = append(commonPrefixes, prefix+delim)
     }
-    return RemoveDuplicates(keys), RemoveDuplicates(commonPrefixes)
+    return keys, commonPrefixes
 }

-func (cache Cache) listObjects(keys []string, key string, r BucketResourcesMetadata) ([]string, []string) {
-    var commonPrefixes []string
+func (cache Cache) listObjects(keys []string, commonPrefixes []string, key string, r BucketResourcesMetadata) ([]string, []string) {
     switch true {
     // Prefix absent, delimit object key based on delimiter
     case r.IsDelimiterSet():
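The new fallthrough is the subtle part of filterDelimiterPrefix: a key that exactly equals prefix+delim is kept as a key and then falls through into the delim != "" case, so prefix+delim is also recorded as a common prefix. A self-contained, runnable sketch of that switch shape (filter is a stand-in name, not the package function):

package main

import "fmt"

// mirrors the filterDelimiterPrefix switch: a key matching prefix+delim
// is kept AND contributes a common prefix via fallthrough
func filter(keys, commonPrefixes []string, key, prefix, delim string) ([]string, []string) {
    switch true {
    case key == prefix:
        keys = append(keys, key)
    case key == prefix+delim:
        keys = append(keys, key)
        fallthrough
    case delim != "":
        commonPrefixes = append(commonPrefixes, prefix+delim)
    }
    return keys, commonPrefixes
}

func main() {
    keys, prefixes := filter(nil, nil, "obj1", "obj", "1")
    fmt.Println(keys, prefixes) // [obj1] [obj1], matching the test's CommonPrefixes[0] == "obj1"
}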
@@ -449,7 +451,7 @@ func (cache Cache) listObjects(keys []string, key string, r BucketResourcesMetadata) ([]string, []string) {
         if strings.HasPrefix(key, r.Prefix) {
             trimmedName := strings.TrimPrefix(key, r.Prefix)
             delim := Delimiter(trimmedName, r.Delimiter)
-            keys, commonPrefixes = cache.filterDelimiterPrefix(keys, key, r.Prefix, delim)
+            keys, commonPrefixes = cache.filterDelimiterPrefix(keys, commonPrefixes, key, r.Prefix, delim)
         }
     // Prefix present, nothing to delimit
     case r.IsPrefixSet():
@@ -458,7 +460,7 @@ func (cache Cache) listObjects(keys []string, key string, r BucketResourcesMetadata) ([]string, []string) {
     case r.IsDefault():
         keys = append(keys, key)
     }
-    return RemoveDuplicates(keys), RemoveDuplicates(commonPrefixes)
+    return keys, commonPrefixes
 }

 // ListObjects - list objects from cache
@@ -468,7 +470,7 @@ func (cache Cache) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error) {
     if !IsValidBucket(bucket) {
         return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
     }
-    if !IsValidObjectName(resources.Prefix) {
+    if !IsValidPrefix(resources.Prefix) {
         return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(ObjectNameInvalid{Object: resources.Prefix}, nil)
     }
     if _, ok := cache.storedBuckets[bucket]; ok == false {
@@ -476,12 +478,11 @@ func (cache Cache) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error) {
     }
     var results []ObjectMetadata
     var keys []string
-    var commonPrefixes []string
     storedBucket := cache.storedBuckets[bucket]
     for key := range storedBucket.objectMetadata {
         if strings.HasPrefix(key, bucket+"/") {
             key = key[len(bucket)+1:]
-            keys, commonPrefixes = cache.listObjects(keys, key, resources)
+            keys, resources.CommonPrefixes = cache.listObjects(keys, resources.CommonPrefixes, key, resources)
         }
     }
     var newKeys []string
@@ -508,7 +509,7 @@ func (cache Cache) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error) {
         object := storedBucket.objectMetadata[bucket+"/"+key]
         results = append(results, object)
     }
-    resources.CommonPrefixes = commonPrefixes
+    resources.CommonPrefixes = RemoveDuplicates(resources.CommonPrefixes)
     return results, resources, nil
 }

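Taken together, the listing changes thread one accumulating commonPrefixes slice through filterDelimiterPrefix and listObjects, and the dedup now runs once at the end of ListObjects instead of on every per-key call. A self-contained sketch of the accumulate-then-dedup pattern (removeDuplicates models the assumed order-preserving behavior of the package's RemoveDuplicates):

package main

import (
    "fmt"
    "strings"
)

// assumed behavior of RemoveDuplicates: order-preserving dedup
func removeDuplicates(in []string) (out []string) {
    seen := make(map[string]bool)
    for _, s := range in {
        if !seen[s] {
            seen[s] = true
            out = append(out, s)
        }
    }
    return out
}

func main() {
    var commonPrefixes []string
    // accumulate across keys, dedup once at the end, as ListObjects now does
    for _, key := range []string{"photos/a", "photos/b", "docs/x"} {
        if i := strings.Index(key, "/"); i >= 0 {
            commonPrefixes = append(commonPrefixes, key[:i+1])
        }
    }
    fmt.Println(removeDuplicates(commonPrefixes)) // [photos/ docs/]
}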
@@ -19,20 +19,23 @@ package donut
 import (
     "bytes"
     "crypto/md5"
+    "encoding/base64"
     "encoding/hex"
-    "io"
     "io/ioutil"
     "os"
     "path/filepath"
     "strconv"
     "testing"
+    "time"

     . "github.com/minio/check"
 )

 func Test(t *testing.T) { TestingT(t) }

-type MySuite struct{}
+type MySuite struct {
+    root string
+}

 var _ = Suite(&MySuite{})

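The struct change above supports the new suite-level fixture: one shared Cache is built in SetUpSuite and the temp root is remembered so TearDownSuite can remove it. Note the trade-off: the tests now share bucket state, so count assertions such as Equals, 5 below only hold for the suite's fixed execution order (gocheck discovers Test* methods via reflection, which appears to yield them in name order). The fixture's shape, condensed from the next hunk:

// condensed sketch of the fixture introduced below, not the full file
var d Cache // one shared cache for every test in the suite

func (s *MySuite) SetUpSuite(c *C) {
    root, err := ioutil.TempDir(os.TempDir(), "donut-")
    c.Assert(err, IsNil)
    s.root = root // remembered so TearDownSuite can remove it
    d = NewCache(100000, time.Duration(1*time.Hour), "test", createTestNodeDiskMap(root))
}

func (s *MySuite) TearDownSuite(c *C) {
    os.RemoveAll(s.root)
}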
@@ -52,293 +55,224 @@ func createTestNodeDiskMap(p string) map[string][]string {
     return nodes
 }

-// test empty donut
-func (s *MySuite) TestEmptyDonut(c *C) {
+var d Cache
+func (s *MySuite) SetUpSuite(c *C) {
     root, err := ioutil.TempDir(os.TempDir(), "donut-")
     c.Assert(err, IsNil)
-    defer os.RemoveAll(root)
-    donut, err := NewDonut("test", createTestNodeDiskMap(root))
+    s.root = root
+    d = NewCache(100000, time.Duration(1*time.Hour), "test", createTestNodeDiskMap(root))
+    buckets, err := d.ListBuckets()
     c.Assert(err, IsNil)
+    c.Assert(len(buckets), Equals, 0)
+}

-    // check donut is empty
-    metadata, err := donut.ListBuckets()
-    c.Assert(err, IsNil)
-    c.Assert(len(metadata), Equals, 0)
+func (s *MySuite) TearDownSuite(c *C) {
+    os.RemoveAll(s.root)
 }

 // test make bucket without name
 func (s *MySuite) TestBucketWithoutNameFails(c *C) {
-    root, err := ioutil.TempDir(os.TempDir(), "donut-")
-    c.Assert(err, IsNil)
-    defer os.RemoveAll(root)
-    donut, err := NewDonut("test", createTestNodeDiskMap(root))
-    c.Assert(err, IsNil)
     // fail to create new bucket without a name
-    err = donut.MakeBucket("", "private")
+    err := d.MakeBucket("", "private")
     c.Assert(err, Not(IsNil))

-    err = donut.MakeBucket(" ", "private")
+    err = d.MakeBucket(" ", "private")
     c.Assert(err, Not(IsNil))
 }

 // test empty bucket
 func (s *MySuite) TestEmptyBucket(c *C) {
-    root, err := ioutil.TempDir(os.TempDir(), "donut-")
-    c.Assert(err, IsNil)
-    defer os.RemoveAll(root)
-    donut, err := NewDonut("test", createTestNodeDiskMap(root))
-    c.Assert(err, IsNil)
+    c.Assert(d.MakeBucket("foo1", "private"), IsNil)

-    c.Assert(donut.MakeBucket("foo", BucketACL("private")), IsNil)
     // check if bucket is empty
-    listObjects, err := donut.ListObjects("foo", "", "", "", 1)
+    var resources BucketResourcesMetadata
+    resources.Maxkeys = 1
+    objectsMetadata, resources, err := d.ListObjects("foo1", resources)
     c.Assert(err, IsNil)
-    c.Assert(len(listObjects.Objects), Equals, 0)
-    c.Assert(listObjects.CommonPrefixes, DeepEquals, []string{})
-    c.Assert(listObjects.IsTruncated, Equals, false)
+    c.Assert(len(objectsMetadata), Equals, 0)
+    c.Assert(resources.CommonPrefixes, DeepEquals, []string{})
+    c.Assert(resources.IsTruncated, Equals, false)
 }

 // test bucket list
 func (s *MySuite) TestMakeBucketAndList(c *C) {
-    root, err := ioutil.TempDir(os.TempDir(), "donut-")
-    c.Assert(err, IsNil)
-    defer os.RemoveAll(root)
-    donut, err := NewDonut("test", createTestNodeDiskMap(root))
-    c.Assert(err, IsNil)
     // create bucket
-    err = donut.MakeBucket("foo", BucketACL("private"))
+    err := d.MakeBucket("foo2", "private")
     c.Assert(err, IsNil)

     // check bucket exists
-    buckets, err := donut.ListBuckets()
+    buckets, err := d.ListBuckets()
     c.Assert(err, IsNil)
-    c.Assert(len(buckets), Equals, 1)
-    c.Assert(buckets["foo"].ACL, Equals, BucketACL("private"))
+    c.Assert(len(buckets), Equals, 5)
+    c.Assert(buckets[0].ACL, Equals, BucketACL("private"))
 }

 // test re-create bucket
 func (s *MySuite) TestMakeBucketWithSameNameFails(c *C) {
-    root, err := ioutil.TempDir(os.TempDir(), "donut-")
-    c.Assert(err, IsNil)
-    defer os.RemoveAll(root)
-    donut, err := NewDonut("test", createTestNodeDiskMap(root))
-    c.Assert(err, IsNil)
-    err = donut.MakeBucket("foo", BucketACL("private"))
+    err := d.MakeBucket("foo3", "private")
     c.Assert(err, IsNil)

-    err = donut.MakeBucket("foo", BucketACL("private"))
+    err = d.MakeBucket("foo3", "private")
     c.Assert(err, Not(IsNil))
 }

 // test make multiple buckets
 func (s *MySuite) TestCreateMultipleBucketsAndList(c *C) {
-    root, err := ioutil.TempDir(os.TempDir(), "donut-")
-    c.Assert(err, IsNil)
-    defer os.RemoveAll(root)
-    donut, err := NewDonut("test", createTestNodeDiskMap(root))
-    c.Assert(err, IsNil)
     // add a second bucket
-    err = donut.MakeBucket("foo", BucketACL("private"))
+    err := d.MakeBucket("foo4", "private")
     c.Assert(err, IsNil)

-    err = donut.MakeBucket("bar", BucketACL("private"))
+    err = d.MakeBucket("bar1", "private")
     c.Assert(err, IsNil)

-    buckets, err := donut.ListBuckets()
+    buckets, err := d.ListBuckets()
     c.Assert(err, IsNil)

-    _, ok := buckets["foo"]
-    c.Assert(ok, Equals, true)
-    _, ok = buckets["bar"]
-    c.Assert(ok, Equals, true)
+    c.Assert(len(buckets), Equals, 2)
+    c.Assert(buckets[0].Name, Equals, "bar1")
+    c.Assert(buckets[1].Name, Equals, "foo4")

-    err = donut.MakeBucket("foobar", BucketACL("private"))
+    err = d.MakeBucket("foobar1", "private")
     c.Assert(err, IsNil)

-    buckets, err = donut.ListBuckets()
+    buckets, err = d.ListBuckets()
     c.Assert(err, IsNil)
-    _, ok = buckets["foobar"]
-    c.Assert(ok, Equals, true)
+    c.Assert(len(buckets), Equals, 3)
+    c.Assert(buckets[2].Name, Equals, "foobar1")
 }

 // test object create without bucket
 func (s *MySuite) TestNewObjectFailsWithoutBucket(c *C) {
-    root, err := ioutil.TempDir(os.TempDir(), "donut-")
-    c.Assert(err, IsNil)
-    defer os.RemoveAll(root)
-    donut, err := NewDonut("test", createTestNodeDiskMap(root))
-    c.Assert(err, IsNil)
-    _, err = donut.PutObject("foo", "obj", "", nil, nil)
+    _, err := d.CreateObject("unknown", "obj", "", "", 0, nil)
     c.Assert(err, Not(IsNil))
 }

 // test create object metadata
 func (s *MySuite) TestNewObjectMetadata(c *C) {
-    root, err := ioutil.TempDir(os.TempDir(), "donut-")
-    c.Assert(err, IsNil)
-    defer os.RemoveAll(root)
-    donut, err := NewDonut("test", createTestNodeDiskMap(root))
-    c.Assert(err, IsNil)
-
-    metadata := make(map[string]string)
-    metadata["contentType"] = "application/json"
-    metadata["foo"] = "value1"
-    metadata["hello"] = "world"
-
     data := "Hello World"
     hasher := md5.New()
     hasher.Write([]byte(data))
-    expectedMd5Sum := hex.EncodeToString(hasher.Sum(nil))
+    expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
     reader := ioutil.NopCloser(bytes.NewReader([]byte(data)))
-    metadata["contentLength"] = strconv.Itoa(len(data))

-    err = donut.MakeBucket("foo", "private")
+    err := d.MakeBucket("foo6", "private")
     c.Assert(err, IsNil)

-    objectMetadata, err := donut.PutObject("foo", "obj", expectedMd5Sum, reader, metadata)
+    objectMetadata, err := d.CreateObject("foo6", "obj", "application/json", expectedMd5Sum, int64(len(data)), reader)
     c.Assert(err, IsNil)
-    c.Assert(objectMetadata.MD5Sum, Equals, expectedMd5Sum)
-    c.Assert(objectMetadata.Metadata["contentType"], Equals, metadata["contentType"])
-    c.Assert(objectMetadata.Metadata["foo"], Equals, metadata["foo"])
-    c.Assert(objectMetadata.Metadata["hello"], Equals, metadata["hello"])
+    c.Assert(objectMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil)))
+    c.Assert(objectMetadata.Metadata["contentType"], Equals, "application/json")
 }

 // test create object fails without name
 func (s *MySuite) TestNewObjectFailsWithEmptyName(c *C) {
-    root, err := ioutil.TempDir(os.TempDir(), "donut-")
-    c.Assert(err, IsNil)
-    defer os.RemoveAll(root)
-    donut, err := NewDonut("test", createTestNodeDiskMap(root))
-    c.Assert(err, IsNil)
-
-    _, err = donut.PutObject("foo", "", "", nil, nil)
+    _, err := d.CreateObject("foo", "", "", "", 0, nil)
     c.Assert(err, Not(IsNil))
 }

 // test create object
 func (s *MySuite) TestNewObjectCanBeWritten(c *C) {
-    root, err := ioutil.TempDir(os.TempDir(), "donut-")
-    c.Assert(err, IsNil)
-    defer os.RemoveAll(root)
-    donut, err := NewDonut("test", createTestNodeDiskMap(root))
+    err := d.MakeBucket("foo", "private")
     c.Assert(err, IsNil)

-    err = donut.MakeBucket("foo", "private")
-    c.Assert(err, IsNil)
-
-    metadata := make(map[string]string)
-    metadata["contentType"] = "application/octet-stream"
     data := "Hello World"

     hasher := md5.New()
     hasher.Write([]byte(data))
-    expectedMd5Sum := hex.EncodeToString(hasher.Sum(nil))
+    expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
     reader := ioutil.NopCloser(bytes.NewReader([]byte(data)))
-    metadata["contentLength"] = strconv.Itoa(len(data))

-    actualMetadata, err := donut.PutObject("foo", "obj", expectedMd5Sum, reader, metadata)
+    actualMetadata, err := d.CreateObject("foo", "obj", "application/octet-stream", expectedMd5Sum, int64(len(data)), reader)
     c.Assert(err, IsNil)
-    c.Assert(actualMetadata.MD5Sum, Equals, expectedMd5Sum)
+    c.Assert(actualMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil)))

-    reader, size, err := donut.GetObject("foo", "obj")
+    var buffer bytes.Buffer
+    size, err := d.GetObject(&buffer, "foo", "obj")
     c.Assert(err, IsNil)
     c.Assert(size, Equals, int64(len(data)))
+    c.Assert(buffer.Bytes(), DeepEquals, []byte(data))

-    var actualData bytes.Buffer
-    _, err = io.Copy(&actualData, reader)
+    actualMetadata, err = d.GetObjectMetadata("foo", "obj")
     c.Assert(err, IsNil)
-    c.Assert(actualData.Bytes(), DeepEquals, []byte(data))
-
-    actualMetadata, err = donut.GetObjectMetadata("foo", "obj")
-    c.Assert(err, IsNil)
-    c.Assert(expectedMd5Sum, Equals, actualMetadata.MD5Sum)
+    c.Assert(hex.EncodeToString(hasher.Sum(nil)), Equals, actualMetadata.MD5Sum)
     c.Assert(int64(len(data)), Equals, actualMetadata.Size)
-    c.Assert("1.0.0", Equals, actualMetadata.Version)
 }

 // test list objects
 func (s *MySuite) TestMultipleNewObjects(c *C) {
-    root, err := ioutil.TempDir(os.TempDir(), "donut-")
-    c.Assert(err, IsNil)
-    defer os.RemoveAll(root)
-    donut, err := NewDonut("test", createTestNodeDiskMap(root))
-    c.Assert(err, IsNil)
-
-    c.Assert(donut.MakeBucket("foo", BucketACL("private")), IsNil)
+    c.Assert(d.MakeBucket("foo5", "private"), IsNil)

     one := ioutil.NopCloser(bytes.NewReader([]byte("one")))
-    metadata := make(map[string]string)
-    metadata["contentLength"] = strconv.Itoa(len("one"))
-
-    _, err = donut.PutObject("foo", "obj1", "", one, metadata)
+    _, err := d.CreateObject("foo5", "obj1", "", "", int64(len("one")), one)
     c.Assert(err, IsNil)

     two := ioutil.NopCloser(bytes.NewReader([]byte("two")))
-    metadata["contentLength"] = strconv.Itoa(len("two"))
-    _, err = donut.PutObject("foo", "obj2", "", two, metadata)
+    _, err = d.CreateObject("foo5", "obj2", "", "", int64(len("two")), two)
     c.Assert(err, IsNil)

-    obj1, size, err := donut.GetObject("foo", "obj1")
+    var buffer1 bytes.Buffer
+    size, err := d.GetObject(&buffer1, "foo5", "obj1")
     c.Assert(err, IsNil)
     c.Assert(size, Equals, int64(len([]byte("one"))))
+    c.Assert(buffer1.Bytes(), DeepEquals, []byte("one"))

-    var readerBuffer1 bytes.Buffer
-    _, err = io.CopyN(&readerBuffer1, obj1, size)
-    c.Assert(err, IsNil)
-    c.Assert(readerBuffer1.Bytes(), DeepEquals, []byte("one"))
-
-    obj2, size, err := donut.GetObject("foo", "obj2")
+    var buffer2 bytes.Buffer
+    size, err = d.GetObject(&buffer2, "foo5", "obj2")
     c.Assert(err, IsNil)
     c.Assert(size, Equals, int64(len([]byte("two"))))
-
-    var readerBuffer2 bytes.Buffer
-    _, err = io.CopyN(&readerBuffer2, obj2, size)
-    c.Assert(err, IsNil)
-    c.Assert(readerBuffer2.Bytes(), DeepEquals, []byte("two"))
+    c.Assert(buffer2.Bytes(), DeepEquals, []byte("two"))

     /// test list of objects

     // test list objects with prefix and delimiter
-    listObjects, err := donut.ListObjects("foo", "o", "", "1", 10)
+    var resources BucketResourcesMetadata
+    resources.Prefix = "o"
+    resources.Delimiter = "1"
+    resources.Maxkeys = 10
+    objectsMetadata, resources, err := d.ListObjects("foo5", resources)
     c.Assert(err, IsNil)
-    c.Assert(listObjects.IsTruncated, Equals, false)
-    c.Assert(listObjects.CommonPrefixes[0], Equals, "obj1")
+    c.Assert(resources.IsTruncated, Equals, false)
+    c.Assert(resources.CommonPrefixes[0], Equals, "obj1")

     // test list objects with only delimiter
-    listObjects, err = donut.ListObjects("foo", "", "", "1", 10)
+    resources.Prefix = ""
+    resources.Delimiter = "1"
+    resources.Maxkeys = 10
+    objectsMetadata, resources, err = d.ListObjects("foo5", resources)
     c.Assert(err, IsNil)
-    _, ok := listObjects.Objects["obj2"]
-    c.Assert(ok, Equals, true)
-    c.Assert(listObjects.IsTruncated, Equals, false)
-    c.Assert(listObjects.CommonPrefixes[0], Equals, "obj1")
+    c.Assert(objectsMetadata[0].Object, Equals, "obj1")
+    c.Assert(resources.IsTruncated, Equals, false)
+    c.Assert(resources.CommonPrefixes[0], Equals, "obj1")

     // test list objects with only prefix
-    listObjects, err = donut.ListObjects("foo", "o", "", "", 10)
+    resources.Prefix = "o"
+    resources.Delimiter = ""
+    resources.Maxkeys = 10
+    objectsMetadata, resources, err = d.ListObjects("foo5", resources)
     c.Assert(err, IsNil)
-    c.Assert(listObjects.IsTruncated, Equals, false)
-    _, ok1 := listObjects.Objects["obj1"]
-    _, ok2 := listObjects.Objects["obj2"]
-    c.Assert(ok1, Equals, true)
-    c.Assert(ok2, Equals, true)
+    c.Assert(resources.IsTruncated, Equals, false)
+    c.Assert(objectsMetadata[0].Object, Equals, "obj1")
+    c.Assert(objectsMetadata[1].Object, Equals, "obj2")

     three := ioutil.NopCloser(bytes.NewReader([]byte("three")))
-    metadata["contentLength"] = strconv.Itoa(len("three"))
-    _, err = donut.PutObject("foo", "obj3", "", three, metadata)
+    _, err = d.CreateObject("foo5", "obj3", "", "", int64(len("three")), three)
     c.Assert(err, IsNil)

-    obj3, size, err := donut.GetObject("foo", "obj3")
+    var buffer bytes.Buffer
+    size, err = d.GetObject(&buffer, "foo5", "obj3")
     c.Assert(err, IsNil)
     c.Assert(size, Equals, int64(len([]byte("three"))))
+    c.Assert(buffer.Bytes(), DeepEquals, []byte("three"))

-    var readerBuffer3 bytes.Buffer
-    _, err = io.CopyN(&readerBuffer3, obj3, size)
-    c.Assert(err, IsNil)
-    c.Assert(readerBuffer3.Bytes(), DeepEquals, []byte("three"))
-
     // test list objects with maxkeys
-    listObjects, err = donut.ListObjects("foo", "o", "", "", 2)
+    resources.Prefix = "o"
+    resources.Delimiter = ""
+    resources.Maxkeys = 2
+    objectsMetadata, resources, err = d.ListObjects("foo5", resources)
     c.Assert(err, IsNil)
-    c.Assert(listObjects.IsTruncated, Equals, true)
-    c.Assert(len(listObjects.Objects), Equals, 2)
+    c.Assert(resources.IsTruncated, Equals, true)
+    c.Assert(len(objectsMetadata), Equals, 2)
 }
@@ -181,7 +181,7 @@ func IsValidBucket(bucket string) bool {
 // - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
 func IsValidObjectName(object string) bool {
     if strings.TrimSpace(object) == "" {
-        return true
+        return false
     }
     if len(object) > 1024 || len(object) == 0 {
         return false
@@ -191,3 +191,11 @@ func IsValidObjectName(object string) bool {
     }
     return true
 }
+
+// IsValidPrefix - verify prefix name is correct, an empty prefix is valid
+func IsValidPrefix(prefix string) bool {
+    if strings.TrimSpace(prefix) == "" {
+        return true
+    }
+    return IsValidObjectName(prefix)
+}
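These two hunks split validation: an empty object name is now rejected (it previously slipped through as valid), while the new IsValidPrefix explicitly accepts the empty prefix so that ListObjects with no prefix still validates. A runnable sketch of that split, with the length checks beyond the empty case elided:

package main

import (
    "fmt"
    "strings"
)

// new behavior: empty object names are rejected outright
func isValidObjectName(object string) bool {
    if strings.TrimSpace(object) == "" {
        return false
    }
    return len(object) <= 1024 // remaining checks elided in this sketch
}

// an empty prefix is valid; anything else must be a valid object name
func isValidPrefix(prefix string) bool {
    if strings.TrimSpace(prefix) == "" {
        return true
    }
    return isValidObjectName(prefix)
}

func main() {
    fmt.Println(isValidObjectName("")) // false: names must be non-empty
    fmt.Println(isValidPrefix(""))     // true: listing without a prefix is allowed
}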