Breakaway from driver model, move cache into donut

Harshavardhana 2015-06-30 16:30:02 -07:00
parent 72572d6c71
commit dc0df3dc0e
25 changed files with 346 additions and 2072 deletions
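In broad strokes: the in-memory cache, previously a standalone drivers.Driver implementation that stacked on top of another driver, becomes private wiring inside the donut struct, and the generic drivers types move into the donut package. A minimal before/after sketch pieced together from the diffs below (error handling elided; whether the methods hang off the Donut interface or the concrete type is not shown here):

    // Before: cache instantiated as a generic driver, optionally stacked
    // on top of another drivers.Driver (donutDriver stands in for any).
    cacheDriver, _ := cache.NewDriver(maxSize, expiration, donutDriver)
    cacheDriver.CreateBucket("bucket", "private")

    // After: one constructor; the cache is internal to donut itself.
    d, _ := donut.NewDonut("mydonut", nodeDiskMap)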

View File

@ -4,7 +4,7 @@ import (
"os/user"
"github.com/minio/cli"
"github.com/minio/minio/pkg/server"
"github.com/minio/minio/pkg/api"
)
func removeDuplicates(slice []string) []string {
@ -67,13 +67,9 @@ func runServer(c *cli.Context) {
cli.ShowCommandHelpAndExit(c, "server", 1) // last argument is exit code
}
apiServerConfig := getAPIServerConfig(c)
s := server.Factory{
Config: apiServerConfig,
if err := api.Start(apiServerConfig); err != nil {
Fatalln(err)
}
apiServer := s.GetStartServerFunc()
// webServer := getWebServerConfigFunc(c)
servers := []server.StartServerFunc{apiServer} //, webServer}
server.StartMinio(servers)
}
func runController(c *cli.Context) {
@ -84,12 +80,4 @@ func runController(c *cli.Context) {
if len(c.Args()) < 1 {
cli.ShowCommandHelpAndExit(c, "control", 1) // last argument is exit code
}
apiServerConfig := getAPIServerConfig(c)
s := server.Factory{
Config: apiServerConfig,
}
apiServer := s.GetStartServerFunc()
// webServer := getWebServerConfigFunc(c)
servers := []server.StartServerFunc{apiServer} //, webServer}
server.StartMinio(servers)
}
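Read together, the two hunks above reduce runServer to three steps: build the config, hand it to the api package, fail loudly. The post-commit shape, as a sketch (assuming api.Start blocks until the listener exits; Fatalln is the CLI's existing fatal logger):

    func runServer(c *cli.Context) {
        apiServerConfig := getAPIServerConfig(c)
        if err := api.Start(apiServerConfig); err != nil {
            Fatalln(err)
        }
    }

The server.Factory / GetStartServerFunc / StartMinio indirection and the commented-out web-server plumbing disappear from both runServer and runController.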

main.go
View File

@ -26,8 +26,8 @@ import (
"github.com/dustin/go-humanize"
"github.com/minio/cli"
"github.com/minio/minio/pkg/api"
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/server/httpserver"
)
var globalDebugFlag = false
@ -78,7 +78,7 @@ func getAPIServerConfig(c *cli.Context) httpserver.Config {
Fatalln("Both certificate and key are required to enable https.")
}
tls := (certFile != "" && keyFile != "")
return httpserver.Config{
return api.Config{
Address: c.GlobalString("address"),
TLS: tls,
CertFile: certFile,
@ -87,21 +87,6 @@ func getAPIServerConfig(c *cli.Context) httpserver.Config {
}
}
/*
func getWebServerConfigFunc(c *cli.Context) server.StartServerFunc {
config := httpserver.Config{
Address: c.GlobalString("address-mgmt"),
TLS: false,
CertFile: "",
KeyFile: "",
}
webDrivers := server.WebFactory{
Config: config,
}
return webDrivers.GetStartServerFunc()
}
*/
// Tries to get os/arch/platform specific information
// Returns a map of current os/arch/platform/memstats
func getSystemData() map[string]string {

View File

@ -20,7 +20,7 @@ import (
"net/http"
"sort"
"github.com/minio/minio/pkg/storage/drivers"
"github.com/minio/minio/pkg/storage/donut"
)
// Reply date format
@ -34,7 +34,7 @@ const (
//
// output:
// populated struct that can be serialized to match xml and json api spec output
func generateListBucketsResponse(buckets []drivers.BucketMetadata) ListBucketsResponse {
func generateListBucketsResponse(buckets []donut.BucketMetadata) ListBucketsResponse {
var listbuckets []*Bucket
var data = ListBucketsResponse{}
var owner = Owner{}
@ -70,7 +70,7 @@ func (b itemKey) Less(i, j int) bool { return b[i].Key < b[j].Key }
//
// output:
// populated struct that can be serialized to match xml and json api spec output
func generateListObjectsResponse(bucket string, objects []drivers.ObjectMetadata, bucketResources drivers.BucketResourcesMetadata) ListObjectsResponse {
func generateListObjectsResponse(bucket string, objects []donut.ObjectMetadata, bucketResources donut.BucketResourcesMetadata) ListObjectsResponse {
var contents []*Object
var prefixes []*CommonPrefix
var owner = Owner{}
@ -131,7 +131,7 @@ func generateCompleteMultpartUploadResult(bucket, key, location, etag string) Co
}
// generateListPartsResult
func generateListPartsResult(objectMetadata drivers.ObjectResourcesMetadata) ListPartsResponse {
func generateListPartsResult(objectMetadata donut.ObjectResourcesMetadata) ListPartsResponse {
// TODO - support EncodingType in xml decoding
listPartsResponse := ListPartsResponse{}
listPartsResponse.Bucket = objectMetadata.Bucket
@ -161,7 +161,7 @@ func generateListPartsResult(objectMetadata drivers.ObjectResourcesMetadata) Lis
}
// generateListMultipartUploadsResult
func generateListMultipartUploadsResult(bucket string, metadata drivers.BucketMultipartResourcesMetadata) ListMultipartUploadsResponse {
func generateListMultipartUploadsResult(bucket string, metadata donut.BucketMultipartResourcesMetadata) ListMultipartUploadsResponse {
listMultipartUploadsResponse := ListMultipartUploadsResponse{}
listMultipartUploadsResponse.Bucket = bucket
listMultipartUploadsResponse.Delimiter = metadata.Delimiter
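Each generator keeps its behavior; only the metadata types now come from the donut package instead of drivers. A usage sketch for the bucket-listing path (field values are illustrative; Name and Created are fields of donut.BucketMetadata per the donut diff further down):

    buckets := []donut.BucketMetadata{
        {Name: "photos", Created: time.Now().UTC()},
    }
    response := generateListBucketsResponse(buckets)
    // response then serializes to the S3 ListAllMyBuckets XML/JSON shape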

View File

@ -20,7 +20,7 @@ import router "github.com/gorilla/mux"
type minioAPI struct{}
// Handler - api wrapper handler
// New api
func New(config Config) API {
var api = minioAPI{}

View File

@ -23,7 +23,7 @@ import (
"net/http"
"strconv"
"github.com/minio/minio/pkg/storage/drivers"
"github.com/minio/minio/pkg/storage/donut"
)
// No encoder interface exists, so we create one.
@ -62,7 +62,7 @@ func encodeErrorResponse(response interface{}, acceptsType contentType) []byte {
}
// Write object header
func setObjectHeaders(w http.ResponseWriter, metadata drivers.ObjectMetadata) {
func setObjectHeaders(w http.ResponseWriter, metadata donut.ObjectMetadata) {
lastModified := metadata.Created.Format(http.TimeFormat)
// common headers
setCommonHeaders(w, metadata.ContentType, int(metadata.Size))
@ -72,7 +72,7 @@ func setObjectHeaders(w http.ResponseWriter, metadata drivers.ObjectMetadata) {
}
// Write range object header
func setRangeObjectHeaders(w http.ResponseWriter, metadata drivers.ObjectMetadata, contentRange *httpRange) {
func setRangeObjectHeaders(w http.ResponseWriter, metadata donut.ObjectMetadata, contentRange *httpRange) {
// set common headers
setCommonHeaders(w, metadata.ContentType, int(metadata.Size))
// set object headers
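Taken together, a GET-object handler now flows entirely through donut types. A hedged sketch of the sequence these helpers support (the handler shape is an assumption; the three calls come from this commit's diffs):

    func getObject(w http.ResponseWriter, d donut, bucket, object string) {
        metadata, err := d.GetObjectMetadata(bucket, object)
        if err != nil {
            // encodeErrorResponse path, elided
            return
        }
        setObjectHeaders(w, metadata) // Last-Modified plus the common Content-Type/Content-Length headers
        d.GetObject(w, bucket, object)
    }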

View File

@ -20,11 +20,11 @@ import (
"net/url"
"strconv"
"github.com/minio/minio/pkg/storage/drivers"
"github.com/minio/minio/pkg/storage/donut"
)
// parse bucket url queries
func getBucketResources(values url.Values) (v drivers.BucketResourcesMetadata) {
func getBucketResources(values url.Values) (v donut.BucketResourcesMetadata) {
v.Prefix = values.Get("prefix")
v.Marker = values.Get("marker")
v.Maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
@ -34,7 +34,7 @@ func getBucketResources(values url.Values) (v drivers.BucketResourcesMetadata) {
}
// part bucket url queries for ?uploads
func getBucketMultipartResources(values url.Values) (v drivers.BucketMultipartResourcesMetadata) {
func getBucketMultipartResources(values url.Values) (v donut.BucketMultipartResourcesMetadata) {
v.Prefix = values.Get("prefix")
v.KeyMarker = values.Get("key-marker")
v.MaxUploads, _ = strconv.Atoi(values.Get("max-uploads"))
@ -45,7 +45,7 @@ func getBucketMultipartResources(values url.Values) (v drivers.BucketMultipartRe
}
// parse object url queries
func getObjectResources(values url.Values) (v drivers.ObjectResourcesMetadata) {
func getObjectResources(values url.Values) (v donut.ObjectResourcesMetadata) {
v.UploadID = values.Get("uploadId")
v.PartNumberMarker, _ = strconv.Atoi(values.Get("part-number-marker"))
v.MaxParts, _ = strconv.Atoi(values.Get("max-parts"))
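A quick usage sketch for these parsers (query values are illustrative):

    values, _ := url.ParseQuery("uploadId=abc123&part-number-marker=3&max-parts=64")
    res := getObjectResources(values)
    // res.UploadID == "abc123", res.PartNumberMarker == 3, res.MaxParts == 64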

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
package cache
package donut
import (
"bytes"
@ -32,28 +32,27 @@ import (
"time"
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/storage/drivers"
)
func (cache *cacheDriver) NewMultipartUpload(bucket, key, contentType string) (string, error) {
func (cache donut) NewMultipartUpload(bucket, key, contentType string) (string, error) {
cache.lock.RLock()
if !drivers.IsValidBucket(bucket) {
if !IsValidBucket(bucket) {
cache.lock.RUnlock()
return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
if !drivers.IsValidObjectName(key) {
if !IsValidObjectName(key) {
cache.lock.RUnlock()
return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil)
return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
}
if _, ok := cache.storedBuckets[bucket]; ok == false {
cache.lock.RUnlock()
return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := cache.storedBuckets[bucket]
objectKey := bucket + "/" + key
if _, ok := storedBucket.objectMetadata[objectKey]; ok == true {
cache.lock.RUnlock()
return "", iodine.New(drivers.ObjectExists{Bucket: bucket, Object: key}, nil)
return "", iodine.New(ObjectExists{Bucket: bucket, Object: key}, nil)
}
cache.lock.RUnlock()
@ -72,12 +71,12 @@ func (cache *cacheDriver) NewMultipartUpload(bucket, key, contentType string) (s
return uploadID, nil
}
func (cache *cacheDriver) AbortMultipartUpload(bucket, key, uploadID string) error {
func (cache donut) AbortMultipartUpload(bucket, key, uploadID string) error {
cache.lock.RLock()
storedBucket := cache.storedBuckets[bucket]
if storedBucket.multiPartSession[key].uploadID != uploadID {
cache.lock.RUnlock()
return iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil)
return iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
}
cache.lock.RUnlock()
@ -90,13 +89,13 @@ func getMultipartKey(key string, uploadID string, partNumber int) string {
return key + "?uploadId=" + uploadID + "&partNumber=" + strconv.Itoa(partNumber)
}
func (cache *cacheDriver) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
func (cache donut) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
// Verify upload id
cache.lock.RLock()
storedBucket := cache.storedBuckets[bucket]
if storedBucket.multiPartSession[key].uploadID != uploadID {
cache.lock.RUnlock()
return "", iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil)
return "", iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
}
cache.lock.RUnlock()
@ -110,19 +109,19 @@ func (cache *cacheDriver) CreateObjectPart(bucket, key, uploadID string, partID
}
// createObject - PUT object to cache buffer
func (cache *cacheDriver) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
func (cache donut) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
cache.lock.RLock()
if !drivers.IsValidBucket(bucket) {
if !IsValidBucket(bucket) {
cache.lock.RUnlock()
return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
if !drivers.IsValidObjectName(key) {
if !IsValidObjectName(key) {
cache.lock.RUnlock()
return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil)
return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
}
if _, ok := cache.storedBuckets[bucket]; ok == false {
cache.lock.RUnlock()
return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := cache.storedBuckets[bucket]
// get object key
@ -141,7 +140,7 @@ func (cache *cacheDriver) createObjectPart(bucket, key, uploadID string, partID
expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
if err != nil {
// pro-actively close the connection
return "", iodine.New(drivers.InvalidDigest{Md5: expectedMD5Sum}, nil)
return "", iodine.New(InvalidDigest{Md5: expectedMD5Sum}, nil)
}
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
}
@ -180,10 +179,10 @@ func (cache *cacheDriver) createObjectPart(bucket, key, uploadID string, partID
// Verify if the written object is equal to what is expected, only if it is requested as such
if strings.TrimSpace(expectedMD5Sum) != "" {
if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil {
return "", iodine.New(drivers.BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil)
return "", iodine.New(BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil)
}
}
newPart := drivers.PartMetadata{
newPart := PartMetadata{
PartNumber: partID,
LastModified: time.Now().UTC(),
ETag: md5Sum,
@ -201,36 +200,36 @@ func (cache *cacheDriver) createObjectPart(bucket, key, uploadID string, partID
return md5Sum, nil
}
func (cache *cacheDriver) cleanupMultipartSession(bucket, key, uploadID string) {
func (cache donut) cleanupMultipartSession(bucket, key, uploadID string) {
cache.lock.Lock()
defer cache.lock.Unlock()
delete(cache.storedBuckets[bucket].multiPartSession, key)
}
func (cache *cacheDriver) cleanupMultiparts(bucket, key, uploadID string) {
func (cache donut) cleanupMultiparts(bucket, key, uploadID string) {
for i := 1; i <= cache.storedBuckets[bucket].multiPartSession[key].totalParts; i++ {
objectKey := bucket + "/" + getMultipartKey(key, uploadID, i)
cache.multiPartObjects.Delete(objectKey)
}
}
func (cache *cacheDriver) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) {
if !drivers.IsValidBucket(bucket) {
return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
func (cache donut) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) {
if !IsValidBucket(bucket) {
return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
if !drivers.IsValidObjectName(key) {
return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil)
if !IsValidObjectName(key) {
return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
}
// Verify upload id
cache.lock.RLock()
if _, ok := cache.storedBuckets[bucket]; ok == false {
cache.lock.RUnlock()
return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := cache.storedBuckets[bucket]
if storedBucket.multiPartSession[key].uploadID != uploadID {
cache.lock.RUnlock()
return "", iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil)
return "", iodine.New(InvalidUploadID{UploadID: uploadID}, nil)
}
cache.lock.RUnlock()
@ -249,10 +248,10 @@ func (cache *cacheDriver) CompleteMultipartUpload(bucket, key, uploadID string,
// complete multi part request header md5sum per part is hex encoded
recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\""))
if err != nil {
return "", iodine.New(drivers.InvalidDigest{Md5: recvMD5}, nil)
return "", iodine.New(InvalidDigest{Md5: recvMD5}, nil)
}
if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) {
return "", iodine.New(drivers.BadDigest{Md5: recvMD5, Bucket: bucket, Key: getMultipartKey(key, uploadID, i)}, nil)
return "", iodine.New(BadDigest{Md5: recvMD5, Bucket: bucket, Key: getMultipartKey(key, uploadID, i)}, nil)
}
_, err = io.Copy(&fullObject, bytes.NewBuffer(object))
if err != nil {
@ -279,21 +278,21 @@ func (cache *cacheDriver) CompleteMultipartUpload(bucket, key, uploadID string,
}
// byKey is a sortable interface for UploadMetadata slice
type byKey []*drivers.UploadMetadata
type byKey []*UploadMetadata
func (a byKey) Len() int { return len(a) }
func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byKey) Less(i, j int) bool { return a[i].Key < a[j].Key }
func (cache *cacheDriver) ListMultipartUploads(bucket string, resources drivers.BucketMultipartResourcesMetadata) (drivers.BucketMultipartResourcesMetadata, error) {
func (cache donut) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, error) {
// TODO handle delimiter
cache.lock.RLock()
defer cache.lock.RUnlock()
if _, ok := cache.storedBuckets[bucket]; ok == false {
return drivers.BucketMultipartResourcesMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
return BucketMultipartResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := cache.storedBuckets[bucket]
var uploads []*drivers.UploadMetadata
var uploads []*UploadMetadata
for key, session := range storedBucket.multiPartSession {
if strings.HasPrefix(key, resources.Prefix) {
@ -309,7 +308,7 @@ func (cache *cacheDriver) ListMultipartUploads(bucket string, resources drivers.
switch {
case resources.KeyMarker != "" && resources.UploadIDMarker == "":
if key > resources.KeyMarker {
upload := new(drivers.UploadMetadata)
upload := new(UploadMetadata)
upload.Key = key
upload.UploadID = session.uploadID
upload.Initiated = session.initiated
@ -318,7 +317,7 @@ func (cache *cacheDriver) ListMultipartUploads(bucket string, resources drivers.
case resources.KeyMarker != "" && resources.UploadIDMarker != "":
if session.uploadID > resources.UploadIDMarker {
if key >= resources.KeyMarker {
upload := new(drivers.UploadMetadata)
upload := new(UploadMetadata)
upload.Key = key
upload.UploadID = session.uploadID
upload.Initiated = session.initiated
@ -326,7 +325,7 @@ func (cache *cacheDriver) ListMultipartUploads(bucket string, resources drivers.
}
}
default:
upload := new(drivers.UploadMetadata)
upload := new(UploadMetadata)
upload.Key = key
upload.UploadID = session.uploadID
upload.Initiated = session.initiated
@ -340,30 +339,30 @@ func (cache *cacheDriver) ListMultipartUploads(bucket string, resources drivers.
}
// partNumber is a sortable interface for Part slice
type partNumber []*drivers.PartMetadata
type partNumber []*PartMetadata
func (a partNumber) Len() int { return len(a) }
func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
func (cache *cacheDriver) ListObjectParts(bucket, key string, resources drivers.ObjectResourcesMetadata) (drivers.ObjectResourcesMetadata, error) {
func (cache donut) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, error) {
// Verify upload id
cache.lock.RLock()
defer cache.lock.RUnlock()
if _, ok := cache.storedBuckets[bucket]; ok == false {
return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
return ObjectResourcesMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := cache.storedBuckets[bucket]
if _, ok := storedBucket.multiPartSession[key]; ok == false {
return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: key}, nil)
return ObjectResourcesMetadata{}, iodine.New(ObjectNotFound{Bucket: bucket, Object: key}, nil)
}
if storedBucket.multiPartSession[key].uploadID != resources.UploadID {
return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.InvalidUploadID{UploadID: resources.UploadID}, nil)
return ObjectResourcesMetadata{}, iodine.New(InvalidUploadID{UploadID: resources.UploadID}, nil)
}
objectResourcesMetadata := resources
objectResourcesMetadata.Bucket = bucket
objectResourcesMetadata.Key = key
var parts []*drivers.PartMetadata
var parts []*PartMetadata
var startPartNumber int
switch {
case objectResourcesMetadata.PartNumberMarker == 0:
@ -381,7 +380,7 @@ func (cache *cacheDriver) ListObjectParts(bucket, key string, resources drivers.
}
part, ok := storedBucket.partMetadata[bucket+"/"+getMultipartKey(key, resources.UploadID, i)]
if !ok {
return drivers.ObjectResourcesMetadata{}, iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil)
return ObjectResourcesMetadata{}, iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil)
}
parts = append(parts, &part)
}
@ -390,7 +389,7 @@ func (cache *cacheDriver) ListObjectParts(bucket, key string, resources drivers.
return objectResourcesMetadata, nil
}
func (cache *cacheDriver) expiredPart(a ...interface{}) {
func (cache donut) expiredPart(a ...interface{}) {
key := a[0].(string)
// loop through all buckets
for _, storedBucket := range cache.storedBuckets {
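The full multipart lifecycle against the cache, end to end. A minimal sketch over the method set above (d is a donut value; empty contentType and expectedMD5Sum arguments are treated as optional by the code, and error handling is elided):

    payload := []byte("part one bytes")
    uploadID, _ := d.NewMultipartUpload("bucket", "key", "")
    parts := make(map[int]string)
    etag, _ := d.CreateObjectPart("bucket", "key", uploadID, 1, "", "",
        int64(len(payload)), bytes.NewReader(payload))
    parts[1] = etag
    // CompleteMultipartUpload re-verifies each part's MD5 before stitching
    finalETag, _ := d.CompleteMultipartUpload("bucket", "key", uploadID, parts)
    // or abandon the session instead:
    // _ = d.AbortMultipartUpload("bucket", "key", uploadID)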

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
package cache
package donut
import (
"bufio"
@ -29,42 +29,11 @@ import (
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/storage/drivers"
"github.com/minio/minio/pkg/storage/trove"
)
// cacheDriver - local variables
type cacheDriver struct {
storedBuckets map[string]storedBucket
lock *sync.RWMutex
objects *trove.Cache
multiPartObjects *trove.Cache
maxSize uint64
expiration time.Duration
// stacked driver
driver drivers.Driver
}
// storedBucket saved bucket
type storedBucket struct {
bucketMetadata drivers.BucketMetadata
objectMetadata map[string]drivers.ObjectMetadata
partMetadata map[string]drivers.PartMetadata
multiPartSession map[string]multiPartSession
}
// multiPartSession multipart session
type multiPartSession struct {
totalParts int
uploadID string
initiated time.Time
}
// total Number of buckets allowed
const (
totalBuckets = 100
@ -88,36 +57,18 @@ func newProxyWriter(w io.Writer) *proxyWriter {
return &proxyWriter{writer: w, writtenBytes: nil}
}
// NewDriver instantiate a new cache driver
func NewDriver(maxSize uint64, expiration time.Duration, driver drivers.Driver) (drivers.Driver, error) {
cache := new(cacheDriver)
cache.storedBuckets = make(map[string]storedBucket)
cache.maxSize = maxSize
cache.expiration = expiration
cache.objects = trove.NewCache(maxSize, expiration)
cache.multiPartObjects = trove.NewCache(0, time.Duration(0))
cache.lock = new(sync.RWMutex)
cache.objects.OnExpired = cache.expiredObject
cache.multiPartObjects.OnExpired = cache.expiredPart
// set up cache expiration
cache.objects.ExpireObjects(time.Second * 5)
return cache, nil
}
// GetObject - GET object from cache buffer
func (cache *cacheDriver) GetObject(w io.Writer, bucket string, object string) (int64, error) {
func (cache donut) GetObject(w io.Writer, bucket string, object string) (int64, error) {
cache.lock.RLock()
defer cache.lock.RUnlock()
if !drivers.IsValidBucket(bucket) {
return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
if !IsValidBucket(bucket) {
return 0, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
if !drivers.IsValidObjectName(object) {
return 0, iodine.New(drivers.ObjectNameInvalid{Object: object}, nil)
if !IsValidObjectName(object) {
return 0, iodine.New(ObjectNameInvalid{Object: object}, nil)
}
if _, ok := cache.storedBuckets[bucket]; ok == false {
return 0, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
return 0, iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
objectKey := bucket + "/" + object
data, ok := cache.objects.Get(objectKey)
@ -125,7 +76,7 @@ func (cache *cacheDriver) GetObject(w io.Writer, bucket string, object string) (
if cache.driver != nil {
return cache.driver.GetObject(w, bucket, object)
}
return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil)
return 0, iodine.New(ObjectNotFound{Bucket: bucket, Object: object}, nil)
}
written, err := io.Copy(w, bytes.NewBuffer(data))
if err != nil {
@ -135,7 +86,7 @@ func (cache *cacheDriver) GetObject(w io.Writer, bucket string, object string) (
}
// GetPartialObject - GET object from cache buffer range
func (cache *cacheDriver) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) {
func (cache donut) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) {
errParams := map[string]string{
"bucket": bucket,
"object": object,
@ -144,14 +95,14 @@ func (cache *cacheDriver) GetPartialObject(w io.Writer, bucket, object string, s
}
cache.lock.RLock()
defer cache.lock.RUnlock()
if !drivers.IsValidBucket(bucket) {
return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, errParams)
if !IsValidBucket(bucket) {
return 0, iodine.New(BucketNameInvalid{Bucket: bucket}, errParams)
}
if !drivers.IsValidObjectName(object) {
return 0, iodine.New(drivers.ObjectNameInvalid{Object: object}, errParams)
if !IsValidObjectName(object) {
return 0, iodine.New(ObjectNameInvalid{Object: object}, errParams)
}
if start < 0 {
return 0, iodine.New(drivers.InvalidRange{
return 0, iodine.New(InvalidRange{
Start: start,
Length: length,
}, errParams)
@ -162,7 +113,7 @@ func (cache *cacheDriver) GetPartialObject(w io.Writer, bucket, object string, s
if cache.driver != nil {
return cache.driver.GetPartialObject(w, bucket, object, start, length)
}
return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil)
return 0, iodine.New(ObjectNotFound{Bucket: bucket, Object: object}, nil)
}
written, err := io.CopyN(w, bytes.NewBuffer(data[start:]), length)
if err != nil {
@ -172,21 +123,21 @@ func (cache *cacheDriver) GetPartialObject(w io.Writer, bucket, object string, s
}
// GetBucketMetadata -
func (cache *cacheDriver) GetBucketMetadata(bucket string) (drivers.BucketMetadata, error) {
func (cache donut) GetBucketMetadata(bucket string) (BucketMetadata, error) {
cache.lock.RLock()
if !drivers.IsValidBucket(bucket) {
if !IsValidBucket(bucket) {
cache.lock.RUnlock()
return drivers.BucketMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
return BucketMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
if _, ok := cache.storedBuckets[bucket]; ok == false {
if cache.driver == nil {
cache.lock.RUnlock()
return drivers.BucketMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
return BucketMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
bucketMetadata, err := cache.driver.GetBucketMetadata(bucket)
if err != nil {
cache.lock.RUnlock()
return drivers.BucketMetadata{}, iodine.New(err, nil)
return BucketMetadata{}, iodine.New(err, nil)
}
storedBucket := cache.storedBuckets[bucket]
cache.lock.RUnlock()
@ -200,15 +151,15 @@ func (cache *cacheDriver) GetBucketMetadata(bucket string) (drivers.BucketMetada
}
// SetBucketMetadata -
func (cache *cacheDriver) SetBucketMetadata(bucket, acl string) error {
func (cache donut) SetBucketMetadata(bucket, acl string) error {
cache.lock.RLock()
if !drivers.IsValidBucket(bucket) {
if !IsValidBucket(bucket) {
cache.lock.RUnlock()
return iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
return iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
if _, ok := cache.storedBuckets[bucket]; ok == false {
cache.lock.RUnlock()
return iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
return iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
if strings.TrimSpace(acl) == "" {
acl = "private"
@ -221,7 +172,7 @@ func (cache *cacheDriver) SetBucketMetadata(bucket, acl string) error {
}
}
storedBucket := cache.storedBuckets[bucket]
storedBucket.bucketMetadata.ACL = drivers.BucketACL(acl)
storedBucket.bucketMetadata.ACL = BucketACL(acl)
cache.storedBuckets[bucket] = storedBucket
cache.lock.Unlock()
return nil
@ -246,10 +197,10 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error {
return iodine.New(errors.New("invalid argument"), nil)
}
func (cache *cacheDriver) CreateObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
func (cache donut) CreateObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
if size > int64(cache.maxSize) {
generic := drivers.GenericObjectError{Bucket: bucket, Object: key}
return "", iodine.New(drivers.EntityTooLarge{
generic := GenericObjectError{Bucket: bucket, Object: key}
return "", iodine.New(EntityTooLarge{
GenericObjectError: generic,
Size: strconv.FormatInt(size, 10),
MaxSize: strconv.FormatUint(cache.maxSize, 10),
@ -262,26 +213,26 @@ func (cache *cacheDriver) CreateObject(bucket, key, contentType, expectedMD5Sum
}
// createObject - PUT object to cache buffer
func (cache *cacheDriver) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
func (cache donut) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
cache.lock.RLock()
if !drivers.IsValidBucket(bucket) {
if !IsValidBucket(bucket) {
cache.lock.RUnlock()
return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
if !drivers.IsValidObjectName(key) {
if !IsValidObjectName(key) {
cache.lock.RUnlock()
return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil)
return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
}
if _, ok := cache.storedBuckets[bucket]; ok == false {
cache.lock.RUnlock()
return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := cache.storedBuckets[bucket]
// get object key
objectKey := bucket + "/" + key
if _, ok := storedBucket.objectMetadata[objectKey]; ok == true {
cache.lock.RUnlock()
return "", iodine.New(drivers.ObjectExists{Bucket: bucket, Object: key}, nil)
return "", iodine.New(ObjectExists{Bucket: bucket, Object: key}, nil)
}
cache.lock.RUnlock()
@ -293,7 +244,7 @@ func (cache *cacheDriver) createObject(bucket, key, contentType, expectedMD5Sum
expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
if err != nil {
// pro-actively close the connection
return "", iodine.New(drivers.InvalidDigest{Md5: expectedMD5Sum}, nil)
return "", iodine.New(InvalidDigest{Md5: expectedMD5Sum}, nil)
}
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
}
@ -328,18 +279,18 @@ func (cache *cacheDriver) createObject(bucket, key, contentType, expectedMD5Sum
go debug.FreeOSMemory()
cache.lock.Unlock()
if !ok {
return "", iodine.New(drivers.InternalError{}, nil)
return "", iodine.New(InternalError{}, nil)
}
md5Sum := hex.EncodeToString(md5SumBytes)
// Verify if the written object is equal to what is expected, only if it is requested as such
if strings.TrimSpace(expectedMD5Sum) != "" {
if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil {
return "", iodine.New(drivers.BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil)
return "", iodine.New(BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil)
}
}
newObject := drivers.ObjectMetadata{
newObject := ObjectMetadata{
Bucket: bucket,
Key: key,
@ -357,23 +308,23 @@ func (cache *cacheDriver) createObject(bucket, key, contentType, expectedMD5Sum
}
// CreateBucket - create bucket in cache
func (cache *cacheDriver) CreateBucket(bucketName, acl string) error {
func (cache donut) CreateBucket(bucketName, acl string) error {
cache.lock.RLock()
if len(cache.storedBuckets) == totalBuckets {
cache.lock.RUnlock()
return iodine.New(drivers.TooManyBuckets{Bucket: bucketName}, nil)
return iodine.New(TooManyBuckets{Bucket: bucketName}, nil)
}
if !drivers.IsValidBucket(bucketName) {
if !IsValidBucket(bucketName) {
cache.lock.RUnlock()
return iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil)
return iodine.New(BucketNameInvalid{Bucket: bucketName}, nil)
}
if !drivers.IsValidBucketACL(acl) {
if !IsValidBucketACL(acl) {
cache.lock.RUnlock()
return iodine.New(drivers.InvalidACL{ACL: acl}, nil)
return iodine.New(InvalidACL{ACL: acl}, nil)
}
if _, ok := cache.storedBuckets[bucketName]; ok == true {
cache.lock.RUnlock()
return iodine.New(drivers.BucketExists{Bucket: bucketName}, nil)
return iodine.New(BucketExists{Bucket: bucketName}, nil)
}
cache.lock.RUnlock()
@ -387,13 +338,13 @@ func (cache *cacheDriver) CreateBucket(bucketName, acl string) error {
}
}
var newBucket = storedBucket{}
newBucket.objectMetadata = make(map[string]drivers.ObjectMetadata)
newBucket.objectMetadata = make(map[string]ObjectMetadata)
newBucket.multiPartSession = make(map[string]multiPartSession)
newBucket.partMetadata = make(map[string]drivers.PartMetadata)
newBucket.bucketMetadata = drivers.BucketMetadata{}
newBucket.partMetadata = make(map[string]PartMetadata)
newBucket.bucketMetadata = BucketMetadata{}
newBucket.bucketMetadata.Name = bucketName
newBucket.bucketMetadata.Created = time.Now().UTC()
newBucket.bucketMetadata.ACL = drivers.BucketACL(acl)
newBucket.bucketMetadata.ACL = BucketACL(acl)
cache.lock.Lock()
cache.storedBuckets[bucketName] = newBucket
cache.lock.Unlock()
@ -418,7 +369,7 @@ func appendUniq(slice []string, i string) []string {
return append(slice, i)
}
func (cache *cacheDriver) filterDelimiterPrefix(keys []string, key, delim string, r drivers.BucketResourcesMetadata) ([]string, drivers.BucketResourcesMetadata) {
func (cache donut) filterDelimiterPrefix(keys []string, key, delim string, r BucketResourcesMetadata) ([]string, BucketResourcesMetadata) {
switch true {
case key == r.Prefix:
keys = appendUniq(keys, key)
@ -431,7 +382,7 @@ func (cache *cacheDriver) filterDelimiterPrefix(keys []string, key, delim string
return keys, r
}
func (cache *cacheDriver) listObjects(keys []string, key string, r drivers.BucketResourcesMetadata) ([]string, drivers.BucketResourcesMetadata) {
func (cache donut) listObjects(keys []string, key string, r BucketResourcesMetadata) ([]string, BucketResourcesMetadata) {
switch true {
// Prefix absent, delimit object key based on delimiter
case r.IsDelimiterSet():
@ -460,19 +411,19 @@ func (cache *cacheDriver) listObjects(keys []string, key string, r drivers.Bucke
}
// ListObjects - list objects from cache
func (cache *cacheDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) {
func (cache donut) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error) {
cache.lock.RLock()
defer cache.lock.RUnlock()
if !drivers.IsValidBucket(bucket) {
return nil, drivers.BucketResourcesMetadata{IsTruncated: false}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
if !IsValidBucket(bucket) {
return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
if !drivers.IsValidObjectName(resources.Prefix) {
return nil, drivers.BucketResourcesMetadata{IsTruncated: false}, iodine.New(drivers.ObjectNameInvalid{Object: resources.Prefix}, nil)
if !IsValidObjectName(resources.Prefix) {
return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(ObjectNameInvalid{Object: resources.Prefix}, nil)
}
if _, ok := cache.storedBuckets[bucket]; ok == false {
return nil, drivers.BucketResourcesMetadata{IsTruncated: false}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
return nil, BucketResourcesMetadata{IsTruncated: false}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
var results []drivers.ObjectMetadata
var results []ObjectMetadata
var keys []string
storedBucket := cache.storedBuckets[bucket]
for key := range storedBucket.objectMetadata {
@ -508,17 +459,17 @@ func (cache *cacheDriver) ListObjects(bucket string, resources drivers.BucketRes
}
// byBucketName is a type for sorting bucket metadata by bucket name
type byBucketName []drivers.BucketMetadata
type byBucketName []BucketMetadata
func (b byBucketName) Len() int { return len(b) }
func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }
// ListBuckets - List buckets from cache
func (cache *cacheDriver) ListBuckets() ([]drivers.BucketMetadata, error) {
func (cache donut) ListBuckets() ([]BucketMetadata, error) {
cache.lock.RLock()
defer cache.lock.RUnlock()
var results []drivers.BucketMetadata
var results []BucketMetadata
for _, bucket := range cache.storedBuckets {
results = append(results, bucket.bucketMetadata)
}
@ -527,20 +478,20 @@ func (cache *cacheDriver) ListBuckets() ([]drivers.BucketMetadata, error) {
}
// GetObjectMetadata - get object metadata from cache
func (cache *cacheDriver) GetObjectMetadata(bucket, key string) (drivers.ObjectMetadata, error) {
func (cache donut) GetObjectMetadata(bucket, key string) (ObjectMetadata, error) {
cache.lock.RLock()
// check if bucket exists
if !drivers.IsValidBucket(bucket) {
if !IsValidBucket(bucket) {
cache.lock.RUnlock()
return drivers.ObjectMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
return ObjectMetadata{}, iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
}
if !drivers.IsValidObjectName(key) {
if !IsValidObjectName(key) {
cache.lock.RUnlock()
return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNameInvalid{Object: key}, nil)
return ObjectMetadata{}, iodine.New(ObjectNameInvalid{Object: key}, nil)
}
if _, ok := cache.storedBuckets[bucket]; ok == false {
cache.lock.RUnlock()
return drivers.ObjectMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
return ObjectMetadata{}, iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := cache.storedBuckets[bucket]
objectKey := bucket + "/" + key
@ -552,7 +503,7 @@ func (cache *cacheDriver) GetObjectMetadata(bucket, key string) (drivers.ObjectM
objMetadata, err := cache.driver.GetObjectMetadata(bucket, key)
cache.lock.RUnlock()
if err != nil {
return drivers.ObjectMetadata{}, iodine.New(err, nil)
return ObjectMetadata{}, iodine.New(err, nil)
}
// update
cache.lock.Lock()
@ -561,10 +512,10 @@ func (cache *cacheDriver) GetObjectMetadata(bucket, key string) (drivers.ObjectM
return objMetadata, nil
}
cache.lock.RUnlock()
return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: key}, nil)
return ObjectMetadata{}, iodine.New(ObjectNotFound{Bucket: bucket, Object: key}, nil)
}
func (cache *cacheDriver) expiredObject(a ...interface{}) {
func (cache donut) expiredObject(a ...interface{}) {
cacheStats := cache.objects.Stats()
log.Printf("CurrentSize: %d, CurrentItems: %d, TotalExpirations: %d",
cacheStats.Bytes, cacheStats.Items, cacheStats.Expired)
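GetPartialObject maps directly onto HTTP Range semantics: a request for bytes=0-99 becomes start 0, length 100, and negative starts are rejected with InvalidRange as above. A usage sketch:

    var buf bytes.Buffer
    written, err := d.GetPartialObject(&buf, "bucket", "object", 0, 100)
    // written == 100 on a full read; a missing object surfaces ObjectNotFound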

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
package drivers
package donut
import (
"errors"

View File

@ -25,8 +25,10 @@ import (
"strconv"
"strings"
"sync"
"time"
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/storage/donut/trove"
)
// donut struct internal data
@ -35,6 +37,32 @@ type donut struct {
buckets map[string]bucket
nodes map[string]node
lock *sync.RWMutex
cache cache
}
// cache - local variables
type cache struct {
storedBuckets map[string]storedBucket
lock *sync.RWMutex
objects *trove.Cache
multiPartObjects *trove.Cache
maxSize uint64
expiration time.Duration
}
// storedBucket saved bucket
type storedBucket struct {
bucketMetadata BucketMetadata
objectMetadata map[string]ObjectMetadata
partMetadata map[string]PartMetadata
multiPartSession map[string]multiPartSession
}
// multiPartSession multipart session
type multiPartSession struct {
totalParts int
uploadID string
initiated time.Time
}
// config files used inside Donut
@ -82,6 +110,15 @@ func NewDonut(donutName string, nodeDiskMap map[string][]string) (Donut, error)
return nil, iodine.New(err, nil)
}
}
d.cache.storedBuckets = make(map[string]storedBucket)
d.cache.objects = trove.NewCache(maxSize, expiration)
d.cache.multiPartObjects = trove.NewCache(0, time.Duration(0))
d.cache.objects.OnExpired = d.expiredObject
d.cache.multiPartObjects.OnExpired = d.expiredPart
// set up cache expiration
d.cache.objects.ExpireObjects(time.Second * 5)
return d, nil
}
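Construction now brings up the disk layout and the in-memory cache in one step. A usage sketch (host name and paths are made up; where maxSize and expiration come from is not shown in this hunk):

    nodeDiskMap := map[string][]string{
        "localhost": {"/mnt/disk1", "/mnt/disk2"},
    }
    d, err := donut.NewDonut("mydonut", nodeDiskMap)
    if err != nil {
        log.Fatalln(err)
    }
    // d now carries a trove-backed object cache whose entries are swept
    // every five seconds (ExpireObjects(time.Second * 5) above)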

View File

@ -16,6 +16,8 @@
package donut
import "fmt"
// InvalidArgument invalid argument
type InvalidArgument struct{}
@ -138,3 +140,172 @@ type InvalidErasureTechnique struct {
func (e InvalidErasureTechnique) Error() string {
return "Invalid erasure technique: " + e.Technique
}
// InternalError - generic internal error
type InternalError struct {
}
// BackendError - generic disk backend error
type BackendError struct {
Path string
}
// BackendCorrupted - path has corrupted data
type BackendCorrupted BackendError
// APINotImplemented - generic API not implemented error
type APINotImplemented struct {
API string
}
// GenericBucketError - generic bucket error
type GenericBucketError struct {
Bucket string
}
// GenericObjectError - generic object error
type GenericObjectError struct {
Bucket string
Object string
}
// ImplementationError - generic implementation error
type ImplementationError struct {
Bucket string
Object string
Err error
}
// DigestError - Generic Md5 error
type DigestError struct {
Bucket string
Key string
Md5 string
}
/// ACL related errors
// InvalidACL - acl invalid
type InvalidACL struct {
ACL string
}
func (e InvalidACL) Error() string {
return "Requested ACL is " + e.ACL + " invalid"
}
/// Bucket related errors
// BucketNameInvalid - bucketname provided is invalid
type BucketNameInvalid GenericBucketError
// TooManyBuckets - total buckets exceeded
type TooManyBuckets GenericBucketError
/// Object related errors
// EntityTooLarge - object size exceeds maximum limit
type EntityTooLarge struct {
GenericObjectError
Size string
MaxSize string
}
// ObjectNameInvalid - object name provided is invalid
type ObjectNameInvalid GenericObjectError
// InvalidDigest - md5 in request header invalid
type InvalidDigest DigestError
// Return string an error formatted as the given text
func (e ImplementationError) Error() string {
error := ""
if e.Bucket != "" {
error = error + "Bucket: " + e.Bucket + " "
}
if e.Object != "" {
error = error + "Object: " + e.Object + " "
}
error = error + "Error: " + e.Err.Error()
return error
}
// EmbedError - wrapper function for error object
func EmbedError(bucket, object string, err error) ImplementationError {
return ImplementationError{
Bucket: bucket,
Object: object,
Err: err,
}
}
// Return string an error formatted as the given text
func (e InternalError) Error() string {
return "Internal error occured"
}
// Return string an error formatted as the given text
func (e APINotImplemented) Error() string {
return "Api not implemented: " + e.API
}
// Return string an error formatted as the given text
func (e BucketNameInvalid) Error() string {
return "Bucket name invalid: " + e.Bucket
}
// Return string an error formatted as the given text
func (e TooManyBuckets) Error() string {
return "Bucket limit exceeded beyond 100, cannot create bucket: " + e.Bucket
}
// Return string an error formatted as the given text
func (e ObjectNameInvalid) Error() string {
return "Object name invalid: " + e.Bucket + "#" + e.Object
}
// Return string an error formatted as the given text
func (e EntityTooLarge) Error() string {
return e.Bucket + "#" + e.Object + "with " + e.Size + "reached maximum allowed size limit " + e.MaxSize
}
// Return string an error formatted as the given text
func (e BackendCorrupted) Error() string {
return "Backend corrupted: " + e.Path
}
// Return string an error formatted as the given text
func (e InvalidDigest) Error() string {
return "Md5 provided " + e.Md5 + " is invalid"
}
// OperationNotPermitted - operation not permitted
type OperationNotPermitted struct {
Op string
Reason string
}
func (e OperationNotPermitted) Error() string {
return "Operation " + e.Op + " not permitted for reason: " + e.Reason
}
// InvalidRange - invalid range
type InvalidRange struct {
Start int64
Length int64
}
func (e InvalidRange) Error() string {
return fmt.Sprintf("Invalid range start:%d length:%d", e.Start, e.Length)
}
/// Multipart related errors
// InvalidUploadID invalid upload id
type InvalidUploadID struct {
UploadID string
}
func (e InvalidUploadID) Error() string {
return "Invalid upload id " + e.UploadID
}
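These are plain value types whose Error() methods build messages by concatenation; call sites wrap them with iodine.New for stack context, as seen throughout the commit. A tiny sketch (checkBucket is a hypothetical helper):

    func checkBucket(bucket string) error {
        if !IsValidBucket(bucket) {
            // Error() yields "Bucket name invalid: <bucket>"
            return iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
        }
        return nil
    }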

View File

@ -1,53 +1,12 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package drivers
package donut
import (
"io"
"regexp"
"strings"
"time"
"unicode/utf8"
)
// Driver - generic API interface for various drivers - donut, file, memory
type Driver interface {
// Bucket Operations
ListBuckets() ([]BucketMetadata, error)
CreateBucket(bucket, acl string) error
GetBucketMetadata(bucket string) (BucketMetadata, error)
SetBucketMetadata(bucket, acl string) error
// Object Operations
GetObject(w io.Writer, bucket, object string) (int64, error)
GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error)
GetObjectMetadata(bucket, key string) (ObjectMetadata, error)
ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error)
CreateObject(bucket, key, contentType, md5sum string, size int64, data io.Reader) (string, error)
// Object Multipart Operations
ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, error)
NewMultipartUpload(bucket, key, contentType string) (string, error)
AbortMultipartUpload(bucket, key, UploadID string) error
CreateObjectPart(bucket, key, uploadID string, partID int, contentType string, md5sum string, size int64, data io.Reader) (string, error)
CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error)
ListObjectParts(bucket, key string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, error)
}
// BucketACL - bucket level access control
type BucketACL string
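With the Driver interface deleted, nothing programs against a generic storage abstraction anymore; the donut methods earlier in this commit cover the same operation set. A caller that still wants a seam for testing can declare a narrow local interface, the usual Go pattern (a sketch; the interface name is made up, the signatures match the donut receivers above):

    type objectStore interface {
        CreateBucket(bucket, acl string) error
        GetObject(w io.Writer, bucket, object string) (int64, error)
    }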

View File

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
[full Apache License, Version 2.0 text of the deleted LICENSE file omitted]

View File

@ -1,2 +0,0 @@
# objectdriver
Object Storage Driver

View File

@ -1,535 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package drivers
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"math/rand"
"reflect"
"strconv"
"time"
"github.com/minio/check"
"github.com/minio/minio/pkg/iodine"
)
// APITestSuite - collection of API tests
func APITestSuite(c *check.C, create func() Driver) {
testCreateBucket(c, create)
testMultipleObjectCreation(c, create)
testPaging(c, create)
testObjectOverwriteFails(c, create)
testNonExistantBucketOperations(c, create)
testBucketMetadata(c, create)
testBucketRecreateFails(c, create)
testPutObjectInSubdir(c, create)
testListBuckets(c, create)
testListBucketsOrder(c, create)
testListObjectsTestsForNonExistantBucket(c, create)
testNonExistantObjectInBucket(c, create)
testGetDirectoryReturnsObjectNotFound(c, create)
testDefaultContentType(c, create)
testMultipartObjectCreation(c, create)
testMultipartObjectAbort(c, create)
}
func testCreateBucket(c *check.C, create func() Driver) {
drivers := create()
err := drivers.CreateBucket("bucket", "")
c.Assert(err, check.IsNil)
}
func testMultipartObjectCreation(c *check.C, create func() Driver) {
drivers := create()
// the donut driver does not implement multipart upload yet; skip it
if reflect.TypeOf(drivers).String() == "*donut.donutDriver" {
return
}
err := drivers.CreateBucket("bucket", "")
c.Assert(err, check.IsNil)
uploadID, err := drivers.NewMultipartUpload("bucket", "key", "")
c.Assert(err, check.IsNil)
parts := make(map[int]string)
finalHasher := md5.New()
for i := 1; i <= 10; i++ {
randomPerm := rand.Perm(10)
randomString := ""
for _, num := range randomPerm {
randomString = randomString + strconv.Itoa(num)
}
hasher := md5.New()
finalHasher.Write([]byte(randomString))
hasher.Write([]byte(randomString))
expectedmd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
expectedmd5Sumhex := hex.EncodeToString(hasher.Sum(nil))
calculatedmd5sum, err := drivers.CreateObjectPart("bucket", "key", uploadID, i, "", expectedmd5Sum, int64(len(randomString)),
bytes.NewBufferString(randomString))
c.Assert(err, check.IsNil)
c.Assert(calculatedmd5sum, check.Equals, expectedmd5Sumhex)
parts[i] = calculatedmd5sum
}
finalExpectedmd5SumHex := hex.EncodeToString(finalHasher.Sum(nil))
calculatedFinalmd5Sum, err := drivers.CompleteMultipartUpload("bucket", "key", uploadID, parts)
c.Assert(err, check.IsNil)
c.Assert(calculatedFinalmd5Sum, check.Equals, finalExpectedmd5SumHex)
}
func testMultipartObjectAbort(c *check.C, create func() Driver) {
drivers := create()
// the donut driver does not implement multipart upload yet; skip it
if reflect.TypeOf(drivers).String() == "*donut.donutDriver" {
return
}
err := drivers.CreateBucket("bucket", "")
c.Assert(err, check.IsNil)
uploadID, err := drivers.NewMultipartUpload("bucket", "key", "")
c.Assert(err, check.IsNil)
parts := make(map[int]string)
for i := 1; i <= 10; i++ {
randomPerm := rand.Perm(10)
randomString := ""
for _, num := range randomPerm {
randomString = randomString + strconv.Itoa(num)
}
hasher := md5.New()
hasher.Write([]byte(randomString))
expectedmd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
expectedmd5Sumhex := hex.EncodeToString(hasher.Sum(nil))
calculatedmd5sum, err := drivers.CreateObjectPart("bucket", "key", uploadID, i, "", expectedmd5Sum, int64(len(randomString)),
bytes.NewBufferString(randomString))
c.Assert(err, check.IsNil)
c.Assert(calculatedmd5sum, check.Equals, expectedmd5Sumhex)
parts[i] = calculatedmd5sum
}
err = drivers.AbortMultipartUpload("bucket", "key", uploadID)
c.Assert(err, check.IsNil)
}
func testMultipleObjectCreation(c *check.C, create func() Driver) {
objects := make(map[string][]byte)
drivers := create()
err := drivers.CreateBucket("bucket", "")
c.Assert(err, check.IsNil)
for i := 0; i < 10; i++ {
randomPerm := rand.Perm(10)
randomString := ""
for _, num := range randomPerm {
randomString = randomString + strconv.Itoa(num)
}
hasher := md5.New()
hasher.Write([]byte(randomString))
expectedmd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
expectedmd5Sumhex := hex.EncodeToString(hasher.Sum(nil))
key := "obj" + strconv.Itoa(i)
objects[key] = []byte(randomString)
calculatedmd5sum, err := drivers.CreateObject("bucket", key, "", expectedmd5Sum, int64(len(randomString)),
bytes.NewBufferString(randomString))
c.Assert(err, check.IsNil)
c.Assert(calculatedmd5sum, check.Equals, expectedmd5Sumhex)
}
// ensure no duplicate etags
etags := make(map[string]string)
for key, value := range objects {
var byteBuffer bytes.Buffer
_, err := drivers.GetObject(&byteBuffer, "bucket", key)
c.Assert(err, check.IsNil)
c.Assert(byteBuffer.Bytes(), check.DeepEquals, value)
metadata, err := drivers.GetObjectMetadata("bucket", key)
c.Assert(err, check.IsNil)
c.Assert(metadata.Size, check.Equals, int64(len(value)))
_, ok := etags[metadata.Md5]
c.Assert(ok, check.Equals, false)
etags[metadata.Md5] = metadata.Md5
}
}
func testPaging(c *check.C, create func() Driver) {
drivers := create()
drivers.CreateBucket("bucket", "")
resources := BucketResourcesMetadata{}
objects, resources, err := drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(len(objects), check.Equals, 0)
c.Assert(resources.IsTruncated, check.Equals, false)
// check before paging occurs
for i := 0; i < 5; i++ {
key := "obj" + strconv.Itoa(i)
drivers.CreateObject("bucket", key, "", "", int64(len(key)), bytes.NewBufferString(key))
resources.Maxkeys = 5
resources.Prefix = ""
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(len(objects), check.Equals, i+1)
c.Assert(resources.IsTruncated, check.Equals, false)
}
// check that paging works once more objects exist than Maxkeys
for i := 6; i <= 10; i++ {
key := "obj" + strconv.Itoa(i)
drivers.CreateObject("bucket", key, "", "", int64(len(key)), bytes.NewBufferString(key))
resources.Maxkeys = 5
resources.Prefix = ""
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(len(objects), check.Equals, 5)
c.Assert(resources.IsTruncated, check.Equals, true)
}
// check that paging with a prefix at the end returns fewer objects
{
drivers.CreateObject("bucket", "newPrefix", "", "", int64(len("prefix1")), bytes.NewBufferString("prefix1"))
drivers.CreateObject("bucket", "newPrefix2", "", "", int64(len("prefix2")), bytes.NewBufferString("prefix2"))
resources.Prefix = "new"
resources.Maxkeys = 5
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(len(objects), check.Equals, 2)
}
// check ordering of pages
{
resources.Prefix = ""
resources.Maxkeys = 1000
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(objects[0].Key, check.Equals, "newPrefix")
c.Assert(objects[1].Key, check.Equals, "newPrefix2")
c.Assert(objects[2].Key, check.Equals, "obj0")
c.Assert(objects[3].Key, check.Equals, "obj1")
c.Assert(objects[4].Key, check.Equals, "obj10")
}
// check delimited results with delimiter and prefix
{
drivers.CreateObject("bucket", "this/is/delimited", "", "", int64(len("prefix1")), bytes.NewBufferString("prefix1"))
drivers.CreateObject("bucket", "this/is/also/a/delimited/file", "", "", int64(len("prefix2")), bytes.NewBufferString("prefix2"))
var prefixes []string
resources.CommonPrefixes = prefixes // allocate a fresh slice every time
resources.Delimiter = "/"
resources.Prefix = "this/is/"
resources.Maxkeys = 10
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(len(objects), check.Equals, 1)
c.Assert(resources.CommonPrefixes[0], check.Equals, "this/is/also/")
}
time.Sleep(time.Second)
// check delimited results with delimiter without prefix
{
var prefixes []string
resources.CommonPrefixes = prefixes // allocate a fresh slice every time
resources.Delimiter = "/"
resources.Prefix = ""
resources.Maxkeys = 1000
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(objects[0].Key, check.Equals, "newPrefix")
c.Assert(objects[1].Key, check.Equals, "newPrefix2")
c.Assert(objects[2].Key, check.Equals, "obj0")
c.Assert(objects[3].Key, check.Equals, "obj1")
c.Assert(objects[4].Key, check.Equals, "obj10")
c.Assert(resources.CommonPrefixes[0], check.Equals, "this/")
}
// check results with Marker
{
var prefixes []string
resources.CommonPrefixes = prefixes // allocate a fresh slice every time
resources.Prefix = ""
resources.Marker = "newPrefix"
resources.Delimiter = ""
resources.Maxkeys = 3
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(objects[0].Key, check.Equals, "newPrefix2")
c.Assert(objects[1].Key, check.Equals, "obj0")
c.Assert(objects[2].Key, check.Equals, "obj1")
}
// check ordering of results with prefix
{
resources.Prefix = "obj"
resources.Delimiter = ""
resources.Marker = ""
resources.Maxkeys = 1000
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(objects[0].Key, check.Equals, "obj0")
c.Assert(objects[1].Key, check.Equals, "obj1")
c.Assert(objects[2].Key, check.Equals, "obj10")
c.Assert(objects[3].Key, check.Equals, "obj2")
c.Assert(objects[4].Key, check.Equals, "obj3")
}
// check ordering of results with prefix and no paging
{
resources.Prefix = "new"
resources.Marker = ""
resources.Maxkeys = 5
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(objects[0].Key, check.Equals, "newPrefix")
c.Assert(objects[1].Key, check.Equals, "newPrefix2")
}
}
func testObjectOverwriteFails(c *check.C, create func() Driver) {
drivers := create()
drivers.CreateBucket("bucket", "")
hasher1 := md5.New()
hasher1.Write([]byte("one"))
md5Sum1 := base64.StdEncoding.EncodeToString(hasher1.Sum(nil))
md5Sum1hex := hex.EncodeToString(hasher1.Sum(nil))
md5Sum11, err := drivers.CreateObject("bucket", "object", "", md5Sum1, int64(len("one")), bytes.NewBufferString("one"))
c.Assert(err, check.IsNil)
c.Assert(md5Sum1hex, check.Equals, md5Sum11)
hasher2 := md5.New()
hasher2.Write([]byte("three"))
md5Sum2 := base64.StdEncoding.EncodeToString(hasher2.Sum(nil))
_, err = drivers.CreateObject("bucket", "object", "", md5Sum2, int64(len("three")), bytes.NewBufferString("three"))
c.Assert(err, check.Not(check.IsNil))
var bytesBuffer bytes.Buffer
length, err := drivers.GetObject(&bytesBuffer, "bucket", "object")
c.Assert(err, check.IsNil)
c.Assert(length, check.Equals, int64(len("one")))
c.Assert(string(bytesBuffer.Bytes()), check.Equals, "one")
}
func testNonExistantBucketOperations(c *check.C, create func() Driver) {
drivers := create()
_, err := drivers.CreateObject("bucket", "object", "", "", int64(len("one")), bytes.NewBufferString("one"))
c.Assert(err, check.Not(check.IsNil))
}
func testBucketMetadata(c *check.C, create func() Driver) {
drivers := create()
err := drivers.CreateBucket("string", "")
c.Assert(err, check.IsNil)
metadata, err := drivers.GetBucketMetadata("string")
c.Assert(err, check.IsNil)
c.Assert(metadata.ACL, check.Equals, BucketACL("private"))
}
func testBucketRecreateFails(c *check.C, create func() Driver) {
drivers := create()
err := drivers.CreateBucket("string", "")
c.Assert(err, check.IsNil)
err = drivers.CreateBucket("string", "")
c.Assert(err, check.Not(check.IsNil))
}
func testPutObjectInSubdir(c *check.C, create func() Driver) {
drivers := create()
err := drivers.CreateBucket("bucket", "")
c.Assert(err, check.IsNil)
hasher := md5.New()
hasher.Write([]byte("hello world"))
md5Sum1 := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
md5Sum1hex := hex.EncodeToString(hasher.Sum(nil))
md5Sum11, err := drivers.CreateObject("bucket", "dir1/dir2/object", "", md5Sum1, int64(len("hello world")),
bytes.NewBufferString("hello world"))
c.Assert(err, check.IsNil)
c.Assert(md5Sum11, check.Equals, md5Sum1hex)
var bytesBuffer bytes.Buffer
length, err := drivers.GetObject(&bytesBuffer, "bucket", "dir1/dir2/object")
c.Assert(err, check.IsNil)
c.Assert(len(bytesBuffer.Bytes()), check.Equals, len("hello world"))
c.Assert(int64(len(bytesBuffer.Bytes())), check.Equals, length)
}
func testListBuckets(c *check.C, create func() Driver) {
drivers := create()
// test empty list
buckets, err := drivers.ListBuckets()
c.Assert(err, check.IsNil)
c.Assert(len(buckets), check.Equals, 0)
// add one and test exists
err = drivers.CreateBucket("bucket1", "")
c.Assert(err, check.IsNil)
buckets, err = drivers.ListBuckets()
c.Assert(len(buckets), check.Equals, 1)
c.Assert(err, check.IsNil)
// add two and test exists
err = drivers.CreateBucket("bucket2", "")
c.Assert(err, check.IsNil)
buckets, err = drivers.ListBuckets()
c.Assert(len(buckets), check.Equals, 2)
c.Assert(err, check.IsNil)
// add three and test exists + prefix
err = drivers.CreateBucket("bucket22", "")
buckets, err = drivers.ListBuckets()
c.Assert(len(buckets), check.Equals, 3)
c.Assert(err, check.IsNil)
}
func testListBucketsOrder(c *check.C, create func() Driver) {
// if the implementation is backed by a map, iteration order will vary;
// this ensures buckets are returned in the same order each time
for i := 0; i < 10; i++ {
drivers := create()
// add one and test exists
drivers.CreateBucket("bucket1", "")
drivers.CreateBucket("bucket2", "")
buckets, err := drivers.ListBuckets()
c.Assert(err, check.IsNil)
c.Assert(len(buckets), check.Equals, 2)
c.Assert(buckets[0].Name, check.Equals, "bucket1")
c.Assert(buckets[1].Name, check.Equals, "bucket2")
}
}
func testListObjectsTestsForNonExistantBucket(c *check.C, create func() Driver) {
drivers := create()
resources := BucketResourcesMetadata{Prefix: "", Maxkeys: 1000}
objects, resources, err := drivers.ListObjects("bucket", resources)
c.Assert(err, check.Not(check.IsNil))
c.Assert(resources.IsTruncated, check.Equals, false)
c.Assert(len(objects), check.Equals, 0)
}
func testNonExistantObjectInBucket(c *check.C, create func() Driver) {
drivers := create()
err := drivers.CreateBucket("bucket", "")
c.Assert(err, check.IsNil)
var byteBuffer bytes.Buffer
length, err := drivers.GetObject(&byteBuffer, "bucket", "dir1")
c.Assert(length, check.Equals, int64(0))
c.Assert(err, check.Not(check.IsNil))
c.Assert(len(byteBuffer.Bytes()), check.Equals, 0)
switch err := iodine.ToError(err).(type) {
case ObjectNotFound:
{
c.Assert(err, check.ErrorMatches, "Object not Found: bucket#dir1")
}
default:
{
c.Assert(err, check.Equals, "fails")
}
}
}
func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() Driver) {
drivers := create()
err := drivers.CreateBucket("bucket", "")
c.Assert(err, check.IsNil)
_, err = drivers.CreateObject("bucket", "dir1/dir2/object", "", "", int64(len("hello world")),
bytes.NewBufferString("hello world"))
c.Assert(err, check.IsNil)
var byteBuffer bytes.Buffer
length, err := drivers.GetObject(&byteBuffer, "bucket", "dir1")
c.Assert(length, check.Equals, int64(0))
switch err := iodine.ToError(err).(type) {
case ObjectNotFound:
{
c.Assert(err.Bucket, check.Equals, "bucket")
c.Assert(err.Object, check.Equals, "dir1")
}
default:
{
// force a failure with a line number
c.Assert(err, check.Equals, "ObjectNotFound")
}
}
c.Assert(len(byteBuffer.Bytes()), check.Equals, 0)
var byteBuffer2 bytes.Buffer
length, err = drivers.GetObject(&byteBuffer2, "bucket", "dir1/")
c.Assert(length, check.Equals, int64(0))
switch err := iodine.ToError(err).(type) {
case ObjectNotFound:
{
c.Assert(err.Bucket, check.Equals, "bucket")
c.Assert(err.Object, check.Equals, "dir1/")
}
default:
{
// force a failure with a line number
c.Assert(err, check.Equals, "ObjectNotFound")
}
}
c.Assert(len(byteBuffer2.Bytes()), check.Equals, 0)
}
func testDefaultContentType(c *check.C, create func() Driver) {
drivers := create()
err := drivers.CreateBucket("bucket", "")
c.Assert(err, check.IsNil)
// test empty
_, err = drivers.CreateObject("bucket", "one", "", "", int64(len("one")), bytes.NewBufferString("one"))
metadata, err := drivers.GetObjectMetadata("bucket", "one")
c.Assert(err, check.IsNil)
c.Assert(metadata.ContentType, check.Equals, "application/octet-stream")
// test custom
drivers.CreateObject("bucket", "two", "application/text", "", int64(len("two")), bytes.NewBufferString("two"))
metadata, err = drivers.GetObjectMetadata("bucket", "two")
c.Assert(err, check.IsNil)
c.Assert(metadata.ContentType, check.Equals, "application/text")
// test trim space
drivers.CreateObject("bucket", "three", "\tapplication/json ", "", int64(len("three")), bytes.NewBufferString("three"))
metadata, err = drivers.GetObjectMetadata("bucket", "three")
c.Assert(err, check.IsNil)
c.Assert(metadata.ContentType, check.Equals, "application/json")
}
func testContentMd5Set(c *check.C, create func() Driver) {
drivers := create()
err := drivers.CreateBucket("bucket", "")
c.Assert(err, check.IsNil)
// test md5 invalid
badmd5Sum := "NWJiZjVhNTIzMjhlNzQzOWFlNmU3MTlkZmU3MTIyMDA"
calculatedmd5sum, err := drivers.CreateObject("bucket", "one", "", badmd5Sum, int64(len("one")), bytes.NewBufferString("one"))
c.Assert(err, check.Not(check.IsNil))
c.Assert(calculatedmd5sum, check.Not(check.Equals), badmd5Sum)
goodmd5sum := "NWJiZjVhNTIzMjhlNzQzOWFlNmU3MTlkZmU3MTIyMDA="
calculatedmd5sum, err = drivers.CreateObject("bucket", "two", "", goodmd5sum, int64(len("one")), bytes.NewBufferString("one"))
c.Assert(err, check.IsNil)
c.Assert(calculatedmd5sum, check.Equals, goodmd5sum)
}
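
The suite above leans on a detail worth calling out: the MD5 a caller supplies as expectedMD5Sum is base64-encoded (mirroring the S3 Content-MD5 request header), while the sum a driver returns is hex-encoded (mirroring the ETag). A minimal standalone sketch of that relationship, independent of any driver:

package main

import (
"crypto/md5"
"encoding/base64"
"encoding/hex"
"fmt"
)

func main() {
sum := md5.Sum([]byte("hello world"))
// what a client would send as Content-MD5 / expectedMD5Sum
fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
// what CreateObject returns and the ETag carries
fmt.Println(hex.EncodeToString(sum[:]))
}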
@ -1,41 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cache
import (
"testing"
"time"
. "github.com/minio/check"
"github.com/minio/minio/pkg/storage/drivers"
)
func Test(t *testing.T) { TestingT(t) }
type MySuite struct{}
var _ = Suite(&MySuite{})
func (s *MySuite) TestAPISuite(c *C) {
create := func() drivers.Driver {
var driver drivers.Driver
store, err := NewDriver(1000000, 3*time.Hour, driver)
c.Check(err, IsNil)
return store
}
drivers.APITestSuite(c, create)
}
@ -1,47 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"io"
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/storage/drivers"
)
func (d donutDriver) NewMultipartUpload(bucketName, objectName, contentType string) (string, error) {
return "", iodine.New(drivers.APINotImplemented{API: "NewMultipartUpload"}, nil)
}
func (d donutDriver) AbortMultipartUpload(bucketName, objectName, uploadID string) error {
return iodine.New(drivers.APINotImplemented{API: "AbortMultipartUpload"}, nil)
}
func (d donutDriver) CreateObjectPart(bucketName, objectName, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
return "", iodine.New(drivers.APINotImplemented{API: "CreateObjectPart"}, nil)
}
func (d donutDriver) CompleteMultipartUpload(bucketName, objectName, uploadID string, parts map[int]string) (string, error) {
return "", iodine.New(drivers.APINotImplemented{API: "CompleteMultipartUpload"}, nil)
}
func (d donutDriver) ListMultipartUploads(bucketName string, resources drivers.BucketMultipartResourcesMetadata) (drivers.BucketMultipartResourcesMetadata, error) {
return drivers.BucketMultipartResourcesMetadata{}, iodine.New(drivers.APINotImplemented{API: "ListMultipartUploads"}, nil)
}
func (d donutDriver) ListObjectParts(bucketName, objectName string, resources drivers.ObjectResourcesMetadata) (drivers.ObjectResourcesMetadata, error) {
return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.APINotImplemented{API: "ListObjectParts"}, nil)
}
@ -1,410 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"encoding/base64"
"encoding/hex"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"io/ioutil"
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/storage/donut"
"github.com/minio/minio/pkg/storage/drivers"
)
// donutDriver - a single disk driver implementation using donut
type donutDriver struct {
donut donut.Donut
paths []string
lock *sync.RWMutex
}
// This is a dummy nodeDiskMap which is going to be deprecated soon
// once the Management API is standardized, and we have a way of adding
// and removing disks. For now it is useful for taking inputs from the CLI
func createNodeDiskMap(paths []string) map[string][]string {
if len(paths) == 1 {
nodes := make(map[string][]string)
nodes["localhost"] = make([]string, 16)
for i := 0; i < len(nodes["localhost"]); i++ {
diskPath := filepath.Join(paths[0], strconv.Itoa(i))
if _, err := os.Stat(diskPath); err != nil {
if os.IsNotExist(err) {
os.MkdirAll(diskPath, 0700)
}
}
nodes["localhost"][i] = diskPath
}
return nodes
}
diskPaths := make([]string, len(paths))
nodes := make(map[string][]string)
for i, p := range paths {
diskPath := filepath.Join(p, strconv.Itoa(i))
if _, err := os.Stat(diskPath); err != nil {
if os.IsNotExist(err) {
os.MkdirAll(diskPath, 0700)
}
}
diskPaths[i] = diskPath
}
nodes["localhost"] = diskPaths
return nodes
}
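// Illustration only (not in the original source): a single path such as
// "/mnt/minio" expands into sixteen numbered disk directories on one node:
//
// createNodeDiskMap([]string{"/mnt/minio"})
// => map[localhost:[/mnt/minio/0 /mnt/minio/1 ... /mnt/minio/15]]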
// NewDriver instantiates a donut driver
func NewDriver(paths []string) (drivers.Driver, error) {
driver := new(donutDriver)
driver.paths = paths
driver.lock = new(sync.RWMutex)
// Soon to be user configurable; once the Management API is available
// we should change "default" to a value that is passed down
// from configuration parameters
var err error
driver.donut, err = donut.NewDonut("default", createNodeDiskMap(driver.paths))
return driver, iodine.New(err, nil)
}
// byBucketName is a type for sorting bucket metadata by bucket name
type byBucketName []drivers.BucketMetadata
func (b byBucketName) Len() int { return len(b) }
func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }
// ListBuckets returns a list of buckets
func (d donutDriver) ListBuckets() (results []drivers.BucketMetadata, err error) {
if d.donut == nil {
return nil, iodine.New(drivers.InternalError{}, nil)
}
buckets, err := d.donut.ListBuckets()
if err != nil {
return nil, iodine.New(err, nil)
}
for _, metadata := range buckets {
result := drivers.BucketMetadata{
Name: metadata.Name,
Created: metadata.Created,
ACL: drivers.BucketACL(metadata.ACL),
}
results = append(results, result)
}
sort.Sort(byBucketName(results))
return results, nil
}
// CreateBucket creates a new bucket
func (d donutDriver) CreateBucket(bucketName, acl string) error {
d.lock.Lock()
defer d.lock.Unlock()
if d.donut == nil {
return iodine.New(drivers.InternalError{}, nil)
}
if !drivers.IsValidBucketACL(acl) {
return iodine.New(drivers.InvalidACL{ACL: acl}, nil)
}
if drivers.IsValidBucket(bucketName) {
if strings.TrimSpace(acl) == "" {
acl = "private"
}
if err := d.donut.MakeBucket(bucketName, acl); err != nil {
switch iodine.ToError(err).(type) {
case donut.BucketExists:
return iodine.New(drivers.BucketExists{Bucket: bucketName}, nil)
}
return iodine.New(err, nil)
}
return nil
}
return iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil)
}
// GetBucketMetadata retrieves a bucket's metadata
func (d donutDriver) GetBucketMetadata(bucketName string) (drivers.BucketMetadata, error) {
d.lock.RLock()
defer d.lock.RUnlock()
if d.donut == nil {
return drivers.BucketMetadata{}, iodine.New(drivers.InternalError{}, nil)
}
if !drivers.IsValidBucket(bucketName) {
return drivers.BucketMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil)
}
metadata, err := d.donut.GetBucketMetadata(bucketName)
if err != nil {
return drivers.BucketMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil)
}
bucketMetadata := drivers.BucketMetadata{
Name: metadata.Name,
Created: metadata.Created,
ACL: drivers.BucketACL(metadata.ACL),
}
return bucketMetadata, nil
}
// SetBucketMetadata sets a bucket's metadata
func (d donutDriver) SetBucketMetadata(bucketName, acl string) error {
d.lock.Lock()
defer d.lock.Unlock()
if d.donut == nil {
return iodine.New(drivers.InternalError{}, nil)
}
if !drivers.IsValidBucket(bucketName) {
return iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil)
}
if strings.TrimSpace(acl) == "" {
acl = "private"
}
bucketMetadata := make(map[string]string)
bucketMetadata["acl"] = acl
err := d.donut.SetBucketMetadata(bucketName, bucketMetadata)
if err != nil {
return iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil)
}
return nil
}
// GetObject retrieves an object and writes it to a writer
func (d donutDriver) GetObject(w io.Writer, bucketName, objectName string) (int64, error) {
if d.donut == nil {
return 0, iodine.New(drivers.InternalError{}, nil)
}
if !drivers.IsValidBucket(bucketName) {
return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil)
}
if !drivers.IsValidObjectName(objectName) {
return 0, iodine.New(drivers.ObjectNameInvalid{Object: objectName}, nil)
}
d.lock.RLock()
defer d.lock.RUnlock()
reader, size, err := d.donut.GetObject(bucketName, objectName)
if err != nil {
switch iodine.ToError(err).(type) {
case donut.BucketNotFound:
return 0, iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil)
case donut.ObjectNotFound:
return 0, iodine.New(drivers.ObjectNotFound{
Bucket: bucketName,
Object: objectName,
}, nil)
default:
return 0, iodine.New(drivers.InternalError{}, nil)
}
}
written, err := io.CopyN(w, reader, size)
if err != nil {
return 0, iodine.New(err, nil)
}
return written, nil
}
// GetPartialObject retrieves an object range and writes it to a writer
func (d donutDriver) GetPartialObject(w io.Writer, bucketName, objectName string, start, length int64) (int64, error) {
d.lock.RLock()
defer d.lock.RUnlock()
if d.donut == nil {
return 0, iodine.New(drivers.InternalError{}, nil)
}
errParams := map[string]string{
"bucketName": bucketName,
"objectName": objectName,
"start": strconv.FormatInt(start, 10),
"length": strconv.FormatInt(length, 10),
}
if !drivers.IsValidBucket(bucketName) {
return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, errParams)
}
if !drivers.IsValidObjectName(objectName) {
return 0, iodine.New(drivers.ObjectNameInvalid{Object: objectName}, errParams)
}
if start < 0 {
return 0, iodine.New(drivers.InvalidRange{
Start: start,
Length: length,
}, errParams)
}
reader, size, err := d.donut.GetObject(bucketName, objectName)
if err != nil {
switch iodine.ToError(err).(type) {
case donut.BucketNotFound:
return 0, iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil)
case donut.ObjectNotFound:
return 0, iodine.New(drivers.ObjectNotFound{
Bucket: bucketName,
Object: objectName,
}, nil)
default:
return 0, iodine.New(drivers.InternalError{}, nil)
}
}
defer reader.Close()
if start > size || start+length > size {
return 0, iodine.New(drivers.InvalidRange{
Start: start,
Length: length,
}, errParams)
}
_, err = io.CopyN(ioutil.Discard, reader, start)
if err != nil {
return 0, iodine.New(err, errParams)
}
n, err := io.CopyN(w, reader, length)
if err != nil {
return 0, iodine.New(err, errParams)
}
return n, nil
}
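// Illustration (not part of the original source): with start=2 and length=3
// on an object containing "abcdef", the two CopyN calls above first discard
// "ab" into ioutil.Discard and then write "cde" to w.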
// GetObjectMetadata retrieves an object's metadata
func (d donutDriver) GetObjectMetadata(bucketName, objectName string) (drivers.ObjectMetadata, error) {
d.lock.RLock()
defer d.lock.RUnlock()
errParams := map[string]string{
"bucketName": bucketName,
"objectName": objectName,
}
if d.donut == nil {
return drivers.ObjectMetadata{}, iodine.New(drivers.InternalError{}, errParams)
}
if !drivers.IsValidBucket(bucketName) {
return drivers.ObjectMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, errParams)
}
if !drivers.IsValidObjectName(objectName) {
return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNameInvalid{Object: objectName}, errParams)
}
metadata, err := d.donut.GetObjectMetadata(bucketName, objectName)
if err != nil {
return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNotFound{
Bucket: bucketName,
Object: objectName,
}, errParams)
}
objectMetadata := drivers.ObjectMetadata{
Bucket: bucketName,
Key: objectName,
ContentType: metadata.Metadata["contentType"],
Created: metadata.Created,
Md5: metadata.MD5Sum,
Size: metadata.Size,
}
return objectMetadata, nil
}
type byObjectName []drivers.ObjectMetadata
func (b byObjectName) Len() int { return len(b) }
func (b byObjectName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byObjectName) Less(i, j int) bool { return b[i].Key < b[j].Key }
// ListObjects - returns a list of objects
func (d donutDriver) ListObjects(bucketName string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) {
d.lock.RLock()
defer d.lock.RUnlock()
errParams := map[string]string{
"bucketName": bucketName,
}
if d.donut == nil {
return nil, drivers.BucketResourcesMetadata{}, iodine.New(drivers.InternalError{}, errParams)
}
if !drivers.IsValidBucket(bucketName) {
return nil, drivers.BucketResourcesMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil)
}
if !drivers.IsValidObjectName(resources.Prefix) {
return nil, drivers.BucketResourcesMetadata{}, iodine.New(drivers.ObjectNameInvalid{Object: resources.Prefix}, nil)
}
listObjects, err := d.donut.ListObjects(bucketName, resources.Prefix, resources.Marker, resources.Delimiter, resources.Maxkeys)
if err != nil {
return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, errParams)
}
resources.CommonPrefixes = listObjects.CommonPrefixes
resources.IsTruncated = listObjects.IsTruncated
var results []drivers.ObjectMetadata
for _, objMetadata := range listObjects.Objects {
metadata := drivers.ObjectMetadata{
Key: objMetadata.Object,
Created: objMetadata.Created,
Size: objMetadata.Size,
}
results = append(results, metadata)
}
sort.Sort(byObjectName(results))
if resources.IsTruncated && resources.IsDelimiterSet() {
resources.NextMarker = results[len(results)-1].Key
}
return results, resources, nil
}
// CreateObject creates a new object
func (d donutDriver) CreateObject(bucketName, objectName, contentType, expectedMD5Sum string, size int64, reader io.Reader) (string, error) {
d.lock.Lock()
defer d.lock.Unlock()
errParams := map[string]string{
"bucketName": bucketName,
"objectName": objectName,
"contentType": contentType,
}
if d.donut == nil {
return "", iodine.New(drivers.InternalError{}, errParams)
}
if !drivers.IsValidBucket(bucketName) {
return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil)
}
if !drivers.IsValidObjectName(objectName) {
return "", iodine.New(drivers.ObjectNameInvalid{Object: objectName}, nil)
}
if strings.TrimSpace(contentType) == "" {
contentType = "application/octet-stream"
}
metadata := make(map[string]string)
metadata["contentType"] = strings.TrimSpace(contentType)
metadata["contentLength"] = strconv.FormatInt(size, 10)
if strings.TrimSpace(expectedMD5Sum) != "" {
expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
if err != nil {
return "", iodine.New(drivers.InvalidDigest{Md5: expectedMD5Sum}, nil)
}
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
}
objMetadata, err := d.donut.PutObject(bucketName, objectName, expectedMD5Sum, reader, metadata)
if err != nil {
switch iodine.ToError(err).(type) {
case donut.BadDigest:
return "", iodine.New(drivers.BadDigest{Md5: expectedMD5Sum, Bucket: bucketName, Key: objectName}, nil)
}
return "", iodine.New(err, errParams)
}
newObject := drivers.ObjectMetadata{
Bucket: bucketName,
Key: objectName,
ContentType: objMetadata.Metadata["contentType"],
Created: objMetadata.Created,
Md5: objMetadata.MD5Sum,
Size: objMetadata.Size,
}
return newObject.Md5, nil
}
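
Taken end to end, the driver above is exercised roughly as follows. A minimal sketch, assuming the pre-removal import path pkg/storage/drivers/donut and eliding error handling:

package main

import (
"bytes"
"crypto/md5"
"encoding/base64"
"fmt"

"github.com/minio/minio/pkg/storage/drivers/donut"
)

func main() {
driver, _ := donut.NewDriver([]string{"/tmp/minio-donut"})
_ = driver.CreateBucket("bucket", "private")
data := []byte("hello world")
sum := md5.Sum(data)
// CreateObject takes the base64 form and returns the hex form
etag, _ := driver.CreateObject("bucket", "key", "",
base64.StdEncoding.EncodeToString(sum[:]), int64(len(data)), bytes.NewReader(data))
fmt.Println(etag)
}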
@ -1,55 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"io/ioutil"
"os"
"testing"
. "github.com/minio/check"
"github.com/minio/minio/pkg/storage/drivers"
)
func Test(t *testing.T) { TestingT(t) }
type MySuite struct{}
var _ = Suite(&MySuite{})
func (s *MySuite) TestAPISuite(c *C) {
var storageList []string
create := func() drivers.Driver {
var paths []string
p, err := ioutil.TempDir(os.TempDir(), "minio-donut-")
c.Check(err, IsNil)
storageList = append(storageList, p)
paths = append(paths, p)
store, err := NewDriver(paths)
c.Check(err, IsNil)
return store
}
drivers.APITestSuite(c, create)
removeRoots(c, storageList)
}
func removeRoots(c *C, roots []string) {
for _, root := range roots {
err := os.RemoveAll(root)
c.Check(err, IsNil)
}
}
@ -1 +0,0 @@
This is a dummy pass-through driver, useful as a starting point for anyone who wants to contribute a new driver.
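
A sketch of how the pass-through is meant to be used (import paths assumed from the surrounding tree): wrap any existing driver, then override only the methods you care about.

package example

import (
"github.com/minio/minio/pkg/storage/drivers"
"github.com/minio/minio/pkg/storage/drivers/dummy"
)

// wrap forwards every Driver call to inner unchanged
func wrap(inner drivers.Driver) drivers.Driver {
return dummy.NewDriver(inner)
}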
@ -1,108 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dummy
import (
"io"
"github.com/minio/minio/pkg/storage/drivers"
)
// dummyDriver - a pass-through wrapper that forwards every call to the embedded driver
type dummyDriver struct {
driver drivers.Driver
}
// NewDriver provides a new dummy driver
func NewDriver(driver drivers.Driver) drivers.Driver {
return dummyDriver{driver: driver}
}
// ListBuckets
func (dummy dummyDriver) ListBuckets() ([]drivers.BucketMetadata, error) {
return dummy.driver.ListBuckets()
}
// CreateBucket
func (dummy dummyDriver) CreateBucket(bucket, acl string) error {
return dummy.driver.CreateBucket(bucket, acl)
}
// GetBucketMetadata
func (dummy dummyDriver) GetBucketMetadata(bucket string) (drivers.BucketMetadata, error) {
return dummy.driver.GetBucketMetadata(bucket)
}
// SetBucketMetadata
func (dummy dummyDriver) SetBucketMetadata(bucket, acl string) error {
return dummy.driver.SetBucketMetadata(bucket, acl)
}
// GetObject
func (dummy dummyDriver) GetObject(w io.Writer, bucket, object string) (int64, error) {
return dummy.driver.GetObject(w, bucket, object)
}
// GetPartialObject
func (dummy dummyDriver) GetPartialObject(w io.Writer, bucket, object string, start int64, length int64) (int64, error) {
return dummy.driver.GetPartialObject(w, bucket, object, start, length)
}
// GetObjectMetadata
func (dummy dummyDriver) GetObjectMetadata(bucket, object string) (drivers.ObjectMetadata, error) {
return dummy.driver.GetObjectMetadata(bucket, object)
}
// ListObjects
func (dummy dummyDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) {
return dummy.driver.ListObjects(bucket, resources)
}
// CreateObject
func (dummy dummyDriver) CreateObject(bucket, key, contentType, md5sum string, size int64, data io.Reader) (string, error) {
return dummy.driver.CreateObject(bucket, key, contentType, md5sum, size, data)
}
// NewMultipartUpload
func (dummy dummyDriver) NewMultipartUpload(bucket, key, contentType string) (string, error) {
return dummy.driver.NewMultipartUpload(bucket, key, contentType)
}
// CreateObjectPart
func (dummy dummyDriver) CreateObjectPart(bucket, key, uploadID string, partID int, contentType string, md5sum string, size int64, data io.Reader) (string, error) {
return dummy.driver.CreateObjectPart(bucket, key, uploadID, partID, contentType, md5sum, size, data)
}
// CompleteMultipartUpload
func (dummy dummyDriver) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) {
return dummy.driver.CompleteMultipartUpload(bucket, key, uploadID, parts)
}
// ListObjectParts
func (dummy dummyDriver) ListObjectParts(bucket, key string, resources drivers.ObjectResourcesMetadata) (drivers.ObjectResourcesMetadata, error) {
return dummy.driver.ListObjectParts(bucket, key, resources)
}
// ListMultipartUploads
func (dummy dummyDriver) ListMultipartUploads(bucket string, resources drivers.BucketMultipartResourcesMetadata) (drivers.BucketMultipartResourcesMetadata, error) {
return dummy.driver.ListMultipartUploads(bucket, resources)
}
// AbortMultipartUpload
func (dummy dummyDriver) AbortMultipartUpload(bucket, key, uploadID string) error {
return dummy.driver.AbortMultipartUpload(bucket, key, uploadID)
}
@ -1,228 +0,0 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package drivers
import "fmt"
// InternalError - generic internal error
type InternalError struct {
}
// BackendError - generic disk backend error
type BackendError struct {
Path string
}
// BackendCorrupted - path has corrupted data
type BackendCorrupted BackendError
// APINotImplemented - generic API not implemented error
type APINotImplemented struct {
API string
}
// GenericBucketError - generic bucket error
type GenericBucketError struct {
Bucket string
}
// GenericObjectError - generic object error
type GenericObjectError struct {
Bucket string
Object string
}
// ImplementationError - generic implementation error
type ImplementationError struct {
Bucket string
Object string
Err error
}
// DigestError - Generic Md5 error
type DigestError struct {
Bucket string
Key string
Md5 string
}
/// ACL related errors
// InvalidACL - acl invalid
type InvalidACL struct {
ACL string
}
func (e InvalidACL) Error() string {
return "Requested ACL is " + e.ACL + " invalid"
}
/// Bucket related errors
// BucketNameInvalid - bucketname provided is invalid
type BucketNameInvalid GenericBucketError
// BucketExists - bucket already exists
type BucketExists GenericBucketError
// BucketNotFound - requested bucket not found
type BucketNotFound GenericBucketError
// TooManyBuckets - total buckets exceeded
type TooManyBuckets GenericBucketError
/// Object related errors
// ObjectNotFound - requested object not found
type ObjectNotFound GenericObjectError
// ObjectExists - object already exists
type ObjectExists GenericObjectError
// EntityTooLarge - object size exceeds maximum limit
type EntityTooLarge struct {
GenericObjectError
Size string
MaxSize string
}
// ObjectNameInvalid - object name provided is invalid
type ObjectNameInvalid GenericObjectError
// BadDigest - md5 mismatch from data received
type BadDigest DigestError
// InvalidDigest - md5 in request header invalid
type InvalidDigest DigestError
// Error - returns the error message as formatted text
func (e ImplementationError) Error() string {
msg := ""
if e.Bucket != "" {
msg = msg + "Bucket: " + e.Bucket + " "
}
if e.Object != "" {
msg = msg + "Object: " + e.Object + " "
}
msg = msg + "Error: " + e.Err.Error()
return msg
}
// EmbedError - wrapper function for error object
func EmbedError(bucket, object string, err error) ImplementationError {
return ImplementationError{
Bucket: bucket,
Object: object,
Err: err,
}
}
// Error - returns the error message as formatted text
func (e InternalError) Error() string {
return "Internal error occurred"
}
// Error - returns the error message as formatted text
func (e ObjectNotFound) Error() string {
return "Object not Found: " + e.Bucket + "#" + e.Object
}
// Error - returns the error message as formatted text
func (e APINotImplemented) Error() string {
return "Api not implemented: " + e.API
}
// Error - returns the error message as formatted text
func (e ObjectExists) Error() string {
return "Object exists: " + e.Bucket + "#" + e.Object
}
// Error - returns the error message as formatted text
func (e BucketNameInvalid) Error() string {
return "Bucket name invalid: " + e.Bucket
}
// Error - returns the error message as formatted text
func (e BucketExists) Error() string {
return "Bucket exists: " + e.Bucket
}
// Error - returns the error message as formatted text
func (e TooManyBuckets) Error() string {
return "Bucket limit of 100 exceeded, cannot create bucket: " + e.Bucket
}
// Error - returns the error message as formatted text
func (e BucketNotFound) Error() string {
return "Bucket not Found: " + e.Bucket
}
// Error - returns the error message as formatted text
func (e ObjectNameInvalid) Error() string {
return "Object name invalid: " + e.Bucket + "#" + e.Object
}
// Error - returns the error message as formatted text
func (e EntityTooLarge) Error() string {
return e.Bucket + "#" + e.Object + " with size " + e.Size + " has reached the maximum allowed size limit " + e.MaxSize
}
// Error - returns the error message as formatted text
func (e BackendCorrupted) Error() string {
return "Backend corrupted: " + e.Path
}
// Error - returns the error message as formatted text
func (e BadDigest) Error() string {
return "Md5 provided " + e.Md5 + " mismatches for: " + e.Bucket + "#" + e.Key
}
// Error - returns the error message as formatted text
func (e InvalidDigest) Error() string {
return "Md5 provided " + e.Md5 + " is invalid"
}
// OperationNotPermitted - operation not permitted
type OperationNotPermitted struct {
Op string
Reason string
}
func (e OperationNotPermitted) Error() string {
return "Operation " + e.Op + " not permitted for reason: " + e.Reason
}
// InvalidRange - invalid range
type InvalidRange struct {
Start int64
Length int64
}
func (e InvalidRange) Error() string {
return fmt.Sprintf("Invalid range start:%d length:%d", e.Start, e.Length)
}
/// Multipart related errors
// InvalidUploadID - invalid upload id
type InvalidUploadID struct {
UploadID string
}
func (e InvalidUploadID) Error() string {
return "Invalid upload id " + e.UploadID
}
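
Callers match these typed errors by unwrapping with iodine.ToError, as the donut driver and the test suite above do. A minimal sketch of the caller-side pattern:

package example

import (
"log"

"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/storage/drivers"
)

func classify(err error) {
switch e := iodine.ToError(err).(type) {
case drivers.BucketNotFound:
log.Println("no such bucket:", e.Bucket)
case drivers.ObjectNotFound:
log.Println("no such object:", e.Bucket+"#"+e.Object)
default:
log.Println("unhandled error:", e)
}
}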
@ -1,187 +0,0 @@
package mocks
import (
"bytes"
"io"
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/storage/drivers"
"github.com/stretchr/testify/mock"
)
// Driver is a mock
type Driver struct {
mock.Mock
ObjectWriterData map[string][]byte
}
// ListBuckets is a mock
func (m *Driver) ListBuckets() ([]drivers.BucketMetadata, error) {
ret := m.Called()
r0 := ret.Get(0).([]drivers.BucketMetadata)
r1 := ret.Error(1)
return r0, r1
}
// CreateBucket is a mock
func (m *Driver) CreateBucket(bucket, acl string) error {
ret := m.Called(bucket, acl)
r0 := ret.Error(0)
return r0
}
// GetBucketMetadata is a mock
func (m *Driver) GetBucketMetadata(bucket string) (drivers.BucketMetadata, error) {
ret := m.Called(bucket)
r0 := ret.Get(0).(drivers.BucketMetadata)
r1 := ret.Error(1)
return r0, r1
}
// SetBucketMetadata is a mock
func (m *Driver) SetBucketMetadata(bucket, acl string) error {
ret := m.Called(bucket, acl)
r0 := ret.Error(0)
return r0
}
// SetGetObjectWriter registers data that GetObject and GetPartialObject will write back
func (m *Driver) SetGetObjectWriter(bucket, object string, data []byte) {
m.ObjectWriterData[bucket+":"+object] = data
}
// GetObject is a mock
func (m *Driver) GetObject(w io.Writer, bucket, object string) (int64, error) {
ret := m.Called(w, bucket, object)
r0 := ret.Get(0).(int64)
r1 := ret.Error(1)
if r1 == nil {
if obj, ok := m.ObjectWriterData[bucket+":"+object]; ok {
n, err := io.Copy(w, bytes.NewBuffer(obj))
if err != nil {
panic(err)
}
r0 = n
}
}
return r0, r1
}
// GetPartialObject is a mock
func (m *Driver) GetPartialObject(w io.Writer, bucket, object string, start int64, length int64) (int64, error) {
ret := m.Called(w, bucket, object, start, length)
r0 := ret.Get(0).(int64)
r1 := ret.Error(1)
if r1 == nil {
if obj, ok := m.ObjectWriterData[bucket+":"+object]; ok {
source := bytes.NewBuffer(obj)
var nilSink bytes.Buffer
io.CopyN(&nilSink, source, start)
n, _ := io.CopyN(w, source, length)
r0 = n
}
}
r1 = iodine.New(r1, nil)
return r0, r1
}
// GetObjectMetadata is a mock
func (m *Driver) GetObjectMetadata(bucket, object string) (drivers.ObjectMetadata, error) {
ret := m.Called(bucket, object)
r0 := ret.Get(0).(drivers.ObjectMetadata)
r1 := ret.Error(1)
return r0, r1
}
// ListObjects is a mock
func (m *Driver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) {
ret := m.Called(bucket, resources)
r0 := ret.Get(0).([]drivers.ObjectMetadata)
r1 := ret.Get(1).(drivers.BucketResourcesMetadata)
r2 := ret.Error(2)
return r0, r1, r2
}
// CreateObject is a mock
func (m *Driver) CreateObject(bucket, key, contentType, md5sum string, size int64, data io.Reader) (string, error) {
ret := m.Called(bucket, key, contentType, md5sum, size, data)
r0 := ret.Get(0).(string)
r1 := ret.Error(1)
return r0, r1
}
// NewMultipartUpload is a mock
func (m *Driver) NewMultipartUpload(bucket, key, contentType string) (string, error) {
ret := m.Called(bucket, key, contentType)
r0 := ret.Get(0).(string)
r1 := ret.Error(1)
return r0, r1
}
// CreateObjectPart is a mock
func (m *Driver) CreateObjectPart(bucket, key, uploadID string, partID int, contentType string, md5sum string, size int64, data io.Reader) (string, error) {
ret := m.Called(bucket, key, uploadID, partID, contentType, md5sum, size, data)
r0 := ret.Get(0).(string)
r1 := ret.Error(1)
return r0, r1
}
// CompleteMultipartUpload is a mock
func (m *Driver) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) {
ret := m.Called(bucket, key, uploadID, parts)
r0 := ret.Get(0).(string)
r1 := ret.Error(1)
return r0, r1
}
// ListObjectParts is a mock
func (m *Driver) ListObjectParts(bucket, key string, resources drivers.ObjectResourcesMetadata) (drivers.ObjectResourcesMetadata, error) {
ret := m.Called(bucket, key, resources)
r0 := ret.Get(0).(drivers.ObjectResourcesMetadata)
r1 := ret.Error(1)
return r0, r1
}
// ListMultipartUploads is a mock
func (m *Driver) ListMultipartUploads(bucket string, resources drivers.BucketMultipartResourcesMetadata) (drivers.BucketMultipartResourcesMetadata, error) {
ret := m.Called(bucket, resources)
r0 := ret.Get(0).(drivers.BucketMultipartResourcesMetadata)
r1 := ret.Error(1)
return r0, r1
}
// AbortMultipartUpload is a mock
func (m *Driver) AbortMultipartUpload(bucket, key, uploadID string) error {
ret := m.Called(bucket, key, uploadID)
r0 := ret.Error(0)
return r0
}
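
A hedged sketch of how this mock is typically driven with testify; the mocks import path is assumed, while On, Return, Once, and AssertExpectations are standard testify calls:

package handlers_test

import (
"bytes"
"testing"

"github.com/minio/minio/pkg/storage/drivers/mocks"
"github.com/stretchr/testify/mock"
)

func TestGetObject(t *testing.T) {
driver := &mocks.Driver{ObjectWriterData: make(map[string][]byte)}
driver.SetGetObjectWriter("bucket", "object", []byte("hello"))
// expect one GetObject call; the registered bytes are then copied into w
driver.On("GetObject", mock.Anything, "bucket", "object").Return(int64(5), nil).Once()
var w bytes.Buffer
n, err := driver.GetObject(&w, "bucket", "object")
if err != nil || n != 5 || w.String() != "hello" {
t.Fatalf("unexpected result: n=%d err=%v body=%q", n, err, w.String())
}
driver.AssertExpectations(t)
}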