Merge pull request #693 from harshavardhana/pr_out_add_dummy_driver_for_community_to_submit_new_drivers
This commit is contained in: commit fe3c618cc7

commands.go (119 lines changed)
@@ -26,8 +26,6 @@ var commands = []cli.Command{
}

var modeCommands = []cli.Command{
memoryCmd,
fsCmd,
donutCmd,
}

@@ -37,42 +35,6 @@ var modeCmd = cli.Command{
Description: "Mode of execution",
}

var memoryCmd = cli.Command{
Name: "memory",
Description: "Limit maximum memory usage to SIZE in [B, KB, MB, GB]",
Action: runMemory,
CustomHelpTemplate: `NAME:
minio mode {{.Name}} - {{.Description}}

USAGE:
minio mode {{.Name}} limit SIZE expire TIME

EXAMPLES:
1. Limit maximum memory usage to 64MB with 1 hour expiration
$ minio mode {{.Name}} limit 64MB expire 1h

2. Limit maximum memory usage to 4GB with no expiration
$ minio mode {{.Name}} limit 4GB
`,
}

var fsCmd = cli.Command{
Name: "fs",
Description: "Path to filesystem volume.",
Action: runFilesystem,
CustomHelpTemplate: `NAME:
minio mode {{.Name}} - {{.Description}}

USAGE:
minio mode {{.Name}} limit SIZE expire TIME

EXAMPLES:
1. Export an existing filesystem path
$ minio mode {{.Name}} /var/www

`,
}

var donutCmd = cli.Command{
Name: "donut",
Description: "[status: EXPERIMENTAL]. Path to donut volume.",
@@ -93,72 +55,6 @@ EXAMPLES:
`,
}

func runMemory(c *cli.Context) {
if len(c.Args()) == 0 || len(c.Args())%2 != 0 {
cli.ShowCommandHelpAndExit(c, "memory", 1) // last argument is exit code
}
apiServerConfig := getAPIServerConfig(c)

var maxMemory uint64
maxMemorySet := false

var expiration time.Duration
expirationSet := false

var err error

args := c.Args()
for len(args) > 0 {
switch args.First() {
case "limit":
{
if maxMemorySet {
Fatalln("Limit should be set only once")
}
args = args.Tail()
maxMemory, err = humanize.ParseBytes(args.First())
if err != nil {
Fatalf("Invalid memory size [%s] passed. Reason: %s\n", args.First(), iodine.New(err, nil))
}
if maxMemory < 1024*1024*10 {
Fatalf("Invalid memory size [%s] passed. Should be greater than 10M\n", args.First())
}
args = args.Tail()
maxMemorySet = true
}
case "expire":
{
if expirationSet {
Fatalln("Expiration should be set only once")
}
args = args.Tail()
expiration, err = time.ParseDuration(args.First())
if err != nil {
Fatalf("Invalid expiration time [%s] passed. Reason: %s\n", args.First(), iodine.New(err, nil))
}
args = args.Tail()
expirationSet = true
}
default:
{
cli.ShowCommandHelpAndExit(c, "memory", 1) // last argument is exit code
}
}
}
if maxMemorySet == false {
Fatalln("Memory limit must be set")
}
memoryDriver := server.MemoryFactory{
Config: apiServerConfig,
MaxMemory: maxMemory,
Expiration: expiration,
}
apiServer := memoryDriver.GetStartServerFunc()
// webServer := getWebServerConfigFunc(c)
servers := []server.StartServerFunc{apiServer} //, webServer}
server.StartMinio(servers)
}

func runDonut(c *cli.Context) {
var err error

@@ -250,18 +146,3 @@ func runDonut(c *cli.Context) {
servers := []server.StartServerFunc{apiServer} //, webServer}
server.StartMinio(servers)
}

func runFilesystem(c *cli.Context) {
if len(c.Args()) != 1 {
cli.ShowCommandHelpAndExit(c, "fs", 1) // last argument is exit code
}
apiServerConfig := getAPIServerConfig(c)
fsDriver := server.FilesystemFactory{
Config: apiServerConfig,
Path: c.Args()[0],
}
apiServer := fsDriver.GetStartServerFunc()
// webServer := getWebServerConfigFunc(c)
servers := []server.StartServerFunc{apiServer} //, webServer}
server.StartMinio(servers)
}
@@ -33,9 +33,8 @@ import (
"net/http/httptest"

"github.com/minio/minio/pkg/storage/drivers"
"github.com/minio/minio/pkg/storage/drivers/cache"
"github.com/minio/minio/pkg/storage/drivers/donut"
"github.com/minio/minio/pkg/storage/drivers/fs"
"github.com/minio/minio/pkg/storage/drivers/memory"
"github.com/minio/minio/pkg/storage/drivers/mocks"
"github.com/stretchr/testify/mock"

@@ -59,7 +58,7 @@ var _ = Suite(&MySuite{

var _ = Suite(&MySuite{
initDriver: func() (drivers.Driver, string) {
driver, _ := memory.NewDriver(10000, 3*time.Hour)
driver, _ := cache.NewDriver(10000, 3*time.Hour)
return driver, ""
},
})
@@ -74,14 +73,6 @@ var _ = Suite(&MySuite{
},
})

var _ = Suite(&MySuite{
initDriver: func() (drivers.Driver, string) {
root, _ := ioutil.TempDir(os.TempDir(), "minio-fs-api")
driver, _ := filesystem.NewDriver(root)
return driver, root
},
})

func (s *MySuite) SetUpSuite(c *C) {
driver, root := s.initDriver()
if root != "" {
@@ -27,46 +27,9 @@ import (
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/server/httpserver"
"github.com/minio/minio/pkg/storage/drivers/donut"
fs "github.com/minio/minio/pkg/storage/drivers/fs"
"github.com/minio/minio/pkg/storage/drivers/memory"
"github.com/minio/minio/pkg/utils/log"
)

// MemoryFactory is used to build memory api server
type MemoryFactory struct {
httpserver.Config
MaxMemory uint64
Expiration time.Duration
}

// GetStartServerFunc builds memory api server
func (f MemoryFactory) GetStartServerFunc() StartServerFunc {
return func() (chan<- string, <-chan error) {
driver, _ := memory.NewDriver(f.MaxMemory, f.Expiration)
conf := api.Config{RateLimit: f.RateLimit}
conf.SetDriver(driver)
ctrl, status, _ := httpserver.Start(api.HTTPHandler(conf), f.Config)
return ctrl, status
}
}

// FilesystemFactory is used to build filesystem api server
type FilesystemFactory struct {
httpserver.Config
Path string
}

// GetStartServerFunc builds memory api server
func (f FilesystemFactory) GetStartServerFunc() StartServerFunc {
return func() (chan<- string, <-chan error) {
driver, _ := fs.NewDriver(f.Path)
conf := api.Config{RateLimit: f.RateLimit}
conf.SetDriver(driver)
ctrl, status, _ := httpserver.Start(api.HTTPHandler(conf), f.Config)
return ctrl, status
}
}

// WebFactory is used to build web cli server
type WebFactory struct {
httpserver.Config
@@ -14,7 +14,7 @@
* limitations under the License.
*/

package memory
package cache

import (
"bytes"
@@ -35,54 +35,54 @@ import (
"github.com/minio/minio/pkg/storage/drivers"
)

func (memory *memoryDriver) NewMultipartUpload(bucket, key, contentType string) (string, error) {
memory.lock.RLock()
func (cache *cacheDriver) NewMultipartUpload(bucket, key, contentType string) (string, error) {
cache.lock.RLock()
if !drivers.IsValidBucket(bucket) {
memory.lock.RUnlock()
cache.lock.RUnlock()
return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
}
if !drivers.IsValidObjectName(key) {
memory.lock.RUnlock()
cache.lock.RUnlock()
return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil)
}
if _, ok := memory.storedBuckets[bucket]; ok == false {
memory.lock.RUnlock()
if _, ok := cache.storedBuckets[bucket]; ok == false {
cache.lock.RUnlock()
return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := memory.storedBuckets[bucket]
storedBucket := cache.storedBuckets[bucket]
objectKey := bucket + "/" + key
if _, ok := storedBucket.objectMetadata[objectKey]; ok == true {
memory.lock.RUnlock()
cache.lock.RUnlock()
return "", iodine.New(drivers.ObjectExists{Bucket: bucket, Object: key}, nil)
}
memory.lock.RUnlock()
cache.lock.RUnlock()

memory.lock.Lock()
cache.lock.Lock()
id := []byte(strconv.FormatInt(rand.Int63(), 10) + bucket + key + time.Now().String())
uploadIDSum := sha512.Sum512(id)
uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47]

memory.storedBuckets[bucket].multiPartSession[key] = multiPartSession{
cache.storedBuckets[bucket].multiPartSession[key] = multiPartSession{
uploadID: uploadID,
initiated: time.Now(),
totalParts: 0,
}
memory.lock.Unlock()
cache.lock.Unlock()

return uploadID, nil
}

func (memory *memoryDriver) AbortMultipartUpload(bucket, key, uploadID string) error {
memory.lock.RLock()
storedBucket := memory.storedBuckets[bucket]
func (cache *cacheDriver) AbortMultipartUpload(bucket, key, uploadID string) error {
cache.lock.RLock()
storedBucket := cache.storedBuckets[bucket]
if storedBucket.multiPartSession[key].uploadID != uploadID {
memory.lock.RUnlock()
cache.lock.RUnlock()
return iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil)
}
memory.lock.RUnlock()
cache.lock.RUnlock()

memory.cleanupMultiparts(bucket, key, uploadID)
memory.cleanupMultipartSession(bucket, key, uploadID)
cache.cleanupMultiparts(bucket, key, uploadID)
cache.cleanupMultipartSession(bucket, key, uploadID)
return nil
}

@@ -90,17 +90,17 @@ func getMultipartKey(key string, uploadID string, partNumber int) string {
return key + "?uploadId=" + uploadID + "&partNumber=" + strconv.Itoa(partNumber)
}

func (memory *memoryDriver) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
func (cache *cacheDriver) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
// Verify upload id
memory.lock.RLock()
storedBucket := memory.storedBuckets[bucket]
cache.lock.RLock()
storedBucket := cache.storedBuckets[bucket]
if storedBucket.multiPartSession[key].uploadID != uploadID {
memory.lock.RUnlock()
cache.lock.RUnlock()
return "", iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil)
}
memory.lock.RUnlock()
cache.lock.RUnlock()

etag, err := memory.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data)
etag, err := cache.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data)
if err != nil {
return "", iodine.New(err, nil)
}
@@ -109,29 +109,29 @@ func (memory *memoryDriver) CreateObjectPart(bucket, key, uploadID string, partI
return etag, nil
}

// createObject - PUT object to memory buffer
func (memory *memoryDriver) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
memory.lock.RLock()
// createObject - PUT object to cache buffer
func (cache *cacheDriver) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
cache.lock.RLock()
if !drivers.IsValidBucket(bucket) {
memory.lock.RUnlock()
cache.lock.RUnlock()
return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
}
if !drivers.IsValidObjectName(key) {
memory.lock.RUnlock()
cache.lock.RUnlock()
return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil)
}
if _, ok := memory.storedBuckets[bucket]; ok == false {
memory.lock.RUnlock()
if _, ok := cache.storedBuckets[bucket]; ok == false {
cache.lock.RUnlock()
return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := memory.storedBuckets[bucket]
storedBucket := cache.storedBuckets[bucket]
// get object key
partKey := bucket + "/" + getMultipartKey(key, uploadID, partID)
if _, ok := storedBucket.partMetadata[partKey]; ok == true {
memory.lock.RUnlock()
cache.lock.RUnlock()
return storedBucket.partMetadata[partKey].ETag, nil
}
memory.lock.RUnlock()
cache.lock.RUnlock()

if contentType == "" {
contentType = "application/octet-stream"
@@ -170,9 +170,9 @@ func (memory *memoryDriver) createObjectPart(bucket, key, uploadID string, partI
md5SumBytes := hash.Sum(nil)
totalLength := int64(len(readBytes))

memory.lock.Lock()
memory.multiPartObjects.Set(partKey, readBytes)
memory.lock.Unlock()
cache.lock.Lock()
cache.multiPartObjects.Set(partKey, readBytes)
cache.lock.Unlock()
// setting up for de-allocation
readBytes = nil

@@ -190,31 +190,31 @@ func (memory *memoryDriver) createObjectPart(bucket, key, uploadID string, partI
Size: totalLength,
}

memory.lock.Lock()
cache.lock.Lock()
storedBucket.partMetadata[partKey] = newPart
multiPartSession := storedBucket.multiPartSession[key]
multiPartSession.totalParts++
storedBucket.multiPartSession[key] = multiPartSession
memory.storedBuckets[bucket] = storedBucket
memory.lock.Unlock()
cache.storedBuckets[bucket] = storedBucket
cache.lock.Unlock()

return md5Sum, nil
}

func (memory *memoryDriver) cleanupMultipartSession(bucket, key, uploadID string) {
memory.lock.Lock()
defer memory.lock.Unlock()
delete(memory.storedBuckets[bucket].multiPartSession, key)
func (cache *cacheDriver) cleanupMultipartSession(bucket, key, uploadID string) {
cache.lock.Lock()
defer cache.lock.Unlock()
delete(cache.storedBuckets[bucket].multiPartSession, key)
}

func (memory *memoryDriver) cleanupMultiparts(bucket, key, uploadID string) {
for i := 1; i <= memory.storedBuckets[bucket].multiPartSession[key].totalParts; i++ {
func (cache *cacheDriver) cleanupMultiparts(bucket, key, uploadID string) {
for i := 1; i <= cache.storedBuckets[bucket].multiPartSession[key].totalParts; i++ {
objectKey := bucket + "/" + getMultipartKey(key, uploadID, i)
memory.multiPartObjects.Delete(objectKey)
cache.multiPartObjects.Delete(objectKey)
}
}

func (memory *memoryDriver) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) {
func (cache *cacheDriver) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) {
if !drivers.IsValidBucket(bucket) {
return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
}
@@ -222,26 +222,26 @@ func (memory *memoryDriver) CompleteMultipartUpload(bucket, key, uploadID string
return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil)
}
// Verify upload id
memory.lock.RLock()
if _, ok := memory.storedBuckets[bucket]; ok == false {
memory.lock.RUnlock()
cache.lock.RLock()
if _, ok := cache.storedBuckets[bucket]; ok == false {
cache.lock.RUnlock()
return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := memory.storedBuckets[bucket]
storedBucket := cache.storedBuckets[bucket]
if storedBucket.multiPartSession[key].uploadID != uploadID {
memory.lock.RUnlock()
cache.lock.RUnlock()
return "", iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil)
}
memory.lock.RUnlock()
cache.lock.RUnlock()

memory.lock.Lock()
cache.lock.Lock()
var size int64
var fullObject bytes.Buffer
for i := 1; i <= len(parts); i++ {
recvMD5 := parts[i]
object, ok := memory.multiPartObjects.Get(bucket + "/" + getMultipartKey(key, uploadID, i))
object, ok := cache.multiPartObjects.Get(bucket + "/" + getMultipartKey(key, uploadID, i))
if ok == false {
memory.lock.Unlock()
cache.lock.Unlock()
return "", iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil)
}
size += int64(len(object))
@@ -261,20 +261,20 @@ func (memory *memoryDriver) CompleteMultipartUpload(bucket, key, uploadID string
object = nil
go debug.FreeOSMemory()
}
memory.lock.Unlock()
cache.lock.Unlock()

md5sumSlice := md5.Sum(fullObject.Bytes())
// this is needed for final verification inside CreateObject, do not convert this to hex
md5sum := base64.StdEncoding.EncodeToString(md5sumSlice[:])
etag, err := memory.CreateObject(bucket, key, "", md5sum, size, &fullObject)
etag, err := cache.CreateObject(bucket, key, "", md5sum, size, &fullObject)
if err != nil {
// No need to call internal cleanup functions here, caller will call AbortMultipartUpload()
// which would in-turn cleanup properly in accordance with S3 Spec
return "", iodine.New(err, nil)
}
fullObject.Reset()
memory.cleanupMultiparts(bucket, key, uploadID)
memory.cleanupMultipartSession(bucket, key, uploadID)
cache.cleanupMultiparts(bucket, key, uploadID)
cache.cleanupMultipartSession(bucket, key, uploadID)
return etag, nil
}

@@ -285,14 +285,14 @@ func (a byKey) Len() int { return len(a) }
func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byKey) Less(i, j int) bool { return a[i].Key < a[j].Key }

func (memory *memoryDriver) ListMultipartUploads(bucket string, resources drivers.BucketMultipartResourcesMetadata) (drivers.BucketMultipartResourcesMetadata, error) {
func (cache *cacheDriver) ListMultipartUploads(bucket string, resources drivers.BucketMultipartResourcesMetadata) (drivers.BucketMultipartResourcesMetadata, error) {
// TODO handle delimiter
memory.lock.RLock()
defer memory.lock.RUnlock()
if _, ok := memory.storedBuckets[bucket]; ok == false {
cache.lock.RLock()
defer cache.lock.RUnlock()
if _, ok := cache.storedBuckets[bucket]; ok == false {
return drivers.BucketMultipartResourcesMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := memory.storedBuckets[bucket]
storedBucket := cache.storedBuckets[bucket]
var uploads []*drivers.UploadMetadata

for key, session := range storedBucket.multiPartSession {
@@ -346,14 +346,14 @@ func (a partNumber) Len() int { return len(a) }
func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }

func (memory *memoryDriver) ListObjectParts(bucket, key string, resources drivers.ObjectResourcesMetadata) (drivers.ObjectResourcesMetadata, error) {
func (cache *cacheDriver) ListObjectParts(bucket, key string, resources drivers.ObjectResourcesMetadata) (drivers.ObjectResourcesMetadata, error) {
// Verify upload id
memory.lock.RLock()
defer memory.lock.RUnlock()
if _, ok := memory.storedBuckets[bucket]; ok == false {
cache.lock.RLock()
defer cache.lock.RUnlock()
if _, ok := cache.storedBuckets[bucket]; ok == false {
return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := memory.storedBuckets[bucket]
storedBucket := cache.storedBuckets[bucket]
if _, ok := storedBucket.multiPartSession[key]; ok == false {
return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: key}, nil)
}
@@ -390,10 +390,10 @@ func (memory *memoryDriver) ListObjectParts(bucket, key string, resources driver
return objectResourcesMetadata, nil
}

func (memory *memoryDriver) expiredPart(a ...interface{}) {
func (cache *cacheDriver) expiredPart(a ...interface{}) {
key := a[0].(string)
// loop through all buckets
for _, storedBucket := range memory.storedBuckets {
for _, storedBucket := range cache.storedBuckets {
delete(storedBucket.partMetadata, key)
}
debug.FreeOSMemory()
@@ -14,7 +14,7 @@
* limitations under the License.
*/

package memory
package cache

import (
"bufio"
@@ -37,8 +37,8 @@ import (
"github.com/minio/minio/pkg/storage/trove"
)

// memoryDriver - local variables
type memoryDriver struct {
// cacheDriver - local variables
type cacheDriver struct {
storedBuckets map[string]storedBucket
lock *sync.RWMutex
objects *trove.Cache
@@ -64,65 +64,65 @@ const (
totalBuckets = 100
)

// NewDriver instantiate a new memory driver
// NewDriver instantiate a new cache driver
func NewDriver(maxSize uint64, expiration time.Duration) (drivers.Driver, error) {
memory := new(memoryDriver)
memory.storedBuckets = make(map[string]storedBucket)
memory.objects = trove.NewCache(maxSize, expiration)
memory.maxSize = maxSize
memory.expiration = expiration
memory.multiPartObjects = trove.NewCache(0, time.Duration(0))
memory.lock = new(sync.RWMutex)
cache := new(cacheDriver)
cache.storedBuckets = make(map[string]storedBucket)
cache.objects = trove.NewCache(maxSize, expiration)
cache.maxSize = maxSize
cache.expiration = expiration
cache.multiPartObjects = trove.NewCache(0, time.Duration(0))
cache.lock = new(sync.RWMutex)

memory.objects.OnExpired = memory.expiredObject
memory.multiPartObjects.OnExpired = memory.expiredPart
cache.objects.OnExpired = cache.expiredObject
cache.multiPartObjects.OnExpired = cache.expiredPart

// set up memory expiration
memory.objects.ExpireObjects(time.Second * 5)
return memory, nil
// set up cache expiration
cache.objects.ExpireObjects(time.Second * 5)
return cache, nil
}

// GetObject - GET object from memory buffer
func (memory *memoryDriver) GetObject(w io.Writer, bucket string, object string) (int64, error) {
memory.lock.RLock()
// GetObject - GET object from cache buffer
func (cache *cacheDriver) GetObject(w io.Writer, bucket string, object string) (int64, error) {
cache.lock.RLock()
if !drivers.IsValidBucket(bucket) {
memory.lock.RUnlock()
cache.lock.RUnlock()
return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
}
if !drivers.IsValidObjectName(object) {
memory.lock.RUnlock()
cache.lock.RUnlock()
return 0, iodine.New(drivers.ObjectNameInvalid{Object: object}, nil)
}
if _, ok := memory.storedBuckets[bucket]; ok == false {
memory.lock.RUnlock()
if _, ok := cache.storedBuckets[bucket]; ok == false {
cache.lock.RUnlock()
return 0, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
}
objectKey := bucket + "/" + object
data, ok := memory.objects.Get(objectKey)
data, ok := cache.objects.Get(objectKey)
if !ok {
memory.lock.RUnlock()
cache.lock.RUnlock()
return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil)
}
written, err := io.Copy(w, bytes.NewBuffer(data))
memory.lock.RUnlock()
cache.lock.RUnlock()
return written, iodine.New(err, nil)
}

// GetPartialObject - GET object from memory buffer range
func (memory *memoryDriver) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) {
// GetPartialObject - GET object from cache buffer range
func (cache *cacheDriver) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) {
errParams := map[string]string{
"bucket": bucket,
"object": object,
"start": strconv.FormatInt(start, 10),
"length": strconv.FormatInt(length, 10),
}
memory.lock.RLock()
cache.lock.RLock()
if !drivers.IsValidBucket(bucket) {
memory.lock.RUnlock()
cache.lock.RUnlock()
return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, errParams)
}
if !drivers.IsValidObjectName(object) {
memory.lock.RUnlock()
cache.lock.RUnlock()
return 0, iodine.New(drivers.ObjectNameInvalid{Object: object}, errParams)
}
if start < 0 {
@@ -132,49 +132,49 @@ func (memory *memoryDriver) GetPartialObject(w io.Writer, bucket, object string,
}, errParams)
}
objectKey := bucket + "/" + object
data, ok := memory.objects.Get(objectKey)
data, ok := cache.objects.Get(objectKey)
if !ok {
memory.lock.RUnlock()
cache.lock.RUnlock()
return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, errParams)
}
written, err := io.CopyN(w, bytes.NewBuffer(data[start:]), length)
memory.lock.RUnlock()
cache.lock.RUnlock()
return written, iodine.New(err, nil)
}

// GetBucketMetadata -
func (memory *memoryDriver) GetBucketMetadata(bucket string) (drivers.BucketMetadata, error) {
memory.lock.RLock()
defer memory.lock.RUnlock()
func (cache *cacheDriver) GetBucketMetadata(bucket string) (drivers.BucketMetadata, error) {
cache.lock.RLock()
defer cache.lock.RUnlock()
if !drivers.IsValidBucket(bucket) {
return drivers.BucketMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
}
if _, ok := memory.storedBuckets[bucket]; ok == false {
if _, ok := cache.storedBuckets[bucket]; ok == false {
return drivers.BucketMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
}
return memory.storedBuckets[bucket].bucketMetadata, nil
return cache.storedBuckets[bucket].bucketMetadata, nil
}

// SetBucketMetadata -
func (memory *memoryDriver) SetBucketMetadata(bucket, acl string) error {
memory.lock.RLock()
func (cache *cacheDriver) SetBucketMetadata(bucket, acl string) error {
cache.lock.RLock()
if !drivers.IsValidBucket(bucket) {
memory.lock.RUnlock()
cache.lock.RUnlock()
return iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
}
if _, ok := memory.storedBuckets[bucket]; ok == false {
memory.lock.RUnlock()
if _, ok := cache.storedBuckets[bucket]; ok == false {
cache.lock.RUnlock()
return iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
}
if strings.TrimSpace(acl) == "" {
acl = "private"
}
memory.lock.RUnlock()
memory.lock.Lock()
defer memory.lock.Unlock()
storedBucket := memory.storedBuckets[bucket]
cache.lock.RUnlock()
cache.lock.Lock()
defer cache.lock.Unlock()
storedBucket := cache.storedBuckets[bucket]
storedBucket.bucketMetadata.ACL = drivers.BucketACL(acl)
memory.storedBuckets[bucket] = storedBucket
cache.storedBuckets[bucket] = storedBucket
return nil
}

@@ -197,44 +197,44 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error {
return iodine.New(errors.New("invalid argument"), nil)
}

func (memory *memoryDriver) CreateObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
if size > int64(memory.maxSize) {
func (cache *cacheDriver) CreateObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
if size > int64(cache.maxSize) {
generic := drivers.GenericObjectError{Bucket: bucket, Object: key}
return "", iodine.New(drivers.EntityTooLarge{
GenericObjectError: generic,
Size: strconv.FormatInt(size, 10),
MaxSize: strconv.FormatUint(memory.maxSize, 10),
MaxSize: strconv.FormatUint(cache.maxSize, 10),
}, nil)
}
md5sum, err := memory.createObject(bucket, key, contentType, expectedMD5Sum, size, data)
md5sum, err := cache.createObject(bucket, key, contentType, expectedMD5Sum, size, data)
// free
debug.FreeOSMemory()
return md5sum, iodine.New(err, nil)
}

// createObject - PUT object to memory buffer
func (memory *memoryDriver) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
memory.lock.RLock()
// createObject - PUT object to cache buffer
func (cache *cacheDriver) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
cache.lock.RLock()
if !drivers.IsValidBucket(bucket) {
memory.lock.RUnlock()
cache.lock.RUnlock()
return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
}
if !drivers.IsValidObjectName(key) {
memory.lock.RUnlock()
cache.lock.RUnlock()
return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil)
}
if _, ok := memory.storedBuckets[bucket]; ok == false {
memory.lock.RUnlock()
if _, ok := cache.storedBuckets[bucket]; ok == false {
cache.lock.RUnlock()
return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := memory.storedBuckets[bucket]
storedBucket := cache.storedBuckets[bucket]
// get object key
objectKey := bucket + "/" + key
if _, ok := storedBucket.objectMetadata[objectKey]; ok == true {
memory.lock.RUnlock()
cache.lock.RUnlock()
return "", iodine.New(drivers.ObjectExists{Bucket: bucket, Object: key}, nil)
}
memory.lock.RUnlock()
cache.lock.RUnlock()

if contentType == "" {
contentType = "application/octet-stream"
@@ -272,12 +272,12 @@ func (memory *memoryDriver) createObject(bucket, key, contentType, expectedMD5Su
md5SumBytes := hash.Sum(nil)
totalLength := len(readBytes)

memory.lock.Lock()
ok := memory.objects.Set(objectKey, readBytes)
cache.lock.Lock()
ok := cache.objects.Set(objectKey, readBytes)
// setting up for de-allocation
readBytes = nil
go debug.FreeOSMemory()
memory.lock.Unlock()
cache.lock.Unlock()
if !ok {
return "", iodine.New(drivers.InternalError{}, nil)
}
@@ -300,33 +300,33 @@ func (memory *memoryDriver) createObject(bucket, key, contentType, expectedMD5Su
Size: int64(totalLength),
}

memory.lock.Lock()
cache.lock.Lock()
storedBucket.objectMetadata[objectKey] = newObject
memory.storedBuckets[bucket] = storedBucket
memory.lock.Unlock()
cache.storedBuckets[bucket] = storedBucket
cache.lock.Unlock()
return newObject.Md5, nil
}

// CreateBucket - create bucket in memory
func (memory *memoryDriver) CreateBucket(bucketName, acl string) error {
memory.lock.RLock()
if len(memory.storedBuckets) == totalBuckets {
memory.lock.RUnlock()
// CreateBucket - create bucket in cache
func (cache *cacheDriver) CreateBucket(bucketName, acl string) error {
cache.lock.RLock()
if len(cache.storedBuckets) == totalBuckets {
cache.lock.RUnlock()
return iodine.New(drivers.TooManyBuckets{Bucket: bucketName}, nil)
}
if !drivers.IsValidBucket(bucketName) {
memory.lock.RUnlock()
cache.lock.RUnlock()
return iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil)
}
if !drivers.IsValidBucketACL(acl) {
memory.lock.RUnlock()
cache.lock.RUnlock()
return iodine.New(drivers.InvalidACL{ACL: acl}, nil)
}
if _, ok := memory.storedBuckets[bucketName]; ok == true {
memory.lock.RUnlock()
if _, ok := cache.storedBuckets[bucketName]; ok == true {
cache.lock.RUnlock()
return iodine.New(drivers.BucketExists{Bucket: bucketName}, nil)
}
memory.lock.RUnlock()
cache.lock.RUnlock()

if strings.TrimSpace(acl) == "" {
// default is private
@@ -340,9 +340,9 @@ func (memory *memoryDriver) CreateBucket(bucketName, acl string) error {
newBucket.bucketMetadata.Name = bucketName
newBucket.bucketMetadata.Created = time.Now().UTC()
newBucket.bucketMetadata.ACL = drivers.BucketACL(acl)
memory.lock.Lock()
defer memory.lock.Unlock()
memory.storedBuckets[bucketName] = newBucket
cache.lock.Lock()
cache.storedBuckets[bucketName] = newBucket
cache.lock.Unlock()
return nil
}

@@ -364,7 +364,7 @@ func appendUniq(slice []string, i string) []string {
return append(slice, i)
}

func (memory *memoryDriver) filterDelimiterPrefix(keys []string, key, delim string, r drivers.BucketResourcesMetadata) ([]string, drivers.BucketResourcesMetadata) {
func (cache *cacheDriver) filterDelimiterPrefix(keys []string, key, delim string, r drivers.BucketResourcesMetadata) ([]string, drivers.BucketResourcesMetadata) {
switch true {
case key == r.Prefix:
keys = appendUniq(keys, key)
@@ -377,7 +377,7 @@ func (memory *memoryDriver) filterDelimiterPrefix(keys []string, key, delim stri
return keys, r
}

func (memory *memoryDriver) listObjects(keys []string, key string, r drivers.BucketResourcesMetadata) ([]string, drivers.BucketResourcesMetadata) {
func (cache *cacheDriver) listObjects(keys []string, key string, r drivers.BucketResourcesMetadata) ([]string, drivers.BucketResourcesMetadata) {
switch true {
// Prefix absent, delimit object key based on delimiter
case r.IsDelimiterSet():
@@ -393,7 +393,7 @@ func (memory *memoryDriver) listObjects(keys []string, key string, r drivers.Buc
if strings.HasPrefix(key, r.Prefix) {
trimmedName := strings.TrimPrefix(key, r.Prefix)
delim := delimiter(trimmedName, r.Delimiter)
keys, r = memory.filterDelimiterPrefix(keys, key, delim, r)
keys, r = cache.filterDelimiterPrefix(keys, key, delim, r)
}
// Prefix present, nothing to delimit
case r.IsPrefixSet():
@@ -405,26 +405,26 @@ func (memory *memoryDriver) listObjects(keys []string, key string, r drivers.Buc
return keys, r
}

// ListObjects - list objects from memory
func (memory *memoryDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) {
memory.lock.RLock()
defer memory.lock.RUnlock()
// ListObjects - list objects from cache
func (cache *cacheDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) {
cache.lock.RLock()
defer cache.lock.RUnlock()
if !drivers.IsValidBucket(bucket) {
return nil, drivers.BucketResourcesMetadata{IsTruncated: false}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
}
if !drivers.IsValidObjectName(resources.Prefix) {
return nil, drivers.BucketResourcesMetadata{IsTruncated: false}, iodine.New(drivers.ObjectNameInvalid{Object: resources.Prefix}, nil)
}
if _, ok := memory.storedBuckets[bucket]; ok == false {
if _, ok := cache.storedBuckets[bucket]; ok == false {
return nil, drivers.BucketResourcesMetadata{IsTruncated: false}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
}
var results []drivers.ObjectMetadata
var keys []string
storedBucket := memory.storedBuckets[bucket]
storedBucket := cache.storedBuckets[bucket]
for key := range storedBucket.objectMetadata {
if strings.HasPrefix(key, bucket+"/") {
key = key[len(bucket)+1:]
keys, resources = memory.listObjects(keys, key, resources)
keys, resources = cache.listObjects(keys, key, resources)
}
}
var newKeys []string
@@ -453,34 +453,29 @@ func (memory *memoryDriver) ListObjects(bucket string, resources drivers.BucketR
return results, resources, nil
}

// ByBucketName is a type for sorting bucket metadata by bucket name
type ByBucketName []drivers.BucketMetadata
// byBucketName is a type for sorting bucket metadata by bucket name
type byBucketName []drivers.BucketMetadata

// Len of bucket name
func (b ByBucketName) Len() int { return len(b) }
func (b byBucketName) Len() int { return len(b) }
func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }

// Swap bucket i, j
func (b ByBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }

// Less
func (b ByBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }

// ListBuckets - List buckets from memory
func (memory *memoryDriver) ListBuckets() ([]drivers.BucketMetadata, error) {
memory.lock.RLock()
defer memory.lock.RUnlock()
// ListBuckets - List buckets from cache
func (cache *cacheDriver) ListBuckets() ([]drivers.BucketMetadata, error) {
cache.lock.RLock()
defer cache.lock.RUnlock()
var results []drivers.BucketMetadata
for _, bucket := range memory.storedBuckets {
for _, bucket := range cache.storedBuckets {
results = append(results, bucket.bucketMetadata)
}
sort.Sort(ByBucketName(results))
sort.Sort(byBucketName(results))
return results, nil
}

// GetObjectMetadata - get object metadata from memory
func (memory *memoryDriver) GetObjectMetadata(bucket, key string) (drivers.ObjectMetadata, error) {
memory.lock.RLock()
defer memory.lock.RUnlock()
// GetObjectMetadata - get object metadata from cache
func (cache *cacheDriver) GetObjectMetadata(bucket, key string) (drivers.ObjectMetadata, error) {
cache.lock.RLock()
defer cache.lock.RUnlock()
// check if bucket exists
if !drivers.IsValidBucket(bucket) {
return drivers.ObjectMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
@@ -488,10 +483,10 @@ func (memory *memoryDriver) GetObjectMetadata(bucket, key string) (drivers.Objec
if !drivers.IsValidObjectName(key) {
return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNameInvalid{Object: key}, nil)
}
if _, ok := memory.storedBuckets[bucket]; ok == false {
if _, ok := cache.storedBuckets[bucket]; ok == false {
return drivers.ObjectMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
}
storedBucket := memory.storedBuckets[bucket]
storedBucket := cache.storedBuckets[bucket]
objectKey := bucket + "/" + key
if object, ok := storedBucket.objectMetadata[objectKey]; ok == true {
return object, nil
@@ -499,18 +494,18 @@ func (memory *memoryDriver) GetObjectMetadata(bucket, key string) (drivers.Objec
return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: key}, nil)
}

func (memory *memoryDriver) expiredObject(a ...interface{}) {
cacheStats := memory.objects.Stats()
func (cache *cacheDriver) expiredObject(a ...interface{}) {
cacheStats := cache.objects.Stats()
log.Printf("CurrentSize: %d, CurrentItems: %d, TotalExpirations: %d",
cacheStats.Bytes, cacheStats.Items, cacheStats.Expired)
key := a[0].(string)
// loop through all buckets
for bucket, storedBucket := range memory.storedBuckets {
for bucket, storedBucket := range cache.storedBuckets {
delete(storedBucket.objectMetadata, key)
// remove bucket if no objects found anymore
if len(storedBucket.objectMetadata) == 0 {
if time.Since(memory.storedBuckets[bucket].bucketMetadata.Created) > memory.expiration {
delete(memory.storedBuckets, bucket)
if time.Since(cache.storedBuckets[bucket].bucketMetadata.Created) > cache.expiration {
delete(cache.storedBuckets, bucket)
}
}
}
@@ -14,7 +14,7 @@
* limitations under the License.
*/

package memory
package cache

import (
"testing"
pkg/storage/drivers/dummy/README.md
Normal file
1
pkg/storage/drivers/dummy/README.md
Normal file
@ -0,0 +1 @@
|
||||
This is a dummy driver which is a pass through driver, useful if some one wants to contribute code.
|
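As a rough sketch of what a contribution built on this template might look like (not part of this commit; the package and type names below are hypothetical), a new driver can wrap an existing drivers.Driver, override the one method it cares about, and let Go's interface embedding forward everything else:

```go
package mydriver

import (
	"io"
	"log"

	"github.com/minio/minio/pkg/storage/drivers"
)

// myDriver wraps an existing drivers.Driver; embedding the interface
// forwards every method that is not explicitly overridden below.
type myDriver struct {
	drivers.Driver
}

// NewDriver wraps an inner driver, mirroring the dummy driver's constructor.
func NewDriver(inner drivers.Driver) drivers.Driver {
	return myDriver{Driver: inner}
}

// GetObject overrides a single method to add behavior, then delegates.
func (m myDriver) GetObject(w io.Writer, bucket, object string) (int64, error) {
	log.Printf("GetObject %s/%s", bucket, object)
	return m.Driver.GetObject(w, bucket, object)
}
```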
pkg/storage/drivers/dummy/dummy.go (new file, 108 lines)
@@ -0,0 +1,108 @@
/*
* Minimalist Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package dummy

import (
"io"

"github.com/minio/minio/pkg/storage/drivers"
)

// dummyDriver
type dummyDriver struct {
driver drivers.Driver
}

// NewDriver provides a new dummy driver
func NewDriver(driver drivers.Driver) drivers.Driver {
return dummyDriver{driver: driver}
}

// ListBuckets
func (dummy dummyDriver) ListBuckets() ([]drivers.BucketMetadata, error) {
return dummy.driver.ListBuckets()
}

// CreateBucket
func (dummy dummyDriver) CreateBucket(bucket, acl string) error {
return dummy.driver.CreateBucket(bucket, acl)
}

// GetBucketMetadata
func (dummy dummyDriver) GetBucketMetadata(bucket string) (drivers.BucketMetadata, error) {
return dummy.driver.GetBucketMetadata(bucket)
}

// SetBucketMetadata
func (dummy dummyDriver) SetBucketMetadata(bucket, acl string) error {
return dummy.driver.SetBucketMetadata(bucket, acl)
}

// GetObject
func (dummy dummyDriver) GetObject(w io.Writer, bucket, object string) (int64, error) {
return dummy.driver.GetObject(w, bucket, object)
}

// GetPartialObject
func (dummy dummyDriver) GetPartialObject(w io.Writer, bucket, object string, start int64, length int64) (int64, error) {
return dummy.driver.GetPartialObject(w, bucket, object, start, length)
}

// GetObjectMetadata
func (dummy dummyDriver) GetObjectMetadata(bucket, object string) (drivers.ObjectMetadata, error) {
return dummy.driver.GetObjectMetadata(bucket, object)
}

// ListObjects
func (dummy dummyDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) {
return dummy.driver.ListObjects(bucket, resources)
}

// CreateObject
func (dummy dummyDriver) CreateObject(bucket, key, contentType, md5sum string, size int64, data io.Reader) (string, error) {
return dummy.driver.CreateObject(bucket, key, contentType, md5sum, size, data)
}

// NewMultipartUpload
func (dummy dummyDriver) NewMultipartUpload(bucket, key, contentType string) (string, error) {
return dummy.driver.NewMultipartUpload(bucket, key, contentType)
}

// CreateObjectPart
func (dummy dummyDriver) CreateObjectPart(bucket, key, uploadID string, partID int, contentType string, md5sum string, size int64, data io.Reader) (string, error) {
return dummy.driver.CreateObjectPart(bucket, key, uploadID, partID, contentType, md5sum, size, data)
}

// CompleteMultipartUpload
func (dummy dummyDriver) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) {
return dummy.driver.CompleteMultipartUpload(bucket, key, uploadID, parts)
}

// ListObjectParts
func (dummy dummyDriver) ListObjectParts(bucket, key string, resources drivers.ObjectResourcesMetadata) (drivers.ObjectResourcesMetadata, error) {
return dummy.driver.ListObjectParts(bucket, key, resources)
}

// ListMultipartUploads
func (dummy dummyDriver) ListMultipartUploads(bucket string, resources drivers.BucketMultipartResourcesMetadata) (drivers.BucketMultipartResourcesMetadata, error) {
return dummy.driver.ListMultipartUploads(bucket, resources)
}

// AbortMultipartUpload
func (dummy dummyDriver) AbortMultipartUpload(bucket, key, uploadID string) error {
return dummy.driver.AbortMultipartUpload(bucket, key, uploadID)
}
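A hedged usage sketch of how the pass-through driver could be combined with the renamed cache driver from this same commit (the wiring in a main function is illustrative, not code from this diff; the constructor signatures are taken from the files above):

```go
package main

import (
	"fmt"
	"time"

	"github.com/minio/minio/pkg/storage/drivers/cache"
	"github.com/minio/minio/pkg/storage/drivers/dummy"
)

func main() {
	// Build the cache driver (512MB limit, 1 hour expiration), then wrap it
	// with the pass-through dummy driver; every call is simply forwarded.
	inner, err := cache.NewDriver(512*1024*1024, 1*time.Hour)
	if err != nil {
		panic(err)
	}
	driver := dummy.NewDriver(inner)

	buckets, err := driver.ListBuckets()
	fmt.Println(buckets, err)
}
```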
@@ -1,42 +0,0 @@
/*
* Mini Object Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package filesystem

import (
"os"
"sync"

"github.com/minio/minio/pkg/storage/drivers"
)

type fsDriver struct {
root string
lock *sync.Mutex
multiparts *Multiparts
}

// NewDriver instantiate a new filesystem driver
func NewDriver(root string) (drivers.Driver, error) {
fs := new(fsDriver)
fs.root = root
fs.lock = new(sync.Mutex)
// internal related to multiparts
fs.multiparts = new(Multiparts)
fs.multiparts.ActiveSession = make(map[string]*MultipartSession)
err := os.MkdirAll(fs.root, 0700)
return fs, err
}
@ -1,205 +0,0 @@
|
||||
/*
|
||||
* Mini Object Fs, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"os"
|
||||
"sort"
|
||||
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/minio/minio/pkg/iodine"
|
||||
"github.com/minio/minio/pkg/storage/drivers"
|
||||
)
|
||||
|
||||
/// Bucket Operations
|
||||
|
||||
// ListBuckets - Get service
|
||||
func (fs *fsDriver) ListBuckets() ([]drivers.BucketMetadata, error) {
|
||||
files, err := ioutil.ReadDir(fs.root)
|
||||
if err != nil {
|
||||
return []drivers.BucketMetadata{}, iodine.New(err, nil)
|
||||
}
|
||||
|
||||
var metadataList []drivers.BucketMetadata
|
||||
for _, file := range files {
|
||||
if !file.IsDir() {
|
||||
// if files found ignore them
|
||||
continue
|
||||
}
|
||||
if file.IsDir() {
|
||||
// if directories found with odd names, skip them too
|
||||
if !drivers.IsValidBucket(file.Name()) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
metadata := drivers.BucketMetadata{
|
||||
Name: file.Name(),
|
||||
Created: file.ModTime(),
|
||||
}
|
||||
metadataList = append(metadataList, metadata)
|
||||
}
|
||||
return metadataList, nil
|
||||
}
|
||||
|
||||
// CreateBucket - PUT Bucket
|
||||
func (fs *fsDriver) CreateBucket(bucket, acl string) error {
|
||||
fs.lock.Lock()
|
||||
defer fs.lock.Unlock()
|
||||
|
||||
// verify bucket path legal
|
||||
if drivers.IsValidBucket(bucket) == false {
|
||||
return iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
|
||||
}
|
||||
|
||||
// get bucket path
|
||||
bucketDir := filepath.Join(fs.root, bucket)
|
||||
|
||||
// check if bucket exists
|
||||
if _, err := os.Stat(bucketDir); err == nil {
|
||||
return iodine.New(drivers.BucketExists{
|
||||
Bucket: bucket,
|
||||
}, nil)
|
||||
}
|
||||
|
||||
// make bucket
|
||||
err := os.Mkdir(bucketDir, 0700)
|
||||
if err != nil {
|
||||
return iodine.New(err, nil)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetBucketMetadata -
|
||||
func (fs *fsDriver) GetBucketMetadata(bucket string) (drivers.BucketMetadata, error) {
|
||||
fs.lock.Lock()
|
||||
defer fs.lock.Unlock()
|
||||
if !drivers.IsValidBucket(bucket) {
|
||||
return drivers.BucketMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
|
||||
}
|
||||
// get bucket path
|
||||
bucketDir := filepath.Join(fs.root, bucket)
|
||||
bucketMetadata := drivers.BucketMetadata{}
|
||||
fi, err := os.Stat(bucketDir)
|
||||
// check if bucket exists
|
||||
if os.IsNotExist(err) {
|
||||
return drivers.BucketMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
|
||||
}
|
||||
if err != nil {
|
||||
return drivers.BucketMetadata{}, iodine.New(err, nil)
|
||||
}
|
||||
|
||||
bucketMetadata.Name = fi.Name()
|
||||
bucketMetadata.Created = fi.ModTime()
|
||||
// TODO convert os.FileMode to meaningful ACL's
|
||||
bucketMetadata.ACL = drivers.BucketACL("private")
|
||||
return bucketMetadata, nil
|
||||
}
|
||||
|
||||
// aclToPerm - convert acl to filesystem mode
|
||||
func aclToPerm(acl string) os.FileMode {
|
||||
switch acl {
|
||||
case "private":
|
||||
return os.FileMode(0700)
|
||||
case "public-read":
|
||||
return os.FileMode(0500)
|
||||
case "public-read-write":
|
||||
return os.FileMode(0777)
|
||||
case "authenticated-read":
|
||||
return os.FileMode(0770)
|
||||
default:
|
||||
return os.FileMode(0700)
|
||||
}
|
||||
}
|
||||
|
||||
// SetBucketMetadata -
|
||||
func (fs *fsDriver) SetBucketMetadata(bucket, acl string) error {
|
||||
fs.lock.Lock()
|
||||
defer fs.lock.Unlock()
|
||||
if !drivers.IsValidBucket(bucket) {
|
||||
return iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
|
||||
}
|
||||
if !drivers.IsValidBucketACL(acl) {
|
||||
return iodine.New(drivers.InvalidACL{ACL: acl}, nil)
|
||||
}
|
||||
// get bucket path
|
||||
bucketDir := filepath.Join(fs.root, bucket)
|
||||
err := os.Chmod(bucketDir, aclToPerm(acl))
|
||||
if err != nil {
|
||||
return iodine.New(err, nil)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListObjects - GET bucket (list objects)
|
||||
func (fs *fsDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) {
|
||||
p := bucketDir{}
|
||||
p.files = make(map[string]os.FileInfo)
|
||||
|
||||
if drivers.IsValidBucket(bucket) == false {
|
||||
return []drivers.ObjectMetadata{}, resources, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
|
||||
}
|
||||
if resources.Prefix != "" && drivers.IsValidObjectName(resources.Prefix) == false {
|
||||
return []drivers.ObjectMetadata{}, resources, iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: resources.Prefix}, nil)
|
||||
}
|
||||
|
||||
rootPrefix := filepath.Join(fs.root, bucket)
|
||||
// check bucket exists
|
||||
if _, err := os.Stat(rootPrefix); os.IsNotExist(err) {
|
||||
return []drivers.ObjectMetadata{}, resources, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
|
||||
}
|
||||
|
||||
p.root = rootPrefix
|
||||
err := filepath.Walk(rootPrefix, p.getAllFiles)
|
||||
if err != nil {
|
||||
return []drivers.ObjectMetadata{}, resources, iodine.New(err, nil)
|
||||
}
|
||||
|
||||
var metadataList []drivers.ObjectMetadata
|
||||
var metadata drivers.ObjectMetadata
|
||||
|
||||
// Populate filtering mode
|
||||
resources.Mode = drivers.GetMode(resources)
|
||||
|
||||
var fileNames []string
|
||||
for name := range p.files {
|
||||
fileNames = append(fileNames, name)
|
||||
}
|
||||
sort.Strings(fileNames)
|
||||
for _, name := range fileNames {
|
||||
if len(metadataList) >= resources.Maxkeys {
|
||||
resources.IsTruncated = true
|
||||
if resources.IsTruncated && resources.IsDelimiterSet() {
|
||||
resources.NextMarker = metadataList[len(metadataList)-1].Key
|
||||
}
|
||||
break
|
||||
}
|
||||
if name > resources.Marker {
|
||||
metadata, resources, err = fs.filterObjects(bucket, name, p.files[name], resources)
|
||||
if err != nil {
|
||||
return []drivers.ObjectMetadata{}, resources, iodine.New(err, nil)
|
||||
}
|
||||
if metadata.Bucket != "" {
|
||||
metadataList = append(metadataList, metadata)
|
||||
}
|
||||
}
|
||||
}
|
||||
sort.Sort(byObjectKey(metadataList))
|
||||
return metadataList, resources, nil
|
||||
}
|
@@ -1,88 +0,0 @@
/*
 * Mini Object Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package filesystem

import (
	"bufio"
	"bytes"
	"os"
	"regexp"
	"strings"

	"github.com/minio/minio/pkg/storage/drivers"
)

// Metadata - carries metadata about object
type Metadata struct {
	Md5sum      []byte
	ContentType string
}

func appendUniq(slice []string, i string) []string {
	for _, ele := range slice {
		if ele == i {
			return slice
		}
	}
	return append(slice, i)
}

type bucketDir struct {
	files map[string]os.FileInfo
	root  string
}

func (p *bucketDir) getAllFiles(object string, fl os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	if fl.Mode().IsRegular() {
		if strings.HasSuffix(object, "$metadata") {
			return nil
		}
		if strings.HasSuffix(object, "$multiparts") {
			return nil
		}
		matched, err := regexp.MatchString("\\$[0-9].*$", object)
		if err != nil {
			return nil
		}
		if matched {
			return nil
		}
		_p := strings.Split(object, p.root+"/")
		if len(_p) > 1 {
			p.files[_p[1]] = fl
		}
	}
	return nil
}

func delimiter(object, delimiter string) string {
	readBuffer := bytes.NewBufferString(object)
	reader := bufio.NewReader(readBuffer)
	stringReader := strings.NewReader(delimiter)
	delimited, _ := stringReader.ReadByte()
	delimitedStr, _ := reader.ReadString(delimited)
	return delimitedStr
}

type byObjectKey []drivers.ObjectMetadata

func (b byObjectKey) Len() int           { return len(b) }
func (b byObjectKey) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byObjectKey) Less(i, j int) bool { return b[i].Key < b[j].Key }
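Editorial note: delimiter above returns the input up to and including the first occurrence of the delimiter byte (only the first byte of a multi-character delimiter is used), or the whole string when the delimiter never occurs. Illustrative values, derived from the function as written:

// delimiter("photos/2015/jan.png", "/") == "photos/"
// delimiter("jan.png", "/")             == "jan.png" (no delimiter found)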
@@ -1,88 +0,0 @@
/*
 * Mini Object Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package filesystem

import (
	"os"
	"strings"

	"github.com/minio/minio/pkg/iodine"
	"github.com/minio/minio/pkg/storage/drivers"
)

func (fs *fsDriver) filterObjects(bucket, name string, file os.FileInfo, resources drivers.BucketResourcesMetadata) (drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) {
	var err error
	var metadata drivers.ObjectMetadata

	switch true {
	// Both delimiter and prefix are present
	case resources.IsDelimiterPrefixSet():
		if strings.HasPrefix(name, resources.Prefix) {
			trimmedName := strings.TrimPrefix(name, resources.Prefix)
			delimitedName := delimiter(trimmedName, resources.Delimiter)
			switch true {
			case name == resources.Prefix:
				// Use resources.Prefix to filter out delimited files
				metadata, err = fs.GetObjectMetadata(bucket, name)
				if err != nil {
					return drivers.ObjectMetadata{}, resources, iodine.New(err, nil)
				}
			case delimitedName == file.Name():
				// Use resources.Prefix to filter out delimited files
				metadata, err = fs.GetObjectMetadata(bucket, name)
				if err != nil {
					return drivers.ObjectMetadata{}, resources, iodine.New(err, nil)
				}
			case delimitedName != "":
				resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, resources.Prefix+delimitedName)
			}
		}
	// Delimiter is present and prefix is absent
	case resources.IsDelimiterSet():
		delimitedName := delimiter(name, resources.Delimiter)
		switch true {
		case delimitedName == "":
			metadata, err = fs.GetObjectMetadata(bucket, name)
			if err != nil {
				return drivers.ObjectMetadata{}, resources, iodine.New(err, nil)
			}
		case delimitedName == file.Name():
			metadata, err = fs.GetObjectMetadata(bucket, name)
			if err != nil {
				return drivers.ObjectMetadata{}, resources, iodine.New(err, nil)
			}
		case delimitedName != "":
			resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, delimitedName)
		}
	// Delimiter is absent and only prefix is present
	case resources.IsPrefixSet():
		if strings.HasPrefix(name, resources.Prefix) {
			// Do not strip prefix from object output
			metadata, err = fs.GetObjectMetadata(bucket, name)
			if err != nil {
				return drivers.ObjectMetadata{}, resources, iodine.New(err, nil)
			}
		}
	case resources.IsDefault():
		metadata, err = fs.GetObjectMetadata(bucket, name)
		if err != nil {
			return drivers.ObjectMetadata{}, resources, iodine.New(err, nil)
		}
	}

	return metadata, resources, nil
}
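Editorial note: the switch above reproduces S3 list semantics: with both a prefix and a delimiter, anything nested more than one level below the prefix is rolled up into CommonPrefixes, while direct children come back as object entries. An illustrative trace through the cases above, with hypothetical keys:

// Prefix = "photos/", Delimiter = "/"
//   "photos/jan.png"      -> delimited name equals the file name, so it is
//                            returned as a regular object entry
//   "photos/2015/feb.png" -> delimited name is "2015/", so "photos/2015/"
//                            is appended to resources.CommonPrefixes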
@@ -1,618 +0,0 @@
/*
 * Mini Object Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package filesystem

import (
	"bytes"
	"crypto/md5"
	"crypto/sha512"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/minio/minio/pkg/iodine"
	"github.com/minio/minio/pkg/storage/drivers"
)

// MultipartSession holds active session information
type MultipartSession struct {
	TotalParts int
	UploadID   string
	Initiated  time.Time
	Parts      []*drivers.PartMetadata
}

// Multiparts collection of many parts
type Multiparts struct {
	ActiveSession map[string]*MultipartSession
}

func (fs *fsDriver) loadActiveSessions(bucket string) {
	bucketPath := filepath.Join(fs.root, bucket)
	_, err := os.Stat(bucketPath)
	if err != nil {
		return
	}
	activeSessionFile, err := os.OpenFile(bucketPath+"$activeSession", os.O_RDONLY, 0600)
	if err != nil {
		return
	}
	defer activeSessionFile.Close()
	var deserializedActiveSession map[string]*MultipartSession
	decoder := json.NewDecoder(activeSessionFile)
	err = decoder.Decode(&deserializedActiveSession)
	if err != nil {
		return
	}
	for key, value := range deserializedActiveSession {
		fs.multiparts.ActiveSession[key] = value
	}
	return
}

func (fs *fsDriver) isValidUploadID(key, uploadID string) bool {
	s, ok := fs.multiparts.ActiveSession[key]
	if !ok {
		return false
	}
	if uploadID == s.UploadID {
		return true
	}
	return false
}

func (fs *fsDriver) writePart(objectPath string, partID int, size int64, data io.Reader) (drivers.PartMetadata, error) {
	partPath := objectPath + fmt.Sprintf("$%d", partID)
	// write part
	partFile, err := os.OpenFile(partPath, os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return drivers.PartMetadata{}, iodine.New(err, nil)
	}
	defer partFile.Close()

	h := md5.New()
	mw := io.MultiWriter(partFile, h)

	_, err = io.CopyN(mw, data, size)
	if err != nil {
		return drivers.PartMetadata{}, iodine.New(err, nil)
	}

	fi, err := os.Stat(partPath)
	if err != nil {
		return drivers.PartMetadata{}, iodine.New(err, nil)
	}
	partMetadata := drivers.PartMetadata{}
	partMetadata.ETag = hex.EncodeToString(h.Sum(nil))
	partMetadata.PartNumber = partID
	partMetadata.Size = fi.Size()
	partMetadata.LastModified = fi.ModTime()
	return partMetadata, nil
}

// byKey is a sortable interface for UploadMetadata slice
type byKey []*drivers.UploadMetadata

func (a byKey) Len() int           { return len(a) }
func (a byKey) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byKey) Less(i, j int) bool { return a[i].Key < a[j].Key }

func (fs *fsDriver) ListMultipartUploads(bucket string, resources drivers.BucketMultipartResourcesMetadata) (drivers.BucketMultipartResourcesMetadata, error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()
	if !drivers.IsValidBucket(bucket) {
		return drivers.BucketMultipartResourcesMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
	}
	bucketPath := filepath.Join(fs.root, bucket)
	_, err := os.Stat(bucketPath)
	// check bucket exists
	if os.IsNotExist(err) {
		return drivers.BucketMultipartResourcesMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
	}
	if err != nil {
		return drivers.BucketMultipartResourcesMetadata{}, iodine.New(drivers.InternalError{}, nil)
	}
	// load from disk
	fs.loadActiveSessions(bucket)

	var uploads []*drivers.UploadMetadata
	for key, session := range fs.multiparts.ActiveSession {
		if strings.HasPrefix(key, resources.Prefix) {
			if len(uploads) > resources.MaxUploads {
				sort.Sort(byKey(uploads))
				resources.Upload = uploads
				resources.NextKeyMarker = key
				resources.NextUploadIDMarker = session.UploadID
				resources.IsTruncated = true
				return resources, nil
			}
			// uploadIDMarker is ignored if KeyMarker is empty
			switch {
			case resources.KeyMarker != "" && resources.UploadIDMarker == "":
				if key > resources.KeyMarker {
					upload := new(drivers.UploadMetadata)
					upload.Key = key
					upload.UploadID = session.UploadID
					upload.Initiated = session.Initiated
					uploads = append(uploads, upload)
				}
			case resources.KeyMarker != "" && resources.UploadIDMarker != "":
				if session.UploadID > resources.UploadIDMarker {
					if key >= resources.KeyMarker {
						upload := new(drivers.UploadMetadata)
						upload.Key = key
						upload.UploadID = session.UploadID
						upload.Initiated = session.Initiated
						uploads = append(uploads, upload)
					}
				}
			default:
				upload := new(drivers.UploadMetadata)
				upload.Key = key
				upload.UploadID = session.UploadID
				upload.Initiated = session.Initiated
				uploads = append(uploads, upload)
			}
		}
	}
	sort.Sort(byKey(uploads))
	resources.Upload = uploads
	return resources, nil
}

func (fs *fsDriver) concatParts(parts map[int]string, objectPath string, mw io.Writer) error {
	for i := 1; i <= len(parts); i++ {
		recvMD5 := parts[i]
		partFile, err := os.OpenFile(objectPath+fmt.Sprintf("$%d", i), os.O_RDONLY, 0600)
		if err != nil {
			return iodine.New(err, nil)
		}
		obj, err := ioutil.ReadAll(partFile)
		if err != nil {
			return iodine.New(err, nil)
		}
		calcMD5Bytes := md5.Sum(obj)
		// complete multi part request header md5sum per part is hex encoded
		recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\""))
		if err != nil {
			return iodine.New(drivers.InvalidDigest{Md5: recvMD5}, nil)
		}
		if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) {
			return iodine.New(drivers.BadDigest{Md5: recvMD5}, nil)
		}
		_, err = io.Copy(mw, bytes.NewBuffer(obj))
		if err != nil {
			return iodine.New(err, nil)
		}
	}
	return nil
}

func (fs *fsDriver) NewMultipartUpload(bucket, key, contentType string) (string, error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()
	if !drivers.IsValidBucket(bucket) {
		return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
	}
	if !drivers.IsValidObjectName(key) {
		return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil)
	}

	bucketPath := filepath.Join(fs.root, bucket)
	_, err := os.Stat(bucketPath)
	// check bucket exists
	if os.IsNotExist(err) {
		return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
	}
	if err != nil {
		return "", iodine.New(drivers.InternalError{}, nil)
	}
	objectPath := filepath.Join(bucketPath, key)
	objectDir := filepath.Dir(objectPath)
	if _, err := os.Stat(objectDir); os.IsNotExist(err) {
		err = os.MkdirAll(objectDir, 0700)
		if err != nil {
			return "", iodine.New(err, nil)
		}
	}

	// check if object exists
	if _, err := os.Stat(objectPath); !os.IsNotExist(err) {
		return "", iodine.New(drivers.ObjectExists{
			Bucket: bucket,
			Object: key,
		}, nil)
	}

	var activeSessionFile *os.File
	_, err = os.Stat(bucketPath + "$activeSession")
	switch {
	case os.IsNotExist(err):
		activeSessionFile, err = os.OpenFile(bucketPath+"$activeSession", os.O_WRONLY|os.O_CREATE, 0600)
		if err != nil {
			return "", iodine.New(err, nil)
		}
	default:
		activeSessionFile, err = os.OpenFile(bucketPath+"$activeSession", os.O_WRONLY|os.O_APPEND, 0600)
		if err != nil {
			return "", iodine.New(err, nil)
		}
	}
	defer activeSessionFile.Close()

	id := []byte(strconv.FormatInt(rand.Int63(), 10) + bucket + key + time.Now().String())
	uploadIDSum := sha512.Sum512(id)
	uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47]

	multiPartfile, err := os.OpenFile(objectPath+"$multiparts", os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	defer multiPartfile.Close()

	mpartSession := new(MultipartSession)
	mpartSession.TotalParts = 0
	mpartSession.UploadID = uploadID
	mpartSession.Initiated = time.Now().UTC()
	var parts []*drivers.PartMetadata
	mpartSession.Parts = parts
	fs.multiparts.ActiveSession[key] = mpartSession

	encoder := json.NewEncoder(multiPartfile)
	err = encoder.Encode(mpartSession)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	encoder = json.NewEncoder(activeSessionFile)
	err = encoder.Encode(fs.multiparts.ActiveSession)
	if err != nil {
		return "", iodine.New(err, nil)
	}

	return uploadID, nil
}

// partNumber is a sortable interface for Part slice
type partNumber []*drivers.PartMetadata

func (a partNumber) Len() int           { return len(a) }
func (a partNumber) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }

func (fs *fsDriver) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()

	if partID <= 0 {
		return "", iodine.New(errors.New("invalid part id, cannot be zero or less than zero"), nil)
	}
	// check bucket name valid
	if drivers.IsValidBucket(bucket) == false {
		return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
	}

	// verify object path legal
	if drivers.IsValidObjectName(key) == false {
		return "", iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: key}, nil)
	}

	if !fs.isValidUploadID(key, uploadID) {
		return "", iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil)
	}

	if strings.TrimSpace(expectedMD5Sum) != "" {
		expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
		if err != nil {
			// pro-actively close the connection
			return "", iodine.New(drivers.InvalidDigest{Md5: expectedMD5Sum}, nil)
		}
		expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
	}

	bucketPath := filepath.Join(fs.root, bucket)
	_, err := os.Stat(bucketPath)

	// check bucket exists
	if os.IsNotExist(err) {
		return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
	}
	if err != nil {
		return "", iodine.New(drivers.InternalError{}, nil)
	}

	objectPath := filepath.Join(bucketPath, key)
	objectDir := filepath.Dir(objectPath)
	if _, err := os.Stat(objectDir); os.IsNotExist(err) {
		err = os.MkdirAll(objectDir, 0700)
		if err != nil {
			return "", iodine.New(err, nil)
		}
	}

	// check if object exists
	if _, err := os.Stat(objectPath); !os.IsNotExist(err) {
		return "", iodine.New(drivers.ObjectExists{
			Bucket: bucket,
			Object: key,
		}, nil)
	}
	partMetadata, err := fs.writePart(objectPath, partID, size, data)
	if err != nil {
		return "", iodine.New(err, nil)
	}

	// Verify if the written object is equal to what is expected, only if it is requested as such
	if strings.TrimSpace(expectedMD5Sum) != "" {
		if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), partMetadata.ETag); err != nil {
			return "", iodine.New(drivers.BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil)
		}
	}

	multiPartfile, err := os.OpenFile(objectPath+"$multiparts", os.O_RDWR|os.O_APPEND, 0600)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	defer multiPartfile.Close()

	var deserializedMultipartSession MultipartSession
	decoder := json.NewDecoder(multiPartfile)
	err = decoder.Decode(&deserializedMultipartSession)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	deserializedMultipartSession.Parts = append(deserializedMultipartSession.Parts, &partMetadata)
	deserializedMultipartSession.TotalParts++
	fs.multiparts.ActiveSession[key] = &deserializedMultipartSession

	sort.Sort(partNumber(deserializedMultipartSession.Parts))
	encoder := json.NewEncoder(multiPartfile)
	err = encoder.Encode(&deserializedMultipartSession)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	return partMetadata.ETag, nil
}

func (fs *fsDriver) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()

	// check bucket name valid
	if drivers.IsValidBucket(bucket) == false {
		return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
	}

	// verify object path legal
	if drivers.IsValidObjectName(key) == false {
		return "", iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: key}, nil)
	}

	if !fs.isValidUploadID(key, uploadID) {
		return "", iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil)
	}

	bucketPath := filepath.Join(fs.root, bucket)
	_, err := os.Stat(bucketPath)
	// check bucket exists
	if os.IsNotExist(err) {
		return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
	}
	if err != nil {
		return "", iodine.New(drivers.InternalError{}, nil)
	}

	objectPath := filepath.Join(bucketPath, key)
	// check if object exists
	if _, err := os.Stat(objectPath); !os.IsNotExist(err) {
		return "", iodine.New(drivers.ObjectExists{
			Bucket: bucket,
			Object: key,
		}, nil)
	}

	file, err := os.OpenFile(objectPath, os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	defer file.Close()
	h := md5.New()
	mw := io.MultiWriter(file, h)
	err = fs.concatParts(parts, objectPath, mw)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	md5sum := hex.EncodeToString(h.Sum(nil))

	delete(fs.multiparts.ActiveSession, key)
	for partNumber := range parts {
		err = os.Remove(objectPath + fmt.Sprintf("$%d", partNumber))
		if err != nil {
			return "", iodine.New(err, nil)
		}
	}
	err = os.Remove(objectPath + "$multiparts")
	if err != nil {
		return "", iodine.New(err, nil)
	}

	file, err = os.OpenFile(objectPath+"$metadata", os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	defer file.Close()

	metadata := &Metadata{
		ContentType: "application/octet-stream",
		Md5sum:      h.Sum(nil),
	}
	// serialize metadata to json
	encoder := json.NewEncoder(file)
	err = encoder.Encode(metadata)
	if err != nil {
		return "", iodine.New(err, nil)
	}

	activeSessionFile, err := os.OpenFile(bucketPath+"$activeSession", os.O_WRONLY|os.O_TRUNC, 0600)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	defer activeSessionFile.Close()
	encoder = json.NewEncoder(activeSessionFile)
	err = encoder.Encode(fs.multiparts.ActiveSession)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	return md5sum, nil
}

func (fs *fsDriver) ListObjectParts(bucket, key string, resources drivers.ObjectResourcesMetadata) (drivers.ObjectResourcesMetadata, error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()

	// load from disk
	fs.loadActiveSessions(bucket)

	// check bucket name valid
	if drivers.IsValidBucket(bucket) == false {
		return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
	}

	// verify object path legal
	if drivers.IsValidObjectName(key) == false {
		return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: key}, nil)
	}

	if !fs.isValidUploadID(key, resources.UploadID) {
		return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.InvalidUploadID{UploadID: resources.UploadID}, nil)
	}

	objectResourcesMetadata := resources
	objectResourcesMetadata.Bucket = bucket
	objectResourcesMetadata.Key = key
	var startPartNumber int
	switch {
	case objectResourcesMetadata.PartNumberMarker == 0:
		startPartNumber = 1
	default:
		startPartNumber = objectResourcesMetadata.PartNumberMarker
	}

	bucketPath := filepath.Join(fs.root, bucket)
	_, err := os.Stat(bucketPath)
	// check bucket exists
	if os.IsNotExist(err) {
		return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
	}
	if err != nil {
		return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.InternalError{}, nil)
	}

	objectPath := filepath.Join(bucketPath, key)
	multiPartfile, err := os.OpenFile(objectPath+"$multiparts", os.O_RDONLY, 0600)
	if err != nil {
		return drivers.ObjectResourcesMetadata{}, iodine.New(err, nil)
	}
	defer multiPartfile.Close()

	var deserializedMultipartSession MultipartSession
	decoder := json.NewDecoder(multiPartfile)
	err = decoder.Decode(&deserializedMultipartSession)
	if err != nil {
		return drivers.ObjectResourcesMetadata{}, iodine.New(err, nil)
	}
	var parts []*drivers.PartMetadata
	for i := startPartNumber; i <= deserializedMultipartSession.TotalParts; i++ {
		if len(parts) > objectResourcesMetadata.MaxParts {
			sort.Sort(partNumber(parts))
			objectResourcesMetadata.IsTruncated = true
			objectResourcesMetadata.Part = parts
			objectResourcesMetadata.NextPartNumberMarker = i
			return objectResourcesMetadata, nil
		}
		parts = append(parts, deserializedMultipartSession.Parts[i-1])
	}
	sort.Sort(partNumber(parts))
	objectResourcesMetadata.Part = parts
	return objectResourcesMetadata, nil
}

func (fs *fsDriver) AbortMultipartUpload(bucket, key, uploadID string) error {
	fs.lock.Lock()
	defer fs.lock.Unlock()

	// check bucket name valid
	if drivers.IsValidBucket(bucket) == false {
		return iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
	}

	// verify object path legal
	if drivers.IsValidObjectName(key) == false {
		return iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: key}, nil)
	}

	if !fs.isValidUploadID(key, uploadID) {
		return iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil)
	}

	bucketPath := filepath.Join(fs.root, bucket)
	_, err := os.Stat(bucketPath)
	// check bucket exists
	if os.IsNotExist(err) {
		return iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
	}
	if err != nil {
		return iodine.New(drivers.InternalError{}, nil)
	}

	objectPath := filepath.Join(bucketPath, key)
	multiPartfile, err := os.OpenFile(objectPath+"$multiparts", os.O_RDWR, 0600)
	if err != nil {
		return iodine.New(err, nil)
	}

	var deserializedMultipartSession MultipartSession
	decoder := json.NewDecoder(multiPartfile)
	err = decoder.Decode(&deserializedMultipartSession)
	if err != nil {
		return iodine.New(err, nil)
	}
	multiPartfile.Close() // close it right here, since we will delete it subsequently

	delete(fs.multiparts.ActiveSession, key)
	for _, part := range deserializedMultipartSession.Parts {
		err = os.RemoveAll(objectPath + fmt.Sprintf("$%d", part.PartNumber))
		if err != nil {
			return iodine.New(err, nil)
		}
	}
	err = os.RemoveAll(objectPath + "$multiparts")
	if err != nil {
		return iodine.New(err, nil)
	}
	return nil
}
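Editorial note: taken together, these functions form the multipart lifecycle: NewMultipartUpload creates the $multiparts session, CreateObjectPart writes parts and returns their ETags, and CompleteMultipartUpload concatenates and verifies them. A hedged sketch of how a caller might drive that sequence, assuming these methods are exposed through the drivers.Driver interface used in the test file below (bucket, key, and part contents are made up):

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"log"

	"github.com/minio/minio/pkg/storage/drivers"
)

// uploadInParts walks the multipart calls removed in this commit: start a
// session, upload each part with its Content-MD5 (base64 of the raw digest,
// which CreateObjectPart decodes and compares in hex), then complete the
// upload with the part-number -> ETag map that concatParts expects.
func uploadInParts(driver drivers.Driver, bucket, key string, parts [][]byte) {
	uploadID, err := driver.NewMultipartUpload(bucket, key, "application/octet-stream")
	if err != nil {
		log.Fatalln(err)
	}
	etags := make(map[int]string)
	for i, part := range parts {
		sum := md5.Sum(part)
		contentMD5 := base64.StdEncoding.EncodeToString(sum[:])
		etag, err := driver.CreateObjectPart(bucket, key, uploadID, i+1,
			"application/octet-stream", contentMD5, int64(len(part)), bytes.NewReader(part))
		if err != nil {
			log.Fatalln(err)
		}
		// concatParts trims surrounding quotes, so a bare hex ETag is fine here.
		etags[i+1] = etag
	}
	if _, err := driver.CompleteMultipartUpload(bucket, key, uploadID, etags); err != nil {
		log.Fatalln(err)
	}
}

func main() {
	// Construct a driver (for example with this package's NewDriver) and
	// call uploadInParts with two or more part buffers.
}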
@@ -1,295 +0,0 @@
/*
 * Mini Object Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package filesystem

import (
	"bytes"
	"io"
	"os"
	"path/filepath"
	"strings"

	"crypto/md5"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"errors"

	"github.com/minio/minio/pkg/iodine"
	"github.com/minio/minio/pkg/storage/drivers"
)

/// Object Operations

// GetPartialObject - GET object from range
func (fs *fsDriver) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) {
	// validate bucket
	if drivers.IsValidBucket(bucket) == false {
		return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
	}

	// validate object
	if drivers.IsValidObjectName(object) == false {
		return 0, iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: object}, nil)
	}

	objectPath := filepath.Join(fs.root, bucket, object)
	filestat, err := os.Stat(objectPath)
	switch err := err.(type) {
	case nil:
		{
			if filestat.IsDir() {
				return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil)
			}
		}
	default:
		{
			if os.IsNotExist(err) {
				return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil)
			}
			return 0, iodine.New(err, nil)
		}
	}
	file, err := os.Open(objectPath)
	if err != nil {
		return 0, iodine.New(err, nil)
	}
	defer file.Close()

	_, err = file.Seek(start, os.SEEK_SET)
	if err != nil {
		return 0, iodine.New(err, nil)
	}

	count, err := io.CopyN(w, file, length)
	if err != nil {
		return count, iodine.New(err, nil)
	}

	return count, nil
}

// GetObject - GET object from key
func (fs *fsDriver) GetObject(w io.Writer, bucket string, object string) (int64, error) {
	// validate bucket
	if drivers.IsValidBucket(bucket) == false {
		return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
	}

	// validate object
	if drivers.IsValidObjectName(object) == false {
		return 0, iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: object}, nil)
	}
	objectPath := filepath.Join(fs.root, bucket, object)
	filestat, err := os.Stat(objectPath)
	switch err := err.(type) {
	case nil:
		{
			if filestat.IsDir() {
				return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil)
			}
		}
	default:
		{
			if os.IsNotExist(err) {
				return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil)
			}
			return 0, iodine.New(err, nil)
		}
	}
	file, err := os.Open(objectPath)
	defer file.Close()
	if err != nil {
		return 0, drivers.EmbedError(bucket, object, err)
	}

	count, err := io.Copy(w, file)
	if err != nil {
		return count, iodine.New(err, nil)
	}
	return count, nil
}

// GetObjectMetadata - HEAD object
func (fs *fsDriver) GetObjectMetadata(bucket, object string) (drivers.ObjectMetadata, error) {
	if drivers.IsValidBucket(bucket) == false {
		return drivers.ObjectMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
	}

	if drivers.IsValidObjectName(object) == false {
		return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: object}, nil)
	}

	// Do not use filepath.Join() since filepath.Join strips off any object names with '/', use them as is
	// in a static manner so that we can send a proper 'ObjectNotFound' reply back upon os.Stat()
	objectPath := fs.root + "/" + bucket + "/" + object
	stat, err := os.Stat(objectPath)
	if os.IsNotExist(err) {
		return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil)
	}

	_, err = os.Stat(objectPath + "$metadata")
	if os.IsNotExist(err) {
		return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil)
	}

	file, err := os.Open(objectPath + "$metadata")
	defer file.Close()
	if err != nil {
		return drivers.ObjectMetadata{}, iodine.New(err, nil)
	}

	var deserializedMetadata Metadata
	decoder := json.NewDecoder(file)
	err = decoder.Decode(&deserializedMetadata)
	if err != nil {
		return drivers.ObjectMetadata{}, iodine.New(err, nil)
	}

	contentType := "application/octet-stream"
	if deserializedMetadata.ContentType != "" {
		contentType = deserializedMetadata.ContentType
	}
	contentType = strings.TrimSpace(contentType)

	etag := bucket + "#" + filepath.Base(object)
	if len(deserializedMetadata.Md5sum) != 0 {
		etag = hex.EncodeToString(deserializedMetadata.Md5sum)
	}

	metadata := drivers.ObjectMetadata{
		Bucket:      bucket,
		Key:         object,
		Created:     stat.ModTime(),
		Size:        stat.Size(),
		Md5:         etag,
		ContentType: contentType,
	}

	return metadata, nil
}

// isMD5SumEqual - returns an error if the md5sums mismatch, nil on success
func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) error {
	if strings.TrimSpace(expectedMD5Sum) != "" && strings.TrimSpace(actualMD5Sum) != "" {
		expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum)
		if err != nil {
			return iodine.New(err, nil)
		}
		actualMD5SumBytes, err := hex.DecodeString(actualMD5Sum)
		if err != nil {
			return iodine.New(err, nil)
		}
		if !bytes.Equal(expectedMD5SumBytes, actualMD5SumBytes) {
			return iodine.New(errors.New("bad digest, md5sum mismatch"), nil)
		}
		return nil
	}
	return iodine.New(errors.New("invalid argument"), nil)
}

// CreateObject - PUT object
func (fs *fsDriver) CreateObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()

	// check bucket name valid
	if drivers.IsValidBucket(bucket) == false {
		return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
	}

	// check bucket exists
	if _, err := os.Stat(filepath.Join(fs.root, bucket)); os.IsNotExist(err) {
		return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
	}

	// verify object path legal
	if drivers.IsValidObjectName(key) == false {
		return "", iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: key}, nil)
	}

	// verify content type
	if contentType == "" {
		contentType = "application/octet-stream"
	}
	contentType = strings.TrimSpace(contentType)

	// get object path
	objectPath := filepath.Join(fs.root, bucket, key)
	objectDir := filepath.Dir(objectPath)
	if _, err := os.Stat(objectDir); os.IsNotExist(err) {
		err = os.MkdirAll(objectDir, 0700)
		if err != nil {
			return "", iodine.New(err, nil)
		}
	}

	// check if object exists
	if _, err := os.Stat(objectPath); !os.IsNotExist(err) {
		return "", iodine.New(drivers.ObjectExists{
			Bucket: bucket,
			Object: key,
		}, nil)
	}

	if strings.TrimSpace(expectedMD5Sum) != "" {
		expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
		if err != nil {
			// pro-actively close the connection
			return "", iodine.New(drivers.InvalidDigest{Md5: expectedMD5Sum}, nil)
		}
		expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
	}

	// write object
	file, err := os.OpenFile(objectPath, os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	defer file.Close()

	h := md5.New()
	mw := io.MultiWriter(file, h)

	_, err = io.CopyN(mw, data, size)
	if err != nil {
		return "", iodine.New(err, nil)
	}

	file, err = os.OpenFile(objectPath+"$metadata", os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	defer file.Close()

	metadata := &Metadata{
		ContentType: contentType,
		Md5sum:      h.Sum(nil),
	}
	// serialize metadata to json
	encoder := json.NewEncoder(file)
	err = encoder.Encode(metadata)
	if err != nil {
		return "", iodine.New(err, nil)
	}

	md5Sum := hex.EncodeToString(metadata.Md5sum)
	// Verify if the written object is equal to what is expected, only if it is requested as such
	if strings.TrimSpace(expectedMD5Sum) != "" {
		if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil {
			return "", iodine.New(drivers.BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil)
		}
	}
	return md5Sum, nil
}
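Editorial note: CreateObject and CompleteMultipartUpload both persist a "$metadata" sidecar next to the object, which GetObjectMetadata reads back. Since Metadata is serialized with encoding/json and Md5sum is a []byte, the sidecar is a small JSON document with a base64-encoded digest; an illustrative example (values are made up):

// "$metadata" sidecar, as written by json.NewEncoder(file).Encode(metadata):
//   {"Md5sum":"1B2M2Y8AsgTpgAmY7PhCfg==","ContentType":"text/plain"}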
@@ -1,54 +0,0 @@
/*
 * Mini Object Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package filesystem

import (
	"io/ioutil"
	"os"
	"testing"

	. "github.com/minio/check"

	"github.com/minio/minio/pkg/storage/drivers"
)

func Test(t *testing.T) { TestingT(t) }

type MySuite struct{}

var _ = Suite(&MySuite{})

func (s *MySuite) TestAPISuite(c *C) {
	var storageList []string
	create := func() drivers.Driver {
		path, err := ioutil.TempDir(os.TempDir(), "minio-fs-")
		c.Check(err, IsNil)
		storageList = append(storageList, path)
		store, err := NewDriver(path)
		c.Check(err, IsNil)
		return store
	}
	drivers.APITestSuite(c, create)
	defer removeRoots(c, storageList)
}

func removeRoots(c *C, roots []string) {
	for _, root := range roots {
		err := os.RemoveAll(root)
		c.Check(err, IsNil)
	}
}