Mirror of https://github.com/minio/minio.git, synced 2025-11-10 05:59:43 -05:00
fs: Break fs package to top-level and introduce ObjectAPI interface.
The ObjectAPI interface brings in the changes needed for the XL ObjectAPI layer. The new interface for any ObjectAPI layer is as below:
```
// ObjectAPI interface.
type ObjectAPI interface {
	// Bucket resource API.
	DeleteBucket(bucket string) *probe.Error
	ListBuckets() ([]BucketInfo, *probe.Error)
	MakeBucket(bucket string) *probe.Error
	GetBucketInfo(bucket string) (BucketInfo, *probe.Error)

	// Bucket query API.
	ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsResult, *probe.Error)
	ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error)

	// Object resource API.
	GetObject(bucket, object string, startOffset int64) (io.ReadCloser, *probe.Error)
	GetObjectInfo(bucket, object string) (ObjectInfo, *probe.Error)
	PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string) (ObjectInfo, *probe.Error)
	DeleteObject(bucket, object string) *probe.Error

	// Object query API.
	NewMultipartUpload(bucket, object string) (string, *probe.Error)
	PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, *probe.Error)
	ListObjectParts(bucket, object string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error)
	CompleteMultipartUpload(bucket string, object string, uploadID string, parts []CompletePart) (ObjectInfo, *probe.Error)
	AbortMultipartUpload(bucket, object, uploadID string) *probe.Error
}
```
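
Any backend that satisfies this interface can be plugged into the generic API layer, so callers stay agnostic of whether FS or XL sits behind it. A minimal sketch of such a consumer; the `printBucketNames` helper below is hypothetical and not part of this commit:

```
// printBucketNames is a hypothetical consumer of ObjectAPI: it depends
// only on the interface, so FS and XL backends are interchangeable.
func printBucketNames(api ObjectAPI) *probe.Error {
	buckets, err := api.ListBuckets()
	if err != nil {
		return err
	}
	for _, bucket := range buckets {
		fmt.Println(bucket.Name)
	}
	return nil
}
```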
Committed by: Harshavardhana
Parent: 272c5165aa
Commit: efc80343e3
@@ -1,422 +0,0 @@
```
/*
 * Minimalist Object Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fs

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"encoding/xml"
	"math/rand"
	"strconv"

	"gopkg.in/check.v1"
)

// APITestSuite - collection of API tests.
func APITestSuite(c *check.C, create func() Filesystem) {
	testMakeBucket(c, create)
	testMultipleObjectCreation(c, create)
	testPaging(c, create)
	testObjectOverwriteWorks(c, create)
	testNonExistantBucketOperations(c, create)
	testBucketRecreateFails(c, create)
	testPutObjectInSubdir(c, create)
	testListBuckets(c, create)
	testListBucketsOrder(c, create)
	testListObjectsTestsForNonExistantBucket(c, create)
	testNonExistantObjectInBucket(c, create)
	testGetDirectoryReturnsObjectNotFound(c, create)
	testDefaultContentType(c, create)
	testMultipartObjectCreation(c, create)
	testMultipartObjectAbort(c, create)
}

func testMakeBucket(c *check.C, create func() Filesystem) {
	fs := create()
	err := fs.MakeBucket("bucket")
	c.Assert(err, check.IsNil)
}

func testMultipartObjectCreation(c *check.C, create func() Filesystem) {
	fs := create()
	err := fs.MakeBucket("bucket")
	c.Assert(err, check.IsNil)
	uploadID, err := fs.NewMultipartUpload("bucket", "key")
	c.Assert(err, check.IsNil)

	completedParts := CompleteMultipartUpload{}
	// completedParts.Part = make([]CompletePart, 10)
	for i := 1; i <= 10; i++ {
		randomPerm := rand.Perm(10)
		randomString := ""
		for _, num := range randomPerm {
			randomString = randomString + strconv.Itoa(num)
		}

		hasher := md5.New()
		hasher.Write([]byte(randomString))
		expectedMD5Sumhex := hex.EncodeToString(hasher.Sum(nil))

		var calculatedMD5sum string
		calculatedMD5sum, err = fs.CreateObjectPart("bucket", "key", uploadID, i, int64(len(randomString)), bytes.NewBufferString(randomString), hasher.Sum(nil))
		c.Assert(err, check.IsNil)
		c.Assert(calculatedMD5sum, check.Equals, expectedMD5Sumhex)
		completedParts.Part = append(completedParts.Part, CompletePart{PartNumber: i, ETag: calculatedMD5sum})
	}
	completedPartsBytes, e := xml.Marshal(completedParts)
	c.Assert(e, check.IsNil)
	objectInfo, err := fs.CompleteMultipartUpload("bucket", "key", uploadID, completedPartsBytes)
	c.Assert(err, check.IsNil)
	c.Assert(objectInfo.MD5Sum, check.Equals, "9b7d6f13ba00e24d0b02de92e814891b-10")
}

func testMultipartObjectAbort(c *check.C, create func() Filesystem) {
	fs := create()
	err := fs.MakeBucket("bucket")
	c.Assert(err, check.IsNil)
	uploadID, err := fs.NewMultipartUpload("bucket", "key")
	c.Assert(err, check.IsNil)

	parts := make(map[int]string)
	for i := 1; i <= 10; i++ {
		randomPerm := rand.Perm(10)
		randomString := ""
		for _, num := range randomPerm {
			randomString = randomString + strconv.Itoa(num)
		}

		hasher := md5.New()
		hasher.Write([]byte(randomString))
		expectedMD5Sumhex := hex.EncodeToString(hasher.Sum(nil))

		var calculatedMD5sum string
		calculatedMD5sum, err = fs.CreateObjectPart("bucket", "key", uploadID, i, int64(len(randomString)), bytes.NewBufferString(randomString), hasher.Sum(nil))
		c.Assert(err, check.IsNil)
		c.Assert(calculatedMD5sum, check.Equals, expectedMD5Sumhex)
		parts[i] = expectedMD5Sumhex
	}
	err = fs.AbortMultipartUpload("bucket", "key", uploadID)
	c.Assert(err, check.IsNil)
}

func testMultipleObjectCreation(c *check.C, create func() Filesystem) {
	objects := make(map[string][]byte)
	fs := create()
	err := fs.MakeBucket("bucket")
	c.Assert(err, check.IsNil)
	for i := 0; i < 10; i++ {
		randomPerm := rand.Perm(10)
		randomString := ""
		for _, num := range randomPerm {
			randomString = randomString + strconv.Itoa(num)
		}

		hasher := md5.New()
		hasher.Write([]byte(randomString))
		expectedMD5Sumhex := hex.EncodeToString(hasher.Sum(nil))

		key := "obj" + strconv.Itoa(i)
		objects[key] = []byte(randomString)
		var objectInfo ObjectInfo
		objectInfo, err = fs.CreateObject("bucket", key, int64(len(randomString)), bytes.NewBufferString(randomString), hasher.Sum(nil))
		c.Assert(err, check.IsNil)
		c.Assert(objectInfo.MD5Sum, check.Equals, expectedMD5Sumhex)
	}

	for key, value := range objects {
		var byteBuffer bytes.Buffer
		_, err := fs.GetObject(&byteBuffer, "bucket", key, 0, 0)
		c.Assert(err, check.IsNil)
		c.Assert(byteBuffer.Bytes(), check.DeepEquals, value)

		metadata, err := fs.GetObjectInfo("bucket", key)
		c.Assert(err, check.IsNil)
		c.Assert(metadata.Size, check.Equals, int64(len(value)))
	}
}

func testPaging(c *check.C, create func() Filesystem) {
	fs := create()
	fs.MakeBucket("bucket")
	result, err := fs.ListObjects("bucket", "", "", "", 0)
	c.Assert(err, check.IsNil)
	c.Assert(len(result.Objects), check.Equals, 0)
	c.Assert(result.IsTruncated, check.Equals, false)
	// Check before paging occurs.
	for i := 0; i < 5; i++ {
		key := "obj" + strconv.Itoa(i)
		_, err = fs.CreateObject("bucket", key, int64(len(key)), bytes.NewBufferString(key), nil)
		c.Assert(err, check.IsNil)
		result, err = fs.ListObjects("bucket", "", "", "", 5)
		c.Assert(err, check.IsNil)
		c.Assert(len(result.Objects), check.Equals, i+1)
		c.Assert(result.IsTruncated, check.Equals, false)
	}
	// Check that pages work once paging occurs.
	for i := 6; i <= 10; i++ {
		key := "obj" + strconv.Itoa(i)
		_, err = fs.CreateObject("bucket", key, int64(len(key)), bytes.NewBufferString(key), nil)
		c.Assert(err, check.IsNil)
		result, err = fs.ListObjects("bucket", "obj", "", "", 5)
		c.Assert(err, check.IsNil)
		c.Assert(len(result.Objects), check.Equals, 5)
		c.Assert(result.IsTruncated, check.Equals, true)
	}
	// Check that paging with a narrow prefix returns fewer objects.
	{
		_, err = fs.CreateObject("bucket", "newPrefix", int64(len("prefix1")), bytes.NewBufferString("prefix1"), nil)
		c.Assert(err, check.IsNil)
		_, err = fs.CreateObject("bucket", "newPrefix2", int64(len("prefix2")), bytes.NewBufferString("prefix2"), nil)
		c.Assert(err, check.IsNil)
		result, err = fs.ListObjects("bucket", "new", "", "", 5)
		c.Assert(err, check.IsNil)
		c.Assert(len(result.Objects), check.Equals, 2)
	}

	// Check ordering of pages.
	{
		result, err = fs.ListObjects("bucket", "", "", "", 1000)
		c.Assert(err, check.IsNil)
		c.Assert(result.Objects[0].Name, check.Equals, "newPrefix")
		c.Assert(result.Objects[1].Name, check.Equals, "newPrefix2")
		c.Assert(result.Objects[2].Name, check.Equals, "obj0")
		c.Assert(result.Objects[3].Name, check.Equals, "obj1")
		c.Assert(result.Objects[4].Name, check.Equals, "obj10")
	}

	// Check delimited results with delimiter and prefix.
	{
		_, err = fs.CreateObject("bucket", "this/is/delimited", int64(len("prefix1")), bytes.NewBufferString("prefix1"), nil)
		c.Assert(err, check.IsNil)
		_, err = fs.CreateObject("bucket", "this/is/also/a/delimited/file", int64(len("prefix2")), bytes.NewBufferString("prefix2"), nil)
		c.Assert(err, check.IsNil)
		result, err = fs.ListObjects("bucket", "this/is/", "", "/", 10)
		c.Assert(err, check.IsNil)
		c.Assert(len(result.Objects), check.Equals, 1)
		c.Assert(result.Prefixes[0], check.Equals, "this/is/also/")
	}

	// Check delimited results with delimiter but without prefix.
	{
		result, err = fs.ListObjects("bucket", "", "", "/", 1000)
		c.Assert(err, check.IsNil)
		c.Assert(result.Objects[0].Name, check.Equals, "newPrefix")
		c.Assert(result.Objects[1].Name, check.Equals, "newPrefix2")
		c.Assert(result.Objects[2].Name, check.Equals, "obj0")
		c.Assert(result.Objects[3].Name, check.Equals, "obj1")
		c.Assert(result.Objects[4].Name, check.Equals, "obj10")
		c.Assert(result.Prefixes[0], check.Equals, "this/")
	}

	// Check results with marker.
	{
		result, err = fs.ListObjects("bucket", "", "newPrefix", "", 3)
		c.Assert(err, check.IsNil)
		c.Assert(result.Objects[0].Name, check.Equals, "newPrefix2")
		c.Assert(result.Objects[1].Name, check.Equals, "obj0")
		c.Assert(result.Objects[2].Name, check.Equals, "obj1")
	}
	// Check ordering of results with prefix.
	{
		result, err = fs.ListObjects("bucket", "obj", "", "", 1000)
		c.Assert(err, check.IsNil)
		c.Assert(result.Objects[0].Name, check.Equals, "obj0")
		c.Assert(result.Objects[1].Name, check.Equals, "obj1")
		c.Assert(result.Objects[2].Name, check.Equals, "obj10")
		c.Assert(result.Objects[3].Name, check.Equals, "obj2")
		c.Assert(result.Objects[4].Name, check.Equals, "obj3")
	}
	// Check ordering of results with prefix and no paging.
	{
		result, err = fs.ListObjects("bucket", "new", "", "", 5)
		c.Assert(err, check.IsNil)
		c.Assert(result.Objects[0].Name, check.Equals, "newPrefix")
		c.Assert(result.Objects[1].Name, check.Equals, "newPrefix2")
	}
}

func testObjectOverwriteWorks(c *check.C, create func() Filesystem) {
	fs := create()
	err := fs.MakeBucket("bucket")
	c.Assert(err, check.IsNil)

	_, err = fs.CreateObject("bucket", "object", int64(len("one")), bytes.NewBufferString("one"), nil)
	c.Assert(err, check.IsNil)
	// c.Assert(md5Sum1hex, check.Equals, objectInfo.MD5Sum)

	_, err = fs.CreateObject("bucket", "object", int64(len("three")), bytes.NewBufferString("three"), nil)
	c.Assert(err, check.IsNil)

	var bytesBuffer bytes.Buffer
	length, err := fs.GetObject(&bytesBuffer, "bucket", "object", 0, 0)
	c.Assert(err, check.IsNil)
	c.Assert(length, check.Equals, int64(len("three")))
	c.Assert(string(bytesBuffer.Bytes()), check.Equals, "three")
}

func testNonExistantBucketOperations(c *check.C, create func() Filesystem) {
	fs := create()
	_, err := fs.CreateObject("bucket", "object", int64(len("one")), bytes.NewBufferString("one"), nil)
	c.Assert(err, check.Not(check.IsNil))
}

func testBucketRecreateFails(c *check.C, create func() Filesystem) {
	fs := create()
	err := fs.MakeBucket("string")
	c.Assert(err, check.IsNil)
	err = fs.MakeBucket("string")
	c.Assert(err, check.Not(check.IsNil))
}

func testPutObjectInSubdir(c *check.C, create func() Filesystem) {
	fs := create()
	err := fs.MakeBucket("bucket")
	c.Assert(err, check.IsNil)

	_, err = fs.CreateObject("bucket", "dir1/dir2/object", int64(len("hello world")), bytes.NewBufferString("hello world"), nil)
	c.Assert(err, check.IsNil)

	var bytesBuffer bytes.Buffer
	length, err := fs.GetObject(&bytesBuffer, "bucket", "dir1/dir2/object", 0, 0)
	c.Assert(err, check.IsNil)
	c.Assert(len(bytesBuffer.Bytes()), check.Equals, len("hello world"))
	c.Assert(int64(len(bytesBuffer.Bytes())), check.Equals, length)
}

func testListBuckets(c *check.C, create func() Filesystem) {
	fs := create()

	// Test empty list.
	buckets, err := fs.ListBuckets()
	c.Assert(err, check.IsNil)
	c.Assert(len(buckets), check.Equals, 0)

	// Add one and test it exists.
	err = fs.MakeBucket("bucket1")
	c.Assert(err, check.IsNil)

	buckets, err = fs.ListBuckets()
	c.Assert(len(buckets), check.Equals, 1)
	c.Assert(err, check.IsNil)

	// Add a second and test it exists.
	err = fs.MakeBucket("bucket2")
	c.Assert(err, check.IsNil)

	buckets, err = fs.ListBuckets()
	c.Assert(len(buckets), check.Equals, 2)
	c.Assert(err, check.IsNil)

	// Add a third and test it exists, with a shared prefix.
	err = fs.MakeBucket("bucket22")
	c.Assert(err, check.IsNil)

	buckets, err = fs.ListBuckets()
	c.Assert(len(buckets), check.Equals, 3)
	c.Assert(err, check.IsNil)
}

func testListBucketsOrder(c *check.C, create func() Filesystem) {
	// If the implementation contains a map, the order of map keys will vary.
	// This ensures buckets are returned in the same order each time.
	for i := 0; i < 10; i++ {
		fs := create()
		// Add buckets and test they exist, in order.
		err := fs.MakeBucket("bucket1")
		c.Assert(err, check.IsNil)
		err = fs.MakeBucket("bucket2")
		c.Assert(err, check.IsNil)
		buckets, err := fs.ListBuckets()
		c.Assert(err, check.IsNil)
		c.Assert(len(buckets), check.Equals, 2)
		c.Assert(buckets[0].Name, check.Equals, "bucket1")
		c.Assert(buckets[1].Name, check.Equals, "bucket2")
	}
}

func testListObjectsTestsForNonExistantBucket(c *check.C, create func() Filesystem) {
	fs := create()
	result, err := fs.ListObjects("bucket", "", "", "", 1000)
	c.Assert(err, check.Not(check.IsNil))
	c.Assert(result.IsTruncated, check.Equals, false)
	c.Assert(len(result.Objects), check.Equals, 0)
}

func testNonExistantObjectInBucket(c *check.C, create func() Filesystem) {
	fs := create()
	err := fs.MakeBucket("bucket")
	c.Assert(err, check.IsNil)

	var byteBuffer bytes.Buffer
	length, err := fs.GetObject(&byteBuffer, "bucket", "dir1", 0, 0)
	c.Assert(length, check.Equals, int64(0))
	c.Assert(err, check.Not(check.IsNil))
	c.Assert(len(byteBuffer.Bytes()), check.Equals, 0)
	switch err := err.ToGoError().(type) {
	case ObjectNotFound:
		c.Assert(err, check.ErrorMatches, "Object not found: bucket#dir1")
	default:
		c.Assert(err, check.Equals, "fails")
	}
}

func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() Filesystem) {
	fs := create()
	err := fs.MakeBucket("bucket")
	c.Assert(err, check.IsNil)

	_, err = fs.CreateObject("bucket", "dir1/dir2/object", int64(len("hello world")), bytes.NewBufferString("hello world"), nil)
	c.Assert(err, check.IsNil)

	var byteBuffer bytes.Buffer
	length, err := fs.GetObject(&byteBuffer, "bucket", "dir1", 0, 0)
	c.Assert(length, check.Equals, int64(0))
	switch err := err.ToGoError().(type) {
	case ObjectNotFound:
		c.Assert(err.Bucket, check.Equals, "bucket")
		c.Assert(err.Object, check.Equals, "dir1")
	default:
		// Force a failure with a line number.
		c.Assert(err, check.Equals, "ObjectNotFound")
	}
	c.Assert(len(byteBuffer.Bytes()), check.Equals, 0)

	var byteBuffer2 bytes.Buffer
	length, err = fs.GetObject(&byteBuffer2, "bucket", "dir1/", 0, 0)
	c.Assert(length, check.Equals, int64(0))
	switch err := err.ToGoError().(type) {
	case ObjectNotFound:
		c.Assert(err.Bucket, check.Equals, "bucket")
		c.Assert(err.Object, check.Equals, "dir1/")
	default:
		// Force a failure with a line number.
		c.Assert(err, check.Equals, "ObjectNotFound")
	}
	c.Assert(len(byteBuffer2.Bytes()), check.Equals, 0)
}

func testDefaultContentType(c *check.C, create func() Filesystem) {
	fs := create()
	err := fs.MakeBucket("bucket")
	c.Assert(err, check.IsNil)

	// Test empty content type.
	_, err = fs.CreateObject("bucket", "one", int64(len("one")), bytes.NewBufferString("one"), nil)
	c.Assert(err, check.IsNil)
	metadata, err := fs.GetObjectInfo("bucket", "one")
	c.Assert(err, check.IsNil)
	c.Assert(metadata.ContentType, check.Equals, "application/octet-stream")
}
```
pkg/fs/dir.go
@@ -1,318 +0,0 @@
```
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fs

import (
	"io"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"time"
)

const (
	// listObjectsLimit - maximum list objects limit.
	listObjectsLimit = 1000
)

// isDirEmpty - returns whether given directory is empty or not.
func isDirEmpty(dirname string) (status bool, err error) {
	f, err := os.Open(dirname)
	if err == nil {
		defer f.Close()
		if _, err = f.Readdirnames(1); err == io.EOF {
			status = true
			err = nil
		}
	}
	return
}

// isDirExist - returns whether given directory exists or not.
func isDirExist(dirname string) (status bool, err error) {
	fi, err := os.Lstat(dirname)
	if err == nil {
		status = fi.IsDir()
	}
	return
}

// byName implements sort.Interface for sorting os.FileInfo list.
type byName []os.FileInfo

func (f byName) Len() int {
	return len(f)
}

func (f byName) Swap(i, j int) {
	f[i], f[j] = f[j], f[i]
}

func (f byName) Less(i, j int) bool {
	n1 := f[i].Name()
	if f[i].IsDir() {
		n1 = n1 + string(os.PathSeparator)
	}
	n2 := f[j].Name()
	if f[j].IsDir() {
		n2 = n2 + string(os.PathSeparator)
	}
	return n1 < n2
}

// ObjectInfo - object info.
type ObjectInfo struct {
	Bucket       string
	Name         string
	ModifiedTime time.Time
	ContentType  string
	MD5Sum       string
	Size         int64
	IsDir        bool
	Err          error
}

// searchFileInfos uses sort.Search() internally to jump to the file entry containing the prefix.
func searchFileInfos(fileInfos []os.FileInfo, x string) int {
	processFunc := func(i int) bool {
		return fileInfos[i].Name() >= x
	}
	return sort.Search(len(fileInfos), processFunc)
}

// readDir - read 'scanDir' directory. It returns a list of ObjectInfo.
// Each object name is prepended with 'namePrefix'.
func readDir(scanDir, namePrefix, queryPrefix string, isFirst bool) (objInfos []ObjectInfo) {
	f, err := os.Open(scanDir)
	if err != nil {
		objInfos = append(objInfos, ObjectInfo{Err: err})
		return
	}
	fis, err := f.Readdir(-1)
	if err != nil {
		f.Close()
		objInfos = append(objInfos, ObjectInfo{Err: err})
		return
	}
	// Close the directory.
	f.Close()
	// Sort files by Name.
	sort.Sort(byName(fis))

	var prefixIndex int
	// Searching for entries with object names containing the prefix.
	// Binary search is used for efficient search.
	if queryPrefix != "" && isFirst {
		prefixIndex = searchFileInfos(fis, queryPrefix)
		if prefixIndex == len(fis) {
			return
		}
		if !strings.HasPrefix(fis[prefixIndex].Name(), queryPrefix) {
			return
		}
		fis = fis[prefixIndex:]
	}

	// Populate []ObjectInfo from []FileInfo.
	for _, fi := range fis {
		name := fi.Name()
		if queryPrefix != "" && isFirst {
			// If control is here then there is a queryPrefix, and there are objects which satisfy the prefix.
			// Since the result is sorted, the object names which satisfy the query prefix are stored one after the other.
			// Push the objectInfo only if it contains the prefix.
			// This ensures that the channel carrying object info only has objects with the given queryPrefix.
			if !strings.HasPrefix(name, queryPrefix) {
				return
			}
		}
		size := fi.Size()
		modTime := fi.ModTime()
		isDir := fi.Mode().IsDir()

		// Add prefix if name prefix exists.
		if namePrefix != "" {
			name = namePrefix + "/" + name
		}

		// Directories explicitly end with '/'.
		if isDir {
			name += "/"
			// size is set to '0' for directories explicitly.
			size = 0
		}

		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
			// Handle symlink by doing an additional stat and following the link.
			st, e := os.Stat(filepath.Join(scanDir, name))
			if e != nil {
				objInfos = append(objInfos, ObjectInfo{Err: e})
				return
			}
			size = st.Size()
			modTime = st.ModTime()
			isDir = st.Mode().IsDir()
			// Directories explicitly end with '/'.
			if isDir {
				name += "/"
				// size is set to '0' for directories explicitly.
				size = 0
			}
		}

		// Populate []ObjectInfo.
		objInfos = append(objInfos, ObjectInfo{
			Name:         name,
			ModifiedTime: modTime,
			MD5Sum:       "", // TODO
			Size:         size,
			IsDir:        isDir,
		})
	}
	return
}

// ObjectInfoChannel - object info channel.
type ObjectInfoChannel struct {
	ch        <-chan ObjectInfo
	objInfo   *ObjectInfo
	closed    bool
	timeoutCh <-chan struct{}
	timedOut  bool
}

// Read - returns the next ObjectInfo, keeping one object of look-ahead
// so that IsClosed can answer accurately between reads.
func (oic *ObjectInfoChannel) Read() (ObjectInfo, bool) {
	if oic.closed {
		return ObjectInfo{}, false
	}

	if oic.objInfo == nil {
		// First read.
		if oi, ok := <-oic.ch; ok {
			oic.objInfo = &oi
		} else {
			oic.closed = true
			return ObjectInfo{}, false
		}
	}

	retObjInfo := *oic.objInfo
	status := true
	oic.objInfo = nil

	// Read once more to know whether it was the last read.
	if oi, ok := <-oic.ch; ok {
		oic.objInfo = &oi
	} else {
		oic.closed = true
	}

	return retObjInfo, status
}

// IsClosed - return whether channel is closed or not.
func (oic ObjectInfoChannel) IsClosed() bool {
	if oic.objInfo != nil {
		return false
	}
	return oic.closed
}

// IsTimedOut - return whether channel is closed due to timeout.
// (Pointer receiver so that the cached timedOut flag actually sticks.)
func (oic *ObjectInfoChannel) IsTimedOut() bool {
	if oic.timedOut {
		return true
	}

	select {
	case _, ok := <-oic.timeoutCh:
		if ok {
			oic.timedOut = true
			return true
		}
		return false
	default:
		return false
	}
}

// treeWalk - walk into 'scanDir' recursively when 'recursive' is true.
// It uses 'bucketDir' to get the name prefix for the object name.
func treeWalk(scanDir, bucketDir string, recursive bool, queryPrefix string) ObjectInfoChannel {
	objectInfoCh := make(chan ObjectInfo, listObjectsLimit)
	timeoutCh := make(chan struct{}, 1)

	// goroutine - retrieves directory entries, makes ObjectInfo and sends them into the channel.
	go func() {
		defer close(objectInfoCh)
		defer close(timeoutCh)

		// send function - returns true if ObjectInfo is sent
		// within (time.Second * 15), else false on time-out.
		send := func(oi ObjectInfo) bool {
			timer := time.After(time.Second * 15)
			select {
			case objectInfoCh <- oi:
				return true
			case <-timer:
				timeoutCh <- struct{}{}
				return false
			}
		}

		namePrefix := strings.Replace(filepath.ToSlash(scanDir), filepath.ToSlash(bucketDir), "", 1)
		if strings.HasPrefix(namePrefix, "/") {
			// Remove forward slash ("/") from beginning.
			namePrefix = namePrefix[1:]
		}
		// The last argument (isFirst) is set to `true` only during the first run of the function.
		// This makes sure that the sub-directories inside the prefixDir are recursed
		// without being asserted for prefix in the object name.
		isFirst := true
		for objInfos := readDir(scanDir, namePrefix, queryPrefix, isFirst); len(objInfos) > 0; {
			var objInfo ObjectInfo
			objInfo, objInfos = objInfos[0], objInfos[1:]
			if !send(objInfo) {
				return
			}

			if objInfo.IsDir && recursive {
				scanDir := filepath.Join(bucketDir, filepath.FromSlash(objInfo.Name))
				namePrefix = strings.Replace(filepath.ToSlash(scanDir), filepath.ToSlash(bucketDir), "", 1)
				if strings.HasPrefix(namePrefix, "/") {
					// Remove forward slash ("/") from beginning.
					namePrefix = namePrefix[1:]
				}
				// The last argument is set to false in the further calls to readDir.
				isFirst = false
				objInfos = append(readDir(scanDir, namePrefix, queryPrefix, isFirst), objInfos...)
			}
		}
	}()

	return ObjectInfoChannel{ch: objectInfoCh, timeoutCh: timeoutCh}
}
```
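
The one-object look-ahead kept by `Read` is what makes `IsClosed` reliable between reads: the channel is only reported closed once the producer goroutine has exited and the look-ahead slot is empty. A minimal draining loop, as a sketch assuming `rootDir` and `bucketDir` point at an existing bucket directory:

```
ch := treeWalk(rootDir, bucketDir, true, "")
for {
	objInfo, ok := ch.Read()
	if !ok {
		// Producer finished, or gave up after its 15-second send timeout.
		break
	}
	if objInfo.Err != nil {
		// readDir propagates filesystem errors through the same channel.
		break
	}
	fmt.Println(objInfo.Name, objInfo.Size)
}
```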
@@ -1,57 +0,0 @@
```
/*
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fs

import (
	"github.com/minio/minio/pkg/probe"
	"github.com/minio/minio/pkg/quick"
)

var multipartsMetadataPath string

// setFSMultipartsMetadataPath - set custom multiparts session
// metadata path.
func setFSMultipartsMetadataPath(metadataPath string) {
	multipartsMetadataPath = metadataPath
}

// saveMultipartsSession - save multiparts session metadata.
func saveMultipartsSession(multiparts Multiparts) *probe.Error {
	qc, err := quick.New(multiparts)
	if err != nil {
		return err.Trace()
	}
	if err := qc.Save(multipartsMetadataPath); err != nil {
		return err.Trace()
	}
	return nil
}

// loadMultipartsSession - load multiparts session file.
func loadMultipartsSession() (*Multiparts, *probe.Error) {
	multiparts := &Multiparts{}
	multiparts.Version = "1"
	multiparts.ActiveSession = make(map[string]*MultipartSession)
	qc, err := quick.New(multiparts)
	if err != nil {
		return nil, err.Trace()
	}
	if err := qc.Load(multipartsMetadataPath); err != nil {
		return nil, err.Trace()
	}
	return qc.Data().(*Multiparts), nil
}
```
@@ -1,178 +0,0 @@
```
/*
 * Minio Cloud Storage, (C) 2015-2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fs

import (
	"fmt"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"strings"

	"github.com/minio/minio/pkg/probe"
)

// ListObjects - lists all objects for a given prefix, returns up to
// maxKeys number of objects per call.
func (fs Filesystem) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsResult, *probe.Error) {
	result := ListObjectsResult{}
	var queryPrefix string

	// Input validation.
	if !IsValidBucketName(bucket) {
		return result, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketDir := filepath.Join(fs.path, bucket)
	// Verify if bucket exists.
	if status, err := isDirExist(bucketDir); !status {
		if err == nil {
			// File exists, but it is not a directory.
			return result, probe.NewError(BucketNotFound{Bucket: bucket})
		} else if os.IsNotExist(err) {
			// File does not exist.
			return result, probe.NewError(BucketNotFound{Bucket: bucket})
		} else {
			return result, probe.NewError(err)
		}
	}
	if !IsValidObjectPrefix(prefix) {
		return result, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: prefix})
	}

	// Verify if delimiter is anything other than '/', which we do not support.
	if delimiter != "" && delimiter != "/" {
		return result, probe.NewError(fmt.Errorf("delimiter '%s' is not supported", delimiter))
	}

	// If marker is set, unescape it.
	if marker != "" {
		if markerUnescaped, err := url.QueryUnescape(marker); err == nil {
			marker = markerUnescaped
		} else {
			return result, probe.NewError(err)
		}

		if !strings.HasPrefix(marker, prefix) {
			return result, probe.NewError(fmt.Errorf("Invalid combination of marker '%s' and prefix '%s'", marker, prefix))
		}
	}

	// Return empty response for a valid request when maxKeys is 0.
	if maxKeys == 0 {
		return result, nil
	}

	// Overflowing maxKeys - reset to listObjectsLimit.
	if maxKeys < 0 || maxKeys > listObjectsLimit {
		maxKeys = listObjectsLimit
	}

	// Verify if prefix exists.
	prefixDir := filepath.Dir(filepath.FromSlash(prefix))
	rootDir := filepath.Join(bucketDir, prefixDir)
	_, err := isDirExist(rootDir)
	if err != nil {
		if os.IsNotExist(err) {
			// Prefix does not exist; not an error, just respond with an empty
			// list response.
			return result, nil
		}
		// Remaining errors should be treated as failure.
		return result, probe.NewError(err)
	}

	recursive := true
	skipDir := true
	if delimiter == "/" {
		skipDir = false
		recursive = false
	}

	// A maximum of 1000 objects is returned in a single call to ListObjects.
	// Further calls will set the right marker value to continue reading the rest of the objectList.
	// popListObjectCh returns nil if the call to ListObjects is made for the first time.
	// On further calls to ListObjects to retrieve more objects within the timeout period,
	// popListObjectCh returns the channel from which the rest of the objects can be retrieved.
	objectInfoCh := fs.popListObjectCh(ListObjectParams{bucket, delimiter, marker, prefix})
	if objectInfoCh == nil {
		if prefix != "" {
			// queryPrefix is set to the value of the prefix to be searched.
			// If the prefix contains a directory hierarchy, queryPrefix is set to the empty string;
			// this ensures that all objects inside the prefixDir are listed.
			// Otherwise the base name is extracted with path.Base and used as the query string.
			// If prefix = /Asia/India/, queryPrefix is set to the empty string (""), so that all objects in prefixDir are listed.
			// If prefix = /Asia/India/summerpics, queryPrefix is set to "summerpics",
			// so all objects with the prefix "summerpics" inside the /Asia/India/ prefix folder get listed.
			if prefix[len(prefix)-1:] == "/" {
				queryPrefix = ""
			} else {
				queryPrefix = path.Base(prefix)
			}
		}
		ch := treeWalk(rootDir, bucketDir, recursive, queryPrefix)
		objectInfoCh = &ch
	}

	nextMarker := ""
	for i := 0; i < maxKeys; {
		objInfo, ok := objectInfoCh.Read()
		if !ok {
			// Closed channel.
			return result, nil
		}

		if objInfo.Err != nil {
			return ListObjectsResult{}, probe.NewError(objInfo.Err)
		}

		if strings.Contains(objInfo.Name, "$multiparts") || strings.Contains(objInfo.Name, "$tmpobject") {
			continue
		}

		if objInfo.IsDir && skipDir {
			continue
		}

		// Add the bucket.
		objInfo.Bucket = bucket
		// In case it is not the first call to ListObjects (before timeout),
		// the result is already inside the buffered channel.
		if objInfo.Name > marker {
			if objInfo.IsDir {
				result.Prefixes = append(result.Prefixes, objInfo.Name)
			} else {
				result.Objects = append(result.Objects, objInfo)
			}
			nextMarker = objInfo.Name
			i++
		}
	}

	if !objectInfoCh.IsClosed() {
		result.IsTruncated = true
		result.NextMarker = nextMarker
		fs.pushListObjectCh(ListObjectParams{bucket, delimiter, nextMarker, prefix}, *objectInfoCh)
	}

	return result, nil
}
```
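
Because a truncated listing parks its live channel under its `ListObjectParams` key via `pushListObjectCh`, a client pages through a large bucket by feeding `NextMarker` back into the next call. A rough usage sketch against this `Filesystem` API (error handling elided):

```
marker := ""
for {
	result, err := fs.ListObjects("bucket", "", marker, "", 1000)
	if err != nil {
		break // handle the *probe.Error as appropriate
	}
	for _, objInfo := range result.Objects {
		fmt.Println(objInfo.Name)
	}
	if !result.IsTruncated {
		break // no more pages
	}
	marker = result.NextMarker
}
```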
@@ -1,609 +0,0 @@
```
/*
 * Minio Cloud Storage, (C) 2015-2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fs

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
	"strings"
	"testing"
)

func TestListObjects(t *testing.T) {
	// Make a temporary directory to use as the filesystem.
	directory, e := ioutil.TempDir("", "minio-list-object-test")
	if e != nil {
		t.Fatal(e)
	}
	defer os.RemoveAll(directory)

	// Create the filesystem.
	fs, err := New(directory)
	if err != nil {
		t.Fatal(err)
	}
	// This bucket is used for testing ListObject operations.
	err = fs.MakeBucket("test-bucket-list-object")
	if err != nil {
		t.Fatal(err)
	}
	// Will not store any objects in this bucket;
	// it is used to test ListObjects on an empty bucket.
	err = fs.MakeBucket("empty-bucket")
	if err != nil {
		t.Fatal(err)
	}

	tmpfile, e := ioutil.TempFile("", "simple-file.txt")
	if e != nil {
		t.Fatal(e)
	}
	defer os.Remove(tmpfile.Name()) // clean up

	_, err = fs.CreateObject("test-bucket-list-object", "Asia-maps", int64(len("asia-maps")), bytes.NewBufferString("asia-maps"), nil)
	if err != nil {
		t.Fatal(err)
	}

	_, err = fs.CreateObject("test-bucket-list-object", "Asia/India/India-summer-photos-1", int64(len("contentstring")), bytes.NewBufferString("contentstring"), nil)
	if err != nil {
		t.Fatal(err)
	}

	_, err = fs.CreateObject("test-bucket-list-object", "Asia/India/Karnataka/Bangalore/Koramangala/pics", int64(len("contentstring")), bytes.NewBufferString("contentstring"), nil)
	if err != nil {
		t.Fatal(err)
	}

	for i := 0; i < 2; i++ {
		key := "newPrefix" + strconv.Itoa(i)
		_, err = fs.CreateObject("test-bucket-list-object", key, int64(len(key)), bytes.NewBufferString(key), nil)
		if err != nil {
			t.Fatal(err)
		}
	}
	_, err = fs.CreateObject("test-bucket-list-object", "newzen/zen/recurse/again/again/again/pics", int64(len("recurse")), bytes.NewBufferString("recurse"), nil)
	if err != nil {
		t.Fatal(err)
	}

	for i := 0; i < 3; i++ {
		key := "obj" + strconv.Itoa(i)
		_, err = fs.CreateObject("test-bucket-list-object", key, int64(len(key)), bytes.NewBufferString(key), nil)
		if err != nil {
			t.Fatal(err)
		}
	}

	// Formulating the result data set to be expected from ListObjects calls inside the tests.
	// This will be used in testCases for asserting the correctness of ListObjects output.

	resultCases := []ListObjectsResult{
		// ListObjectsResult-0.
		// Testing for listing all objects in the bucket, (testCase 20,21,22).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "Asia-maps"},
				{Name: "Asia/India/India-summer-photos-1"},
				{Name: "Asia/India/Karnataka/Bangalore/Koramangala/pics"},
				{Name: "newPrefix0"},
				{Name: "newPrefix1"},
				{Name: "newzen/zen/recurse/again/again/again/pics"},
				{Name: "obj0"},
				{Name: "obj1"},
				{Name: "obj2"},
			},
		},
		// ListObjectsResult-1.
		// Used for asserting the truncated case, (testCase 23).
		{
			IsTruncated: true,
			Objects: []ObjectInfo{
				{Name: "Asia-maps"},
				{Name: "Asia/India/India-summer-photos-1"},
				{Name: "Asia/India/Karnataka/Bangalore/Koramangala/pics"},
				{Name: "newPrefix0"},
				{Name: "newPrefix1"},
			},
		},
		// ListObjectsResult-2.
		// (TestCase 24).
		{
			IsTruncated: true,
			Objects: []ObjectInfo{
				{Name: "Asia-maps"},
				{Name: "Asia/India/India-summer-photos-1"},
				{Name: "Asia/India/Karnataka/Bangalore/Koramangala/pics"},
				{Name: "newPrefix0"},
			},
		},
		// ListObjectsResult-3.
		// (TestCase 25).
		{
			IsTruncated: true,
			Objects: []ObjectInfo{
				{Name: "Asia-maps"},
				{Name: "Asia/India/India-summer-photos-1"},
				{Name: "Asia/India/Karnataka/Bangalore/Koramangala/pics"},
			},
		},
		// ListObjectsResult-4.
		// Again used for the truncated case.
		// (TestCase 26).
		{
			IsTruncated: true,
			Objects: []ObjectInfo{
				{Name: "Asia-maps"},
			},
		},
		// ListObjectsResult-5.
		// Used for asserting prefixes.
		// Used for test case with prefix "new", (testCase 27-29).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "newPrefix0"},
				{Name: "newPrefix1"},
				{Name: "newzen/zen/recurse/again/again/again/pics"},
			},
		},
		// ListObjectsResult-6.
		// Used for asserting prefixes.
		// Used for test case with prefix = "obj", (testCase 30).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "obj0"},
				{Name: "obj1"},
				{Name: "obj2"},
			},
		},
		// ListObjectsResult-7.
		// Used for asserting prefixes and truncation.
		// Used for test case with prefix = "new" and maxKeys = 1, (testCase 31).
		{
			IsTruncated: true,
			Objects: []ObjectInfo{
				{Name: "newPrefix0"},
			},
		},
		// ListObjectsResult-8.
		// Used for asserting prefixes.
		// Used for test case with prefix = "obj" and maxKeys = 2, (testCase 32).
		{
			IsTruncated: true,
			Objects: []ObjectInfo{
				{Name: "obj0"},
				{Name: "obj1"},
			},
		},
		// ListObjectsResult-9.
		// Used for asserting the case with marker, but without prefix.
		// Marker is set to "newPrefix0" in the testCase, (testCase 33).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "newPrefix1"},
				{Name: "newzen/zen/recurse/again/again/again/pics"},
				{Name: "obj0"},
				{Name: "obj1"},
				{Name: "obj2"},
			},
		},
		// ListObjectsResult-10.
		// Marker is set to "newPrefix1" in the testCase, (testCase 34).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "newzen/zen/recurse/again/again/again/pics"},
				{Name: "obj0"},
				{Name: "obj1"},
				{Name: "obj2"},
			},
		},
		// ListObjectsResult-11.
		// Marker is set to "obj0" in the testCase, (testCase 35).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "obj1"},
				{Name: "obj2"},
			},
		},
		// ListObjectsResult-12.
		// Marker is set to "obj1" in the testCase, (testCase 36).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "obj2"},
			},
		},
		// ListObjectsResult-13.
		// Marker is set to "man" in the testCase, (testCase 37).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "newPrefix0"},
				{Name: "newPrefix1"},
				{Name: "newzen/zen/recurse/again/again/again/pics"},
				{Name: "obj0"},
				{Name: "obj1"},
				{Name: "obj2"},
			},
		},
		// ListObjectsResult-14.
		// Marker is set to "Abc" in the testCase, (testCase 39).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "Asia-maps"},
				{Name: "Asia/India/India-summer-photos-1"},
				{Name: "Asia/India/Karnataka/Bangalore/Koramangala/pics"},
				{Name: "newPrefix0"},
				{Name: "newPrefix1"},
				{Name: "newzen/zen/recurse/again/again/again/pics"},
				{Name: "obj0"},
				{Name: "obj1"},
				{Name: "obj2"},
			},
		},
		// ListObjectsResult-15.
		// Marker is set to "Asia/India/India-summer-photos-1" in the testCase, (testCase 40).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "Asia/India/Karnataka/Bangalore/Koramangala/pics"},
				{Name: "newPrefix0"},
				{Name: "newPrefix1"},
				{Name: "newzen/zen/recurse/again/again/again/pics"},
				{Name: "obj0"},
				{Name: "obj1"},
				{Name: "obj2"},
			},
		},
		// ListObjectsResult-16.
		// Marker is set to "Asia/India/Karnataka/Bangalore/Koramangala/pics" in the testCase, (testCase 41).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "newPrefix0"},
				{Name: "newPrefix1"},
				{Name: "newzen/zen/recurse/again/again/again/pics"},
				{Name: "obj0"},
				{Name: "obj1"},
				{Name: "obj2"},
			},
		},
		// ListObjectsResult-17.
		// Used for asserting the case with marker, without prefix but with truncation.
		// Marker = "newPrefix0" & maxKeys = 3 in the testCase, (testCase 42).
		// Output truncated to 3 values.
		{
			IsTruncated: true,
			Objects: []ObjectInfo{
				{Name: "newPrefix1"},
				{Name: "newzen/zen/recurse/again/again/again/pics"},
				{Name: "obj0"},
			},
		},
		// ListObjectsResult-18.
		// Marker = "newPrefix1" & maxKeys = 1 in the testCase, (testCase 43).
		// Output truncated to 1 value.
		{
			IsTruncated: true,
			Objects: []ObjectInfo{
				{Name: "newzen/zen/recurse/again/again/again/pics"},
			},
		},
		// ListObjectsResult-19.
		// Marker = "obj0" & maxKeys = 1 in the testCase, (testCase 44).
		// Output truncated to 1 value.
		{
			IsTruncated: true,
			Objects: []ObjectInfo{
				{Name: "obj1"},
			},
		},
		// ListObjectsResult-20.
		// Marker = "obj0" & prefix = "obj" in the testCase, (testCase 45).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "obj1"},
				{Name: "obj2"},
			},
		},
		// ListObjectsResult-21.
		// Marker = "obj1" & prefix = "obj" in the testCase, (testCase 46).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "obj2"},
			},
		},
		// ListObjectsResult-22.
		// Marker = "newPrefix0" & prefix = "new" in the testCase, (testCase 47).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "newPrefix1"},
				{Name: "newzen/zen/recurse/again/again/again/pics"},
			},
		},
		// ListObjectsResult-23.
		// Prefix is set to "Asia/India/" in the testCase, and delimiter is not set (testCase 55).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "Asia/India/India-summer-photos-1"},
				{Name: "Asia/India/Karnataka/Bangalore/Koramangala/pics"},
			},
		},
		// ListObjectsResult-24.
		// Prefix is set to "Asia" in the testCase, and delimiter is not set (testCase 56).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "Asia-maps"},
				{Name: "Asia/India/India-summer-photos-1"},
				{Name: "Asia/India/Karnataka/Bangalore/Koramangala/pics"},
			},
		},
		// ListObjectsResult-25.
		// Prefix is set to "Asia" in the testCase, and delimiter is set (testCase 57).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "Asia-maps"},
			},
		},
		// ListObjectsResult-26.
		// prefix = "new" and delimiter is set in the testCase, (testCase 58).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "newPrefix0"},
				{Name: "newPrefix1"},
			},
		},
		// ListObjectsResult-27.
		// Prefix is set to "Asia/India/" in the testCase, and delimiter is set to forward slash '/' (testCase 59).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "Asia/India/India-summer-photos-1"},
			},
		},
		// ListObjectsResult-28.
		// Marker is set to "Asia/India/India-summer-photos-1" and delimiter is set in the testCase, (testCase 60).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "newPrefix0"},
				{Name: "newPrefix1"},
				{Name: "obj0"},
				{Name: "obj1"},
				{Name: "obj2"},
			},
		},
		// ListObjectsResult-29.
		// Marker is set to "Asia/India/Karnataka/Bangalore/Koramangala/pics" and delimiter is set in the testCase, (testCase 61).
		{
			IsTruncated: false,
			Objects: []ObjectInfo{
				{Name: "newPrefix0"},
				{Name: "newPrefix1"},
				{Name: "obj0"},
				{Name: "obj1"},
				{Name: "obj2"},
			},
		},
	}

	testCases := []struct {
		// Inputs to ListObjects.
		bucketName string
		prefix     string
		marker     string
		delimiter  string
		maxKeys    int
		// Expected output of ListObjects.
		result ListObjectsResult
		err    error
		// Flag indicating whether the test is expected to pass or not.
		shouldPass bool
	}{
		// Test cases with invalid bucket names (test numbers 1-4).
		{".test", "", "", "", 0, ListObjectsResult{}, BucketNameInvalid{Bucket: ".test"}, false},
		{"Test", "", "", "", 0, ListObjectsResult{}, BucketNameInvalid{Bucket: "Test"}, false},
		{"---", "", "", "", 0, ListObjectsResult{}, BucketNameInvalid{Bucket: "---"}, false},
		{"ad", "", "", "", 0, ListObjectsResult{}, BucketNameInvalid{Bucket: "ad"}, false},
		// Using an existing file for bucket name, but it is not a directory (5).
		{"simple-file.txt", "", "", "", 0, ListObjectsResult{}, BucketNotFound{Bucket: "simple-file.txt"}, false},
		// Valid bucket names, but they do not exist (6-8).
		{"volatile-bucket-1", "", "", "", 0, ListObjectsResult{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
		{"volatile-bucket-2", "", "", "", 0, ListObjectsResult{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
		{"volatile-bucket-3", "", "", "", 0, ListObjectsResult{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
		// Valid, existing bucket, but sending invalid delimiter values (9-10).
		// Empty string < "" > and forward slash < / > are the only two valid arguments for delimiter.
		{"test-bucket-list-object", "", "", "*", 0, ListObjectsResult{}, fmt.Errorf("delimiter '%s' is not supported", "*"), false},
		{"test-bucket-list-object", "", "", "-", 0, ListObjectsResult{}, fmt.Errorf("delimiter '%s' is not supported", "-"), false},
		// Marker goes through url.QueryUnescape; sending inputs for which QueryUnescape would fail (11-12).
		// Here is how QueryUnescape behaves: https://golang.org/pkg/net/url/#QueryUnescape.
		// QueryUnescape is necessary since the marker is provided as a URL query parameter.
		{"test-bucket-list-object", "", "test%", "", 0, ListObjectsResult{}, fmt.Errorf("invalid URL escape"), false},
		{"test-bucket-list-object", "", "test%A", "", 0, ListObjectsResult{}, fmt.Errorf("invalid URL escape"), false},
		// Testing for failure cases with both prefix and marker (13).
		// For the prefix and marker combination to be valid it should satisfy strings.HasPrefix(marker, prefix).
		{"test-bucket-list-object", "asia", "europe-object", "", 0, ListObjectsResult{}, fmt.Errorf("Invalid combination of marker '%s' and prefix '%s'", "europe-object", "asia"), false},
		// Setting a non-existing directory to be prefix (14-15).
		{"empty-bucket", "europe/france/", "", "", 1, ListObjectsResult{}, nil, true},
		{"empty-bucket", "europe/tunisia/", "", "", 1, ListObjectsResult{}, nil, true},
		// Testing on empty bucket, that is, bucket without any objects in it (16).
		{"empty-bucket", "", "", "", 0, ListObjectsResult{}, nil, true},
		// Setting maxKeys to a negative value (17) and to a small positive value (18).
		{"empty-bucket", "", "", "", -1, ListObjectsResult{}, nil, true},
		{"empty-bucket", "", "", "", 1, ListObjectsResult{}, nil, true},
		// Setting maxKeys to a very large value (19).
		{"empty-bucket", "", "", "", 1111000000000000, ListObjectsResult{}, nil, true},
		// Testing for all 9 objects in the bucket (20).
		{"test-bucket-list-object", "", "", "", 9, resultCases[0], nil, true},
		// Testing for a negative value of maxKeys; this should set maxKeys to listObjectsLimit (21).
		{"test-bucket-list-object", "", "", "", -1, resultCases[0], nil, true},
		// Testing for a very large value of maxKeys; this should set maxKeys to listObjectsLimit (22).
		{"test-bucket-list-object", "", "", "", 1234567891011, resultCases[0], nil, true},
		// Testing for truncated output (23-26).
		{"test-bucket-list-object", "", "", "", 5, resultCases[1], nil, true},
		{"test-bucket-list-object", "", "", "", 4, resultCases[2], nil, true},
		{"test-bucket-list-object", "", "", "", 3, resultCases[3], nil, true},
		{"test-bucket-list-object", "", "", "", 1, resultCases[4], nil, true},
		// Testing with prefix (27-30).
		{"test-bucket-list-object", "new", "", "", 3, resultCases[5], nil, true},
		{"test-bucket-list-object", "new", "", "", 4, resultCases[5], nil, true},
		{"test-bucket-list-object", "new", "", "", 5, resultCases[5], nil, true},
		{"test-bucket-list-object", "obj", "", "", 3, resultCases[6], nil, true},
		// Testing with prefix and truncation (31-32).
		{"test-bucket-list-object", "new", "", "", 1, resultCases[7], nil, true},
		{"test-bucket-list-object", "obj", "", "", 2, resultCases[8], nil, true},
		// Testing with marker, but without prefix and truncation (33-37).
		{"test-bucket-list-object", "", "newPrefix0", "", 5, resultCases[9], nil, true},
		{"test-bucket-list-object", "", "newPrefix1", "", 4, resultCases[10], nil, true},
		{"test-bucket-list-object", "", "obj0", "", 2, resultCases[11], nil, true},
		{"test-bucket-list-object", "", "obj1", "", 1, resultCases[12], nil, true},
		{"test-bucket-list-object", "", "man", "", 10, resultCases[13], nil, true},
		// Marker being set to a value which is greater than all object names when sorted (38).
		// Expected to send an empty response in this case.
		{"test-bucket-list-object", "", "zen", "", 10, ListObjectsResult{}, nil, true},
		// Marker being set to a value which is less than all object names when sorted (39).
		// Expected to send all the objects in the bucket in this case.
		{"test-bucket-list-object", "", "Abc", "", 10, resultCases[14], nil, true},
		// Marker set to a hierarchical value (40-41).
		{"test-bucket-list-object", "", "Asia/India/India-summer-photos-1", "", 10, resultCases[15], nil, true},
		{"test-bucket-list-object", "", "Asia/India/Karnataka/Bangalore/Koramangala/pics", "", 10, resultCases[16], nil, true},
		// Testing with marker and truncation, but no prefix (42-44).
		{"test-bucket-list-object", "", "newPrefix0", "", 3, resultCases[17], nil, true},
		{"test-bucket-list-object", "", "newPrefix1", "", 1, resultCases[18], nil, true},
		{"test-bucket-list-object", "", "obj0", "", 1, resultCases[19], nil, true},
		// Testing with both marker and prefix, but without truncation (45-47).
		// A valid combination of marker and prefix should satisfy strings.HasPrefix(marker, prefix).
		{"test-bucket-list-object", "obj", "obj0", "", 2, resultCases[20], nil, true},
		{"test-bucket-list-object", "obj", "obj1", "", 1, resultCases[21], nil, true},
		{"test-bucket-list-object", "new", "newPrefix0", "", 2, resultCases[22], nil, true},
		// Testing with maxKeys set to 0 (48-54).
		// The other parameters still have to be valid.
		{"test-bucket-list-object", "", "obj1", "", 0, ListObjectsResult{}, nil, true},
		{"test-bucket-list-object", "", "obj0", "", 0, ListObjectsResult{}, nil, true},
		{"test-bucket-list-object", "new", "", "", 0, ListObjectsResult{}, nil, true},
		{"test-bucket-list-object", "obj", "", "", 0, ListObjectsResult{}, nil, true},
		{"test-bucket-list-object", "obj", "obj0", "", 0, ListObjectsResult{}, nil, true},
		{"test-bucket-list-object", "obj", "obj1", "", 0, ListObjectsResult{}, nil, true},
		{"test-bucket-list-object", "new", "newPrefix0", "", 0, ListObjectsResult{}, nil, true},
		// Tests on hierarchical key names as prefix.
		// Without a delimiter the code should recurse into the prefix Dir.
		// Tests with prefix, but without delimiter (55-56).
		{"test-bucket-list-object", "Asia/India/", "", "", 10, resultCases[23], nil, true},
		{"test-bucket-list-object", "Asia", "", "", 10, resultCases[24], nil, true},
		// Tests with prefix and delimiter (57-59).
		// With a delimiter the code should not recurse into the sub-directories of the prefix Dir.
		{"test-bucket-list-object", "Asia", "", "/", 10, resultCases[25], nil, true},
		{"test-bucket-list-object", "new", "", "/", 10, resultCases[26], nil, true},
		{"test-bucket-list-object", "Asia/India/", "", "/", 10, resultCases[27], nil, true},
		// Test with marker set to a hierarchical value and with delimiter (60-61).
		{"test-bucket-list-object", "", "Asia/India/India-summer-photos-1", "/", 10, resultCases[28], nil, true},
		{"test-bucket-list-object", "", "Asia/India/Karnataka/Bangalore/Koramangala/pics", "/", 10, resultCases[29], nil, true},
	}

	for i, testCase := range testCases {
		result, err := fs.ListObjects(testCase.bucketName, testCase.prefix, testCase.marker, testCase.delimiter, testCase.maxKeys)
		if err != nil && testCase.shouldPass {
			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Cause.Error())
		}
		if err == nil && !testCase.shouldPass {
			t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
		}
		// Failed as expected, but does it fail for the expected reason?
		if err != nil && !testCase.shouldPass {
			if !strings.Contains(err.Cause.Error(), testCase.err.Error()) {
				t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\"", i+1, testCase.err.Error(), err.Cause.Error())
			}
		}
		// Since there are cases for which ListObjects fails, this is necessary.
		// The test passed as expected, but the output values are verified for correctness here.
		if err == nil && testCase.shouldPass {
			// The length of the expected ListObjectsResult.Objects should match in both the expected result from test cases and in the output.
			// On failure call t.Fatalf; otherwise it may lead to an index out of range error in the assertions following this.
			if len(testCase.result.Objects) != len(result.Objects) {
				t.Fatalf("Test %d: Expected number of objects in the result to be '%d', but found '%d' objects instead", i+1, len(testCase.result.Objects), len(result.Objects))
```
}
|
||||
for j := 0; j < len(testCase.result.Objects); j++ {
|
||||
if testCase.result.Objects[j].Name != result.Objects[j].Name {
|
||||
t.Errorf("Test %d: Expected object name to be \"%s\", but found \"%s\" instead", i+1, testCase.result.Objects[j].Name, result.Objects[j].Name)
|
||||
}
|
||||
}
|
||||
if testCase.result.IsTruncated != result.IsTruncated {
|
||||
t.Errorf("Test %d: Expected IsTruncated flag to be %v, but instead found it to be %v", i+1, testCase.result.IsTruncated, result.IsTruncated)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func BenchmarkListObjects(b *testing.B) {
|
||||
// Make a temporary directory to use as the filesystem.
|
||||
directory, e := ioutil.TempDir("", "minio-list-benchmark")
|
||||
if e != nil {
|
||||
b.Fatal(e)
|
||||
}
|
||||
defer os.RemoveAll(directory)
|
||||
|
||||
// Create the filesystem.
|
||||
filesystem, err := New(directory)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
// Create a bucket.
|
||||
err = filesystem.MakeBucket("ls-benchmark-bucket")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 20000; i++ {
|
||||
key := "obj" + strconv.Itoa(i)
|
||||
_, err = filesystem.CreateObject("ls-benchmark-bucket", key, int64(len(key)), bytes.NewBufferString(key), nil)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
// List the buckets over and over and over.
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err = filesystem.ListObjects("ls-benchmark-bucket", "", "obj9000", "", -1)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
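The marker/NextMarker pair exercised by the truncation cases above is what lets a caller page through a large bucket. A minimal sketch, assuming the `Filesystem` returned by `New` and the `ListObjects` signature used in this test file; `listAllObjects` itself is a hypothetical helper, not part of the original sources:

```go
// listAllObjects drains a bucket page by page using the marker-based API.
func listAllObjects(fs Filesystem, bucket string) ([]ObjectInfo, *probe.Error) {
	var all []ObjectInfo
	marker := ""
	for {
		// Empty prefix and delimiter: flat listing, up to 1000 keys per page.
		result, err := fs.ListObjects(bucket, "", marker, "", 1000)
		if err != nil {
			return nil, err
		}
		all = append(all, result.Objects...)
		if !result.IsTruncated {
			return all, nil
		}
		// Resume after the last key of this page.
		marker = result.NextMarker
	}
}
```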
@@ -1,183 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fs

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/minio/minio/pkg/disk"
	"github.com/minio/minio/pkg/probe"
)

/// Bucket Operations

// DeleteBucket - delete a bucket.
func (fs Filesystem) DeleteBucket(bucket string) *probe.Error {
	// Verify bucket is valid.
	if !IsValidBucketName(bucket) {
		return probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	bucket = fs.denormalizeBucket(bucket)
	bucketDir := filepath.Join(fs.path, bucket)
	if e := os.Remove(bucketDir); e != nil {
		// Error if there was no bucket in the first place.
		if os.IsNotExist(e) {
			return probe.NewError(BucketNotFound{Bucket: bucket})
		}
		// On Windows the error string is slightly different, handle it here.
		if strings.Contains(e.Error(), "directory is not empty") {
			return probe.NewError(BucketNotEmpty{Bucket: bucket})
		}
		// For all other operating systems this error string is
		// assumed to be consistent.
		if strings.Contains(e.Error(), "directory not empty") {
			return probe.NewError(BucketNotEmpty{Bucket: bucket})
		}
		return probe.NewError(e)
	}
	return nil
}

// BucketInfo - bucket name and creation date.
type BucketInfo struct {
	Name    string
	Created time.Time
}

// ListBuckets - list all buckets, corresponds to the S3 GET Service API.
func (fs Filesystem) ListBuckets() ([]BucketInfo, *probe.Error) {
	files, e := ioutil.ReadDir(fs.path)
	if e != nil {
		return []BucketInfo{}, probe.NewError(e)
	}
	var metadataList []BucketInfo
	for _, file := range files {
		if !file.IsDir() {
			// Ignore all entries that are not directories.
			continue
		}
		// If directories are found with odd names, skip them.
		dirName := strings.ToLower(file.Name())
		if !IsValidBucketName(dirName) {
			continue
		}
		metadata := BucketInfo{
			Name:    dirName,
			Created: file.ModTime(),
		}
		metadataList = append(metadataList, metadata)
	}
	// Remove duplicated entries.
	metadataList = removeDuplicateBuckets(metadataList)
	return metadataList, nil
}

// removeDuplicateBuckets - remove duplicate buckets, keeping the older entry.
func removeDuplicateBuckets(buckets []BucketInfo) []BucketInfo {
	length := len(buckets) - 1
	for i := 0; i < length; i++ {
		for j := i + 1; j <= length; j++ {
			if buckets[i].Name == buckets[j].Name {
				if buckets[i].Created.Sub(buckets[j].Created) > 0 {
					buckets[i] = buckets[length]
				} else {
					buckets[j] = buckets[length]
				}
				buckets = buckets[0:length]
				length--
				j--
			}
		}
	}
	return buckets
}

// MakeBucket - PUT Bucket
func (fs Filesystem) MakeBucket(bucket string) *probe.Error {
	di, err := disk.GetInfo(fs.path)
	if err != nil {
		return probe.NewError(err)
	}

	// Remove 5% from total space for cumulative disk space used for
	// journalling, inodes etc.
	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
	if int64(availableDiskSpace) <= fs.minFreeDisk {
		return probe.NewError(RootPathFull{Path: fs.path})
	}

	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return probe.NewError(BucketNameInvalid{Bucket: bucket})
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketDir := filepath.Join(fs.path, bucket)
	if _, e := os.Stat(bucketDir); e == nil {
		return probe.NewError(BucketExists{Bucket: bucket})
	}

	// Make bucket.
	if e := os.Mkdir(bucketDir, 0700); e != nil {
		return probe.NewError(e)
	}
	return nil
}
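The head-room check in MakeBucket above expresses free space as a percentage of the disk after reserving 5% of its total capacity. A worked example with assumed figures (a 1000 GiB disk, 60 GiB free, and `minFreeDisk` of 5 percent):

```go
// Assumed figures, for illustration only.
free, total := 60.0, 1000.0                               // GiB
availableDiskSpace := (free / (total - 0.05*total)) * 100 // 60 / 950 * 100 ≈ 6.3
// int64(6.3) = 6, which is > 5, so MakeBucket proceeds; at 47 GiB free
// the ratio drops to ≈ 4.9 and bucket creation is refused with RootPathFull.
```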

// denormalizeBucket - will convert incoming bucket names to
// corresponding valid bucket names on the backend in a platform
// compatible way for all operating systems.
func (fs Filesystem) denormalizeBucket(bucket string) string {
	buckets, e := ioutil.ReadDir(fs.path)
	if e != nil {
		return bucket
	}
	for _, b := range buckets {
		// If the lowercase version of an on-disk name is equal to
		// the incoming bucket, use the on-disk name.
		if strings.ToLower(b.Name()) == bucket {
			return b.Name()
		}
	}
	return bucket
}

// GetBucketInfo - get bucket metadata.
func (fs Filesystem) GetBucketInfo(bucket string) (BucketInfo, *probe.Error) {
	if !IsValidBucketName(bucket) {
		return BucketInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	bucket = fs.denormalizeBucket(bucket)
	// Get bucket path.
	bucketDir := filepath.Join(fs.path, bucket)
	fi, e := os.Stat(bucketDir)
	if e != nil {
		// Check if bucket exists.
		if os.IsNotExist(e) {
			return BucketInfo{}, probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return BucketInfo{}, probe.NewError(e)
	}
	bucketMetadata := BucketInfo{}
	bucketMetadata.Name = fi.Name()
	bucketMetadata.Created = fi.ModTime()
	return bucketMetadata, nil
}
@@ -1,256 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fs

import (
	"io/ioutil"
	"os"
	"strconv"
	"strings"
	"testing"
)

// The test not only asserts the correctness of the output,
// but also includes test cases for which the function should fail.
// For those failing cases it is also asserted that the function fails as expected.
func TestGetBucketInfo(t *testing.T) {
	// Make a temporary directory to use as the filesystem.
	directory, e := ioutil.TempDir("", "minio-metadata-test")
	if e != nil {
		t.Fatal(e)
	}
	defer os.RemoveAll(directory)

	// Create the filesystem.
	filesystem, err := New(directory)
	if err != nil {
		t.Fatal(err)
	}

	// Create a few buckets.
	for i := 0; i < 4; i++ {
		err = filesystem.MakeBucket("meta-test-bucket." + strconv.Itoa(i))
		if err != nil {
			t.Fatal(err)
		}
	}
	testCases := []struct {
		bucketName string
		metaData   BucketInfo
		e          error
		shouldPass bool
	}{
		// Test cases with invalid bucket names.
		{".test", BucketInfo{}, BucketNameInvalid{Bucket: ".test"}, false},
		{"Test", BucketInfo{}, BucketNameInvalid{Bucket: "Test"}, false},
		{"---", BucketInfo{}, BucketNameInvalid{Bucket: "---"}, false},
		{"ad", BucketInfo{}, BucketNameInvalid{Bucket: "ad"}, false},
		// Test cases with non-existent buckets.
		{"volatile-bucket-1", BucketInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
		{"volatile-bucket-2", BucketInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
		// Test cases with existing buckets.
		{"meta-test-bucket.0", BucketInfo{Name: "meta-test-bucket.0"}, nil, true},
		{"meta-test-bucket.1", BucketInfo{Name: "meta-test-bucket.1"}, nil, true},
		{"meta-test-bucket.2", BucketInfo{Name: "meta-test-bucket.2"}, nil, true},
		{"meta-test-bucket.3", BucketInfo{Name: "meta-test-bucket.3"}, nil, true},
	}
	for i, testCase := range testCases {
		// The err returned is of type *probe.Error.
		bucketInfo, err := filesystem.GetBucketInfo(testCase.bucketName)

		if err != nil && testCase.shouldPass {
			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Cause.Error())
		}
		if err == nil && !testCase.shouldPass {
			t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.e.Error())
		}
		// Failed as expected, but does it fail for the expected reason?
		if err != nil && !testCase.shouldPass {
			if testCase.e.Error() != err.Cause.Error() {
				t.Errorf("Test %d: Expected to fail with error \"%s\", but failed with error \"%s\" instead", i+1, testCase.e.Error(), err.Cause.Error())
			}
		}
		// Since there are cases for which GetBucketInfo fails, this is necessary.
		// Test passes as expected, but the output values are verified for correctness here.
		if err == nil && testCase.shouldPass {
			if testCase.bucketName != bucketInfo.Name {
				t.Errorf("Test %d: Expected the bucket name to be \"%s\", but found \"%s\" instead", i+1, testCase.bucketName, bucketInfo.Name)
			}
		}
	}
}

func TestListBuckets(t *testing.T) {
	// Make a temporary directory to use as the filesystem.
	directory, e := ioutil.TempDir("", "minio-benchmark")
	if e != nil {
		t.Fatal(e)
	}
	defer os.RemoveAll(directory)

	// Create the filesystem.
	filesystem, err := New(directory)
	if err != nil {
		t.Fatal(err)
	}

	// Create a few buckets.
	for i := 0; i < 10; i++ {
		err = filesystem.MakeBucket("testbucket." + strconv.Itoa(i))
		if err != nil {
			t.Fatal(err)
		}
	}

	// List, and ensure that they are all there.
	metadatas, err := filesystem.ListBuckets()
	if err != nil {
		t.Fatal(err)
	}

	if len(metadatas) != 10 {
		t.Errorf("incorrect length of metadatas (%d)\n", len(metadatas))
	}

	// Iterate over the buckets, ensuring that the name is correct.
	for i := 0; i < len(metadatas); i++ {
		if !strings.Contains(metadatas[i].Name, "testbucket") {
			t.Fail()
		}
	}
}

func TestDeleteBucket(t *testing.T) {
	// Make a temporary directory to use as the filesystem.
	directory, e := ioutil.TempDir("", "minio-benchmark")
	if e != nil {
		t.Fatal(e)
	}
	defer os.RemoveAll(directory)

	// Create the filesystem.
	filesystem, err := New(directory)
	if err != nil {
		t.Fatal(err)
	}

	// Deleting a bucket that doesn't exist should error.
	err = filesystem.DeleteBucket("bucket")
	if err == nil || !strings.Contains(err.Cause.Error(), "Bucket not found:") {
		t.Fail()
	}
}

func BenchmarkListBuckets(b *testing.B) {
	// Make a temporary directory to use as the filesystem.
	directory, e := ioutil.TempDir("", "minio-benchmark")
	if e != nil {
		b.Fatal(e)
	}
	defer os.RemoveAll(directory)

	// Create the filesystem.
	filesystem, err := New(directory)
	if err != nil {
		b.Fatal(err)
	}

	// Create a few buckets.
	for i := 0; i < 20; i++ {
		err = filesystem.MakeBucket("bucket." + strconv.Itoa(i))
		if err != nil {
			b.Fatal(err)
		}
	}

	b.ResetTimer()

	// List the buckets over and over and over.
	for i := 0; i < b.N; i++ {
		_, err = filesystem.ListBuckets()
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkDeleteBucket(b *testing.B) {
	// Make a temporary directory to use as the filesystem.
	directory, e := ioutil.TempDir("", "minio-benchmark")
	if e != nil {
		b.Fatal(e)
	}
	defer os.RemoveAll(directory)

	// Create the filesystem.
	filesystem, err := New(directory)
	if err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		// Creating buckets takes time, so stop and start the timer.
		b.StopTimer()

		// Create and delete the bucket over and over.
		err = filesystem.MakeBucket("bucket")
		if err != nil {
			b.Fatal(err)
		}

		b.StartTimer()

		err = filesystem.DeleteBucket("bucket")
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkGetBucketInfo(b *testing.B) {
	// Make a temporary directory to use as the filesystem.
	directory, e := ioutil.TempDir("", "minio-benchmark")
	if e != nil {
		b.Fatal(e)
	}
	defer os.RemoveAll(directory)

	// Create the filesystem.
	filesystem, err := New(directory)
	if err != nil {
		b.Fatal(err)
	}

	// Put up a bucket with some metadata.
	err = filesystem.MakeBucket("bucket")
	if err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		// Retrieve the metadata!
		_, err := filesystem.GetBucketInfo("bucket")
		if err != nil {
			b.Fatal(err)
		}
	}
}
@@ -1,91 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fs

import "time"

// PartMetadata - various types of individual part resources
type PartMetadata struct {
	PartNumber   int
	LastModified time.Time
	ETag         string
	Size         int64
}

// ObjectResourcesMetadata - various types of object resources
type ObjectResourcesMetadata struct {
	Bucket               string
	Object               string
	UploadID             string
	StorageClass         string
	PartNumberMarker     int
	NextPartNumberMarker int
	MaxParts             int
	IsTruncated          bool

	Part         []PartMetadata
	EncodingType string
}

// UploadMetadata - container capturing metadata of an in-progress multipart upload in a given bucket
type UploadMetadata struct {
	Object       string
	UploadID     string
	StorageClass string
	Initiated    time.Time
}

// BucketMultipartResourcesMetadata - various types of bucket resources for in-progress multipart uploads
type BucketMultipartResourcesMetadata struct {
	KeyMarker          string
	UploadIDMarker     string
	NextKeyMarker      string
	NextUploadIDMarker string
	EncodingType       string
	MaxUploads         int
	IsTruncated        bool
	Upload             []*UploadMetadata
	Prefix             string
	Delimiter          string
	CommonPrefixes     []string
}

// ListObjectsResult - container for list object request results.
type ListObjectsResult struct {
	IsTruncated bool
	NextMarker  string
	Objects     []ObjectInfo
	Prefixes    []string
}

// CompletePart - completed part container
type CompletePart struct {
	PartNumber int
	ETag       string
}

// completedParts is a sortable interface for a CompletePart slice
type completedParts []CompletePart

func (a completedParts) Len() int           { return len(a) }
func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }

// CompleteMultipartUpload - container for completing a multipart upload
type CompleteMultipartUpload struct {
	Part []CompletePart
}
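The `completedParts` helper above is what gives `CompleteMultipartUpload` its part-order check. A small sketch of the same pattern, with illustrative values:

```go
parts := []CompletePart{
	{PartNumber: 1, ETag: "etag-a"},
	{PartNumber: 3, ETag: "etag-c"},
	{PartNumber: 2, ETag: "etag-b"},
}
// The server side rejects out-of-order parts with InvalidPartOrder;
// a client could instead sort them by PartNumber before submitting.
if !sort.IsSorted(completedParts(parts)) {
	sort.Sort(completedParts(parts))
}
```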
@@ -1,295 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fs

import "fmt"

// InvalidArgument invalid argument
type InvalidArgument struct{}

func (e InvalidArgument) Error() string {
	return "Invalid argument"
}

// UnsupportedFilesystem unsupported filesystem type
type UnsupportedFilesystem struct {
	Type string
}

func (e UnsupportedFilesystem) Error() string {
	return "Unsupported filesystem: " + e.Type
}

// RootPathFull root path out of space
type RootPathFull struct {
	Path string
}

func (e RootPathFull) Error() string {
	return "Root path " + e.Path + " reached its minimum free disk threshold."
}

// BucketNotFound bucket does not exist
type BucketNotFound struct {
	Bucket string
}

func (e BucketNotFound) Error() string {
	return "Bucket not found: " + e.Bucket
}

// BucketNotEmpty bucket is not empty
type BucketNotEmpty struct {
	Bucket string
}

func (e BucketNotEmpty) Error() string {
	return "Bucket not empty: " + e.Bucket
}

// ObjectNotFound object does not exist
type ObjectNotFound struct {
	Bucket string
	Object string
}

func (e ObjectNotFound) Error() string {
	return "Object not found: " + e.Bucket + "#" + e.Object
}

// ObjectExistsAsPrefix object already exists with the requested prefix.
type ObjectExistsAsPrefix struct {
	Bucket string
	Prefix string
}

func (e ObjectExistsAsPrefix) Error() string {
	return "Object exists on: " + e.Bucket + " as prefix " + e.Prefix
}

// ObjectCorrupted object found to be corrupted
type ObjectCorrupted struct {
	Object string
}

func (e ObjectCorrupted) Error() string {
	return "Object found corrupted: " + e.Object
}

// BucketExists bucket exists
type BucketExists struct {
	Bucket string
}

func (e BucketExists) Error() string {
	return "Bucket exists: " + e.Bucket
}

// CorruptedBackend backend found to be corrupted
type CorruptedBackend struct {
	Backend string
}

func (e CorruptedBackend) Error() string {
	return "Corrupted backend: " + e.Backend
}

// NotImplemented function not implemented
type NotImplemented struct {
	Function string
}

func (e NotImplemented) Error() string {
	return "Not implemented: " + e.Function
}

// InvalidDisksArgument invalid number of disks per node
type InvalidDisksArgument struct{}

func (e InvalidDisksArgument) Error() string {
	return "Invalid number of disks per node"
}

// BadDigest - the Content-MD5 specified did not match what we received.
type BadDigest struct {
	ExpectedMD5   string
	CalculatedMD5 string
}

func (e BadDigest) Error() string {
	return "Bad digest: expected " + e.ExpectedMD5 + " does not match calculated " + e.CalculatedMD5
}

// InternalError - generic internal error
type InternalError struct{}

// BackendError - generic disk backend error
type BackendError struct {
	Path string
}

// BackendCorrupted - path has corrupted data
type BackendCorrupted BackendError

// APINotImplemented - generic API not implemented error
type APINotImplemented struct {
	API string
}

// GenericBucketError - generic bucket error
type GenericBucketError struct {
	Bucket string
}

// BucketPolicyNotFound - no bucket policy found.
type BucketPolicyNotFound GenericBucketError

func (e BucketPolicyNotFound) Error() string {
	return "No bucket policy found for bucket: " + e.Bucket
}

// GenericObjectError - generic object error
type GenericObjectError struct {
	Bucket string
	Object string
}

// ImplementationError - generic implementation error
type ImplementationError struct {
	Bucket string
	Object string
	Err    error
}

/// Bucket related errors

// BucketNameInvalid - bucket name provided is invalid
type BucketNameInvalid GenericBucketError

/// Object related errors

// ObjectNameInvalid - object name provided is invalid
type ObjectNameInvalid GenericObjectError

// Error - returns a human-readable error string
func (e ImplementationError) Error() string {
	error := ""
	if e.Bucket != "" {
		error = error + "Bucket: " + e.Bucket + " "
	}
	if e.Object != "" {
		error = error + "Object: " + e.Object + " "
	}
	error = error + "Error: " + e.Err.Error()
	return error
}

// EmbedError - wrapper function for error object
func EmbedError(bucket, object string, err error) ImplementationError {
	return ImplementationError{
		Bucket: bucket,
		Object: object,
		Err:    err,
	}
}

// Error - returns a human-readable error string
func (e InternalError) Error() string {
	return "Internal error occurred"
}

// Error - returns a human-readable error string
func (e APINotImplemented) Error() string {
	return "API not implemented: " + e.API
}

// Error - returns a human-readable error string
func (e BucketNameInvalid) Error() string {
	return "Bucket name invalid: " + e.Bucket
}

// Error - returns a human-readable error string
func (e ObjectNameInvalid) Error() string {
	return "Object name invalid: " + e.Bucket + "#" + e.Object
}

// IncompleteBody - the number of bytes specified by the Content-Length HTTP header was not provided
type IncompleteBody GenericObjectError

// Error - returns a human-readable error string
func (e IncompleteBody) Error() string {
	return e.Bucket + "#" + e.Object + " has incomplete body"
}

// Error - returns a human-readable error string
func (e BackendCorrupted) Error() string {
	return "Backend corrupted: " + e.Path
}

// OperationNotPermitted - operation not permitted
type OperationNotPermitted struct {
	Op     string
	Reason string
}

func (e OperationNotPermitted) Error() string {
	return "Operation " + e.Op + " not permitted for reason: " + e.Reason
}

// InvalidRange - invalid range
type InvalidRange struct {
	Start  int64
	Length int64
}

func (e InvalidRange) Error() string {
	return fmt.Sprintf("Invalid range start:%d length:%d", e.Start, e.Length)
}

/// Multipart related errors

// InvalidUploadID invalid upload id
type InvalidUploadID struct {
	UploadID string
}

func (e InvalidUploadID) Error() string {
	return "Invalid upload id " + e.UploadID
}

// InvalidPart one or more of the specified parts could not be found
type InvalidPart struct{}

func (e InvalidPart) Error() string {
	return "One or more of the specified parts could not be found"
}

// InvalidPartOrder parts are not ordered as requested
type InvalidPartOrder struct {
	UploadID string
}

func (e InvalidPartOrder) Error() string {
	return "Invalid part order sent for " + e.UploadID
}

// MalformedXML invalid XML format
type MalformedXML struct{}

func (e MalformedXML) Error() string {
	return "Malformed XML"
}
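Callers throughout this package receive these typed causes wrapped in a `*probe.Error`. A hedged sketch of how a front-end layer might branch on the cause; `statusCode` and its mapping values are assumptions for illustration, not taken from the original handlers:

```go
// statusCode is a hypothetical helper that maps typed causes from this
// package to HTTP-style status codes (the mapping itself is assumed).
func statusCode(err *probe.Error) int {
	switch err.ToGoError().(type) {
	case BucketNotFound, ObjectNotFound, BucketPolicyNotFound:
		return 404 // resource missing
	case BucketNameInvalid, ObjectNameInvalid, InvalidRange, MalformedXML:
		return 400 // bad request
	case BucketExists, BucketNotEmpty:
		return 409 // conflict
	default:
		return 500 // internal error
	}
}
```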
@@ -1,666 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fs

import (
	"crypto/md5"
	"encoding/base64"
	"encoding/hex"
	"encoding/xml"
	"errors"
	"fmt"
	"io"
	"math/rand"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/minio/minio/pkg/atomic"
	"github.com/minio/minio/pkg/crypto/sha512"
	"github.com/minio/minio/pkg/disk"
	"github.com/minio/minio/pkg/mimedb"
	"github.com/minio/minio/pkg/probe"
)

// isValidUploadID - verify if the upload id is an active multipart session.
func (fs Filesystem) isValidUploadID(object, uploadID string) (ok bool) {
	fs.rwLock.RLock()
	defer fs.rwLock.RUnlock()
	_, ok = fs.multiparts.ActiveSession[uploadID]
	return ok
}

// byUploadMetadataKey is a sortable interface for an UploadMetadata slice
type byUploadMetadataKey []*UploadMetadata

func (b byUploadMetadataKey) Len() int           { return len(b) }
func (b byUploadMetadataKey) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byUploadMetadataKey) Less(i, j int) bool { return b[i].Object < b[j].Object }

// ListMultipartUploads - list incomplete multipart sessions for a given BucketMultipartResourcesMetadata
func (fs Filesystem) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error) {
	// Input validation.
	if !IsValidBucketName(bucket) {
		return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	if _, e := os.Stat(bucketPath); e != nil {
		// Check bucket exists.
		if os.IsNotExist(e) {
			return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return BucketMultipartResourcesMetadata{}, probe.NewError(e)
	}
	var uploads []*UploadMetadata
	fs.rwLock.RLock()
	defer fs.rwLock.RUnlock()
	for uploadID, session := range fs.multiparts.ActiveSession {
		objectName := session.ObjectName
		if strings.HasPrefix(objectName, resources.Prefix) {
			if len(uploads) > resources.MaxUploads {
				sort.Sort(byUploadMetadataKey(uploads))
				resources.Upload = uploads
				resources.NextKeyMarker = session.ObjectName
				resources.NextUploadIDMarker = uploadID
				resources.IsTruncated = true
				return resources, nil
			}
			// UploadIDMarker is ignored if KeyMarker is empty.
			switch {
			case resources.KeyMarker != "" && resources.UploadIDMarker == "":
				if objectName > resources.KeyMarker {
					upload := new(UploadMetadata)
					upload.Object = objectName
					upload.UploadID = uploadID
					upload.Initiated = session.Initiated
					uploads = append(uploads, upload)
				}
			case resources.KeyMarker != "" && resources.UploadIDMarker != "":
				if session.UploadID > resources.UploadIDMarker {
					if objectName >= resources.KeyMarker {
						upload := new(UploadMetadata)
						upload.Object = objectName
						upload.UploadID = uploadID
						upload.Initiated = session.Initiated
						uploads = append(uploads, upload)
					}
				}
			default:
				upload := new(UploadMetadata)
				upload.Object = objectName
				upload.UploadID = uploadID
				upload.Initiated = session.Initiated
				uploads = append(uploads, upload)
			}
		}
	}
	sort.Sort(byUploadMetadataKey(uploads))
	resources.Upload = uploads
	return resources, nil
}

// doPartsMatch - verify that the parts sent over the network match
// what we have saved for the session.
func doPartsMatch(parts []CompletePart, savedParts []PartMetadata) bool {
	if parts == nil || savedParts == nil {
		return false
	}
	if len(parts) != len(savedParts) {
		return false
	}
	// Range over incoming parts and compare them with saved parts.
	for i, part := range parts {
		if strings.Trim(part.ETag, "\"") != savedParts[i].ETag {
			return false
		}
	}
	return true
}

// Create an S3-compatible MD5sum for a complete multipart transaction.
func makeS3MD5(md5Strs ...string) (string, *probe.Error) {
	var finalMD5Bytes []byte
	for _, md5Str := range md5Strs {
		md5Bytes, e := hex.DecodeString(md5Str)
		if e != nil {
			return "", probe.NewError(e)
		}
		finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
	}
	md5Hasher := md5.New()
	md5Hasher.Write(finalMD5Bytes)
	s3MD5 := fmt.Sprintf("%s-%d", hex.EncodeToString(md5Hasher.Sum(nil)), len(md5Strs))
	return s3MD5, nil
}
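makeS3MD5 above produces the S3 multipart ETag shape: the MD5 of the concatenated per-part MD5 digests, suffixed with the part count. A self-contained sketch with assumed part contents, illustrating the same computation:

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

func main() {
	// MD5 digests of two uploaded parts (contents assumed for the example).
	part1 := md5.Sum([]byte("part-1 data"))
	part2 := md5.Sum([]byte("part-2 data"))

	// Concatenate the raw digest bytes, hash again, append "-<part count>".
	combined := append(part1[:], part2[:]...)
	final := md5.Sum(combined)
	fmt.Printf("%s-%d\n", hex.EncodeToString(final[:]), 2)
	// Output looks like "9f03...b1-2", matching S3's multipart ETag shape.
}
```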

type multiCloser struct {
	Closers []io.Closer
}

func (m multiCloser) Close() error {
	for _, c := range m.Closers {
		if e := c.Close(); e != nil {
			return e
		}
	}
	return nil
}

// MultiCloser - returns a Closer that's the logical
// concatenation of the provided input closers. They're closed
// sequentially. If any of the closers return a non-nil error, Close
// will return that error.
func MultiCloser(closers ...io.Closer) io.Closer {
	return multiCloser{closers}
}

// removeParts - remove all parts.
func removeParts(partPathPrefix string, parts []PartMetadata) *probe.Error {
	for _, part := range parts {
		// We are purposely ignoring the return values here, since
		// another thread may have already purged these entries.
		os.Remove(partPathPrefix + part.ETag + fmt.Sprintf("$%d-$multiparts", part.PartNumber))
	}
	return nil
}

// saveParts - concatenate and save all parts.
func saveParts(partPathPrefix string, mw io.Writer, parts []CompletePart) *probe.Error {
	var partReaders []io.Reader
	var partClosers []io.Closer
	for _, part := range parts {
		// Trim prefix
		md5Sum := strings.TrimPrefix(part.ETag, "\"")
		// Trim suffix
		md5Sum = strings.TrimSuffix(md5Sum, "\"")
		partFile, e := os.OpenFile(partPathPrefix+md5Sum+fmt.Sprintf("$%d-$multiparts", part.PartNumber), os.O_RDONLY, 0600)
		if e != nil {
			if !os.IsNotExist(e) {
				return probe.NewError(e)
			}
			// Some clients do not set Content-MD5, so we would have
			// created part files without 'ETag' in them.
			partFile, e = os.OpenFile(partPathPrefix+fmt.Sprintf("$%d-$multiparts", part.PartNumber), os.O_RDONLY, 0600)
			if e != nil {
				return probe.NewError(e)
			}
		}
		partReaders = append(partReaders, partFile)
		partClosers = append(partClosers, partFile)
	}
	// Concatenate a list of closers and close upon return.
	closer := MultiCloser(partClosers...)
	defer closer.Close()

	reader := io.MultiReader(partReaders...)
	readBufferSize := 8 * 1024 * 1024          // 8MiB
	readBuffer := make([]byte, readBufferSize) // Allocate 8MiB buffer.
	if _, e := io.CopyBuffer(mw, reader, readBuffer); e != nil {
		return probe.NewError(e)
	}
	return nil
}

// NewMultipartUpload - initiate a new multipart session
func (fs Filesystem) NewMultipartUpload(bucket, object string) (string, *probe.Error) {
	di, e := disk.GetInfo(fs.path)
	if e != nil {
		return "", probe.NewError(e)
	}

	// Remove 5% from total space for cumulative disk space used for
	// journalling, inodes etc.
	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
	if int64(availableDiskSpace) <= fs.minFreeDisk {
		return "", probe.NewError(RootPathFull{Path: fs.path})
	}

	// Input validation.
	if !IsValidBucketName(bucket) {
		return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(object) {
		return "", probe.NewError(ObjectNameInvalid{Object: object})
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	if _, e = os.Stat(bucketPath); e != nil {
		// Check bucket exists.
		if os.IsNotExist(e) {
			return "", probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return "", probe.NewError(e)
	}

	objectPath := filepath.Join(bucketPath, object)
	objectDir := filepath.Dir(objectPath)
	if _, e = os.Stat(objectDir); e != nil {
		if !os.IsNotExist(e) {
			return "", probe.NewError(e)
		}
		e = os.MkdirAll(objectDir, 0700)
		if e != nil {
			return "", probe.NewError(e)
		}
	}

	// Generate new upload id.
	id := []byte(strconv.FormatInt(rand.Int63(), 10) + bucket + object + time.Now().String())
	uploadIDSum := sha512.Sum512(id)
	uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47]

	// Critical region requiring write lock.
	fs.rwLock.Lock()
	defer fs.rwLock.Unlock()
	// Initialize multipart session.
	mpartSession := &MultipartSession{}
	mpartSession.TotalParts = 0
	mpartSession.ObjectName = object
	mpartSession.UploadID = uploadID
	mpartSession.Initiated = time.Now().UTC()
	// Multipart has a maximum of 10000 parts.
	var parts []PartMetadata
	mpartSession.Parts = parts

	fs.multiparts.ActiveSession[uploadID] = mpartSession
	if err := saveMultipartsSession(*fs.multiparts); err != nil {
		return "", err.Trace(objectPath)
	}
	return uploadID, nil
}

// Remove all duplicated parts based on the latest time of their upload.
func removeDuplicateParts(parts []PartMetadata) []PartMetadata {
	length := len(parts) - 1
	for i := 0; i < length; i++ {
		for j := i + 1; j <= length; j++ {
			if parts[i].PartNumber == parts[j].PartNumber {
				if parts[i].LastModified.Sub(parts[j].LastModified) > 0 {
					parts[i] = parts[length]
				} else {
					parts[j] = parts[length]
				}
				parts = parts[0:length]
				length--
				j--
			}
		}
	}
	return parts
}

// partNumber is a sortable interface for a PartMetadata slice.
type partNumber []PartMetadata

func (a partNumber) Len() int           { return len(a) }
func (a partNumber) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }

// CreateObjectPart - create a part in a multipart session
func (fs Filesystem) CreateObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Bytes []byte) (string, *probe.Error) {
	di, err := disk.GetInfo(fs.path)
	if err != nil {
		return "", probe.NewError(err)
	}

	// Remove 5% from total space for cumulative disk space used for
	// journalling, inodes etc.
	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
	if int64(availableDiskSpace) <= fs.minFreeDisk {
		return "", probe.NewError(RootPathFull{Path: fs.path})
	}

	// Part id cannot be zero or negative.
	if partID <= 0 {
		return "", probe.NewError(errors.New("invalid part id, cannot be zero or less than zero"))
	}

	// Check bucket name is valid.
	if !IsValidBucketName(bucket) {
		return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
	}

	// Verify object path is legal.
	if !IsValidObjectName(object) {
		return "", probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	// Verify upload is valid for the incoming object.
	if !fs.isValidUploadID(object, uploadID) {
		return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	if _, e := os.Stat(bucketPath); e != nil {
		// Check bucket exists.
		if os.IsNotExist(e) {
			return "", probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return "", probe.NewError(e)
	}

	// md5Hex representation.
	var md5Hex string
	if len(md5Bytes) != 0 {
		md5Hex = hex.EncodeToString(md5Bytes)
	}

	objectPath := filepath.Join(bucketPath, object)
	partPathPrefix := objectPath + uploadID
	partPath := partPathPrefix + md5Hex + fmt.Sprintf("$%d-$multiparts", partID)
	partFile, e := atomic.FileCreateWithPrefix(partPath, "$multiparts")
	if e != nil {
		return "", probe.NewError(e)
	}
	defer partFile.Close()

	// Initialize md5 writer.
	md5Writer := md5.New()

	// Create a multiwriter.
	multiWriter := io.MultiWriter(md5Writer, partFile)

	if _, e = io.CopyN(multiWriter, data, size); e != nil {
		partFile.CloseAndPurge()
		return "", probe.NewError(e)
	}

	// Finalize new md5.
	newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
	if len(md5Bytes) != 0 {
		if newMD5Hex != md5Hex {
			return "", probe.NewError(BadDigest{md5Hex, newMD5Hex})
		}
	}

	// Stat the file to get the latest information.
	fi, e := os.Stat(partFile.Name())
	if e != nil {
		return "", probe.NewError(e)
	}
	partMetadata := PartMetadata{}
	partMetadata.PartNumber = partID
	partMetadata.ETag = newMD5Hex
	partMetadata.Size = fi.Size()
	partMetadata.LastModified = fi.ModTime()

	// Critical region requiring read lock.
	fs.rwLock.RLock()
	deserializedMultipartSession, ok := fs.multiparts.ActiveSession[uploadID]
	fs.rwLock.RUnlock()
	if !ok {
		return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	// Add all incoming parts.
	deserializedMultipartSession.Parts = append(deserializedMultipartSession.Parts, partMetadata)

	// Remove duplicate parts based on the most recently uploaded.
	deserializedMultipartSession.Parts = removeDuplicateParts(deserializedMultipartSession.Parts)
	// Save total parts uploaded.
	deserializedMultipartSession.TotalParts = len(deserializedMultipartSession.Parts)

	// Sort by part number before saving.
	sort.Sort(partNumber(deserializedMultipartSession.Parts))

	// Critical region requiring write lock.
	fs.rwLock.Lock()
	defer fs.rwLock.Unlock()

	fs.multiparts.ActiveSession[uploadID] = deserializedMultipartSession
	if err := saveMultipartsSession(*fs.multiparts); err != nil {
		return "", err.Trace(partPathPrefix)
	}
	return newMD5Hex, nil
}

// CompleteMultipartUpload - complete a multipart upload and persist the data
func (fs Filesystem) CompleteMultipartUpload(bucket string, object string, uploadID string, completeMultipartBytes []byte) (ObjectInfo, *probe.Error) {
	// Check bucket name is valid.
	if !IsValidBucketName(bucket) {
		return ObjectInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}

	// Verify object path is legal.
	if !IsValidObjectName(object) {
		return ObjectInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	// Verify the upload is valid for the incoming object.
	if !fs.isValidUploadID(object, uploadID) {
		return ObjectInfo{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	if _, e := os.Stat(bucketPath); e != nil {
		// Check bucket exists.
		if os.IsNotExist(e) {
			return ObjectInfo{}, probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return ObjectInfo{}, probe.NewError(InternalError{})
	}

	objectPath := filepath.Join(bucketPath, object)
	objectWriter, e := atomic.FileCreateWithPrefix(objectPath, "$tmpobject")
	if e != nil {
		return ObjectInfo{}, probe.NewError(e)
	}

	completeMultipartUpload := &CompleteMultipartUpload{}
	if e = xml.Unmarshal(completeMultipartBytes, completeMultipartUpload); e != nil {
		objectWriter.CloseAndPurge()
		return ObjectInfo{}, probe.NewError(MalformedXML{})
	}
	if !sort.IsSorted(completedParts(completeMultipartUpload.Part)) {
		objectWriter.CloseAndPurge()
		return ObjectInfo{}, probe.NewError(InvalidPartOrder{})
	}

	// Save parts for verification.
	parts := completeMultipartUpload.Part

	// Critical region requiring read lock.
	fs.rwLock.RLock()
	savedParts := fs.multiparts.ActiveSession[uploadID].Parts
	fs.rwLock.RUnlock()

	if !doPartsMatch(parts, savedParts) {
		objectWriter.CloseAndPurge()
		return ObjectInfo{}, probe.NewError(InvalidPart{})
	}

	// Parts successfully validated, save all the parts.
	partPathPrefix := objectPath + uploadID
	if err := saveParts(partPathPrefix, objectWriter, parts); err != nil {
		objectWriter.CloseAndPurge()
		return ObjectInfo{}, err.Trace(partPathPrefix)
	}
	var md5Strs []string
	for _, part := range savedParts {
		md5Strs = append(md5Strs, part.ETag)
	}
	// Compute the S3-compatible md5.
	s3MD5, err := makeS3MD5(md5Strs...)
	if err != nil {
		objectWriter.CloseAndPurge()
		return ObjectInfo{}, err.Trace(md5Strs...)
	}

	// Successfully saved multipart, remove all parts in a routine.
	go removeParts(partPathPrefix, savedParts)

	// Critical region requiring write lock.
	fs.rwLock.Lock()
	delete(fs.multiparts.ActiveSession, uploadID)
	if err := saveMultipartsSession(*fs.multiparts); err != nil {
		fs.rwLock.Unlock()
		objectWriter.CloseAndPurge()
		return ObjectInfo{}, err.Trace(partPathPrefix)
	}
	if e = objectWriter.Close(); e != nil {
		fs.rwLock.Unlock()
		return ObjectInfo{}, probe.NewError(e)
	}
	fs.rwLock.Unlock()

	// Stat again to fetch the object metadata.
	st, e := os.Stat(objectPath)
	if e != nil {
		return ObjectInfo{}, probe.NewError(e)
	}

	contentType := "application/octet-stream"
	if objectExt := filepath.Ext(objectPath); objectExt != "" {
		content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]
		if ok {
			contentType = content.ContentType
		}
	}
	newObject := ObjectInfo{
		Bucket:       bucket,
		Name:         object,
		ModifiedTime: st.ModTime(),
		Size:         st.Size(),
		ContentType:  contentType,
		MD5Sum:       s3MD5,
	}
	return newObject, nil
}

// ListObjectParts - list parts from an incomplete multipart session for a given ObjectResourcesMetadata
func (fs Filesystem) ListObjectParts(bucket, object string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error) {
	// Check bucket name is valid.
	if !IsValidBucketName(bucket) {
		return ObjectResourcesMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}

	// Verify object path is legal.
	if !IsValidObjectName(object) {
		return ObjectResourcesMetadata{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	// Save upload id.
	uploadID := resources.UploadID

	// Verify if upload id is valid for the incoming object.
	if !fs.isValidUploadID(object, uploadID) {
		return ObjectResourcesMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	objectResourcesMetadata := resources
	objectResourcesMetadata.Bucket = bucket
	objectResourcesMetadata.Object = object
	var startPartNumber int
	switch {
	case objectResourcesMetadata.PartNumberMarker == 0:
		startPartNumber = 1
	default:
		startPartNumber = objectResourcesMetadata.PartNumberMarker
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	if _, e := os.Stat(bucketPath); e != nil {
		// Check bucket exists.
		if os.IsNotExist(e) {
			return ObjectResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return ObjectResourcesMetadata{}, probe.NewError(e)
	}

	// Critical region requiring read lock.
	fs.rwLock.RLock()
	deserializedMultipartSession, ok := fs.multiparts.ActiveSession[uploadID]
	fs.rwLock.RUnlock()
	if !ok {
		return ObjectResourcesMetadata{}, probe.NewError(InvalidUploadID{UploadID: resources.UploadID})
	}
	var parts []PartMetadata
	for i := startPartNumber; i <= deserializedMultipartSession.TotalParts; i++ {
		if len(parts) > objectResourcesMetadata.MaxParts {
			sort.Sort(partNumber(parts))
			objectResourcesMetadata.IsTruncated = true
			objectResourcesMetadata.Part = parts
			objectResourcesMetadata.NextPartNumberMarker = i
			return objectResourcesMetadata, nil
		}
		parts = append(parts, deserializedMultipartSession.Parts[i-1])
	}
	sort.Sort(partNumber(parts))
	objectResourcesMetadata.Part = parts
	return objectResourcesMetadata, nil
}

// AbortMultipartUpload - abort an incomplete multipart session
func (fs Filesystem) AbortMultipartUpload(bucket, object, uploadID string) *probe.Error {
	// Check bucket name is valid.
	if !IsValidBucketName(bucket) {
		return probe.NewError(BucketNameInvalid{Bucket: bucket})
	}

	// Verify object path is legal.
	if !IsValidObjectName(object) {
		return probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	if !fs.isValidUploadID(object, uploadID) {
		return probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	if _, e := os.Stat(bucketPath); e != nil {
		// Check bucket exists.
		if os.IsNotExist(e) {
			return probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return probe.NewError(e)
	}

	objectPath := filepath.Join(bucketPath, object)
	partPathPrefix := objectPath + uploadID

	// Critical region requiring read lock.
	fs.rwLock.RLock()
	savedParts := fs.multiparts.ActiveSession[uploadID].Parts
	fs.rwLock.RUnlock()

	// Remove all parts.
	if err := removeParts(partPathPrefix, savedParts); err != nil {
		return err.Trace(partPathPrefix)
	}

	// Critical region requiring write lock.
	fs.rwLock.Lock()
	defer fs.rwLock.Unlock()

	delete(fs.multiparts.ActiveSession, uploadID)
	if err := saveMultipartsSession(*fs.multiparts); err != nil {
		return err.Trace(partPathPrefix)
	}
	return nil
}
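Putting the pieces above together, the multipart flow is: NewMultipartUpload, one CreateObjectPart per chunk, then CompleteMultipartUpload with an XML body listing the parts in order. A minimal sketch, assuming a `filesystem` obtained from `New(directory)`; error handling is elided with `_` and the XML payload is an assumed minimal body, so real callers must check every returned `*probe.Error`:

```go
part1 := bytes.Repeat([]byte("a"), 5*1024*1024) // assumed 5 MiB first part
part2 := []byte("tail of the object")

uploadID, _ := filesystem.NewMultipartUpload("bucket", "large-object")
etag1, _ := filesystem.CreateObjectPart("bucket", "large-object", uploadID, 1, int64(len(part1)), bytes.NewReader(part1), nil)
etag2, _ := filesystem.CreateObjectPart("bucket", "large-object", uploadID, 2, int64(len(part2)), bytes.NewReader(part2), nil)

// Parts must be listed in ascending PartNumber order, matching completedParts.
completeXML := fmt.Sprintf(`<CompleteMultipartUpload>
	<Part><PartNumber>1</PartNumber><ETag>%s</ETag></Part>
	<Part><PartNumber>2</PartNumber><ETag>%s</ETag></Part>
</CompleteMultipartUpload>`, etag1, etag2)

info, _ := filesystem.CompleteMultipartUpload("bucket", "large-object", uploadID, []byte(completeXML))
fmt.Println(info.MD5Sum) // S3-style "<md5>-2" ETag produced by makeS3MD5.
```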
@@ -1,383 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fs

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"io"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/minio/minio/pkg/atomic"
	"github.com/minio/minio/pkg/disk"
	"github.com/minio/minio/pkg/mimedb"
	"github.com/minio/minio/pkg/probe"
)
/// Object Operations

// GetObject - GET object.
func (fs Filesystem) GetObject(w io.Writer, bucket, object string, start, length int64) (int64, *probe.Error) {
	// Critical region requiring read lock.
	fs.rwLock.RLock()
	defer fs.rwLock.RUnlock()

	// Input validation.
	if !IsValidBucketName(bucket) {
		return 0, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(object) {
		return 0, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	// Normalize buckets.
	bucket = fs.denormalizeBucket(bucket)
	objectPath := filepath.Join(fs.path, bucket, object)

	file, e := os.Open(objectPath)
	if e != nil {
		// If the object doesn't exist, the bucket might not exist either. Stat for
		// the bucket and give a better error message if that is true.
		if os.IsNotExist(e) {
			_, e = os.Stat(filepath.Join(fs.path, bucket))
			if os.IsNotExist(e) {
				return 0, probe.NewError(BucketNotFound{Bucket: bucket})
			}
			return 0, probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
		}
		return 0, probe.NewError(e)
	}
	defer file.Close()

	_, e = file.Seek(start, os.SEEK_SET)
	if e != nil {
		// When the "handle is invalid", the file might be a directory on Windows.
		if runtime.GOOS == "windows" && strings.Contains(e.Error(), "handle is invalid") {
			return 0, probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
		}
		return 0, probe.NewError(e)
	}

	var count int64
	// Copy a fixed range if the length is positive, the whole remainder otherwise.
	if length > 0 {
		count, e = io.CopyN(w, file, length)
	} else {
		count, e = io.Copy(w, file)
	}
	if e != nil {
		// This call will fail if the object is a directory. Stat the file to see if
		// this is true; if so, return an ObjectNotFound error.
		stat, e := os.Stat(objectPath)
		if e == nil && stat.IsDir() {
			return count, probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
		}
		return count, probe.NewError(e)
	}

	return count, nil
}
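A short usage sketch: GetObject streams into any io.Writer, and a non-positive length means "read from the offset to EOF" per the branch above. Root, bucket, and object names are hypothetical:

```
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/minio/minio/pkg/fs"
)

func main() {
	filesystem, err := fs.New("/tmp/minio-root") // hypothetical root path
	if err != nil {
		log.Fatalln(err)
	}
	var buf bytes.Buffer
	// Skip the first 5 bytes, then read to EOF (length 0 selects io.Copy).
	n, err := filesystem.GetObject(&buf, "bucket", "object", 5, 0)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Printf("read %d bytes: %q\n", n, buf.String())
}
```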
// GetObjectInfo - get object info.
func (fs Filesystem) GetObjectInfo(bucket, object string) (ObjectInfo, *probe.Error) {
	// Input validation.
	if !IsValidBucketName(bucket) {
		return ObjectInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(object) {
		return ObjectInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	// Normalize buckets.
	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	if _, e := os.Stat(bucketPath); e != nil {
		if os.IsNotExist(e) {
			return ObjectInfo{}, probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return ObjectInfo{}, probe.NewError(e)
	}

	info, err := getObjectInfo(fs.path, bucket, object)
	if err != nil {
		if os.IsNotExist(err.ToGoError()) {
			return ObjectInfo{}, probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
		}
		return ObjectInfo{}, err.Trace(bucket, object)
	}
	if info.IsDir {
		return ObjectInfo{}, probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
	}
	return info, nil
}
// getObjectInfo - get object stat info.
func getObjectInfo(rootPath, bucket, object string) (ObjectInfo, *probe.Error) {
	// Do not use filepath.Join() here: it strips the trailing '/' off object
	// names, so build the path statically and keep the name exactly as given.
	// That way os.Stat() fails on the true path and we can send a proper
	// 'ObjectNotFound' reply back. os.PathSeparator is '\\' on Windows and
	// '/' elsewhere, so a single concatenation covers both platforms.
	objectPath := rootPath + string(os.PathSeparator) + bucket + string(os.PathSeparator) + object
	stat, e := os.Stat(objectPath)
	if e != nil {
		return ObjectInfo{}, probe.NewError(e)
	}

	contentType := "application/octet-stream"
	if runtime.GOOS == "windows" {
		object = filepath.ToSlash(object)
	}
	if objectExt := filepath.Ext(object); objectExt != "" {
		if content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]; ok {
			contentType = content.ContentType
		}
	}

	metadata := ObjectInfo{
		Bucket:       bucket,
		Name:         object,
		ModifiedTime: stat.ModTime(),
		Size:         stat.Size(),
		ContentType:  contentType,
		IsDir:        stat.Mode().IsDir(),
	}
	return metadata, nil
}
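The comment above about filepath.Join is easy to gloss over, so here is a two-line demonstration of what its internal Clean step does to a trailing slash (pure stdlib, Unix-style separators):

```
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Cleaned: the trailing slash that distinguished the object name is gone.
	fmt.Println(filepath.Join("/root", "bucket", "dir/")) // /root/bucket/dir

	// Static concatenation keeps the name exactly as the client sent it.
	fmt.Println("/root" + "/" + "bucket" + "/" + "dir/") // /root/bucket/dir/
}
```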
// isMD5SumEqual - returns true if the expected and actual md5sums match, false otherwise.
func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) bool {
	// Verify the md5sums.
	if expectedMD5Sum != "" && actualMD5Sum != "" {
		// Decode the md5sums to bytes from their hexadecimal representations.
		expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum)
		if err != nil {
			return false
		}
		actualMD5SumBytes, err := hex.DecodeString(actualMD5Sum)
		if err != nil {
			return false
		}
		// Verify the md5sum bytes are equal after successful decoding.
		return bytes.Equal(expectedMD5SumBytes, actualMD5SumBytes)
	}
	return false
}
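Because the comparison decodes the hex strings before comparing bytes, it is case-insensitive and rejects anything that is not valid hex; empty inputs never match. A hypothetical in-package test spelling that out (the digest below is the md5 of "The quick brown fox jumps over the lazy dog"):

```
func TestIsMD5SumEqualBehavior(t *testing.T) { // hypothetical; would live in package fs
	if !isMD5SumEqual("9e107d9d372bb6826bd81d3542a419d6", "9E107D9D372BB6826BD81D3542A419D6") {
		t.Error("hex decoding should make the comparison case-insensitive")
	}
	if isMD5SumEqual("", "9e107d9d372bb6826bd81d3542a419d6") {
		t.Error("an empty md5sum should never match")
	}
	if isMD5SumEqual("zz", "zz") {
		t.Error("non-hex input should be rejected, not compared as text")
	}
}
```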
// CreateObject - create an object.
func (fs Filesystem) CreateObject(bucket string, object string, size int64, data io.Reader, md5Bytes []byte) (ObjectInfo, *probe.Error) {
	di, e := disk.GetInfo(fs.path)
	if e != nil {
		return ObjectInfo{}, probe.NewError(e)
	}

	// Remove 5% from total space for cumulative disk space used for
	// journalling, inodes etc.
	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
	if int64(availableDiskSpace) <= fs.minFreeDisk {
		return ObjectInfo{}, probe.NewError(RootPathFull{Path: fs.path})
	}

	// Check bucket name valid.
	if !IsValidBucketName(bucket) {
		return ObjectInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	if _, e = os.Stat(bucketPath); e != nil {
		if os.IsNotExist(e) {
			return ObjectInfo{}, probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return ObjectInfo{}, probe.NewError(e)
	}

	// Verify object path legal.
	if !IsValidObjectName(object) {
		return ObjectInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	// Get object path.
	objectPath := filepath.Join(bucketPath, object)

	// md5Hex representation.
	var md5Hex string
	if len(md5Bytes) != 0 {
		md5Hex = hex.EncodeToString(md5Bytes)
	}

	// Write object.
	file, e := atomic.FileCreateWithPrefix(objectPath, md5Hex+"$tmpobject")
	if e != nil {
		switch e := e.(type) {
		case *os.PathError:
			if e.Op == "mkdir" {
				if strings.Contains(e.Error(), "not a directory") {
					return ObjectInfo{}, probe.NewError(ObjectExistsAsPrefix{Bucket: bucket, Prefix: object})
				}
			}
			return ObjectInfo{}, probe.NewError(e)
		default:
			return ObjectInfo{}, probe.NewError(e)
		}
	}
	defer file.Close()

	// Initialize md5 writer.
	md5Writer := md5.New()

	// Instantiate a multi writer over the md5 hasher and the file.
	multiWriter := io.MultiWriter(md5Writer, file)

	// Copy the incoming data through the multi writer: CopyN for a known
	// size, Copy otherwise.
	if size > 0 {
		if _, e = io.CopyN(multiWriter, data, size); e != nil {
			file.CloseAndPurge()
			return ObjectInfo{}, probe.NewError(e)
		}
	} else {
		if _, e = io.Copy(multiWriter, data); e != nil {
			file.CloseAndPurge()
			return ObjectInfo{}, probe.NewError(e)
		}
	}

	newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
	if len(md5Bytes) != 0 {
		if newMD5Hex != md5Hex {
			return ObjectInfo{}, probe.NewError(BadDigest{md5Hex, newMD5Hex})
		}
	}

	// Stat again to get the latest metadata.
	st, e := os.Stat(file.Name())
	if e != nil {
		return ObjectInfo{}, probe.NewError(e)
	}

	contentType := "application/octet-stream"
	if objectExt := filepath.Ext(objectPath); objectExt != "" {
		if content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]; ok {
			contentType = content.ContentType
		}
	}
	newObject := ObjectInfo{
		Bucket:       bucket,
		Name:         object,
		ModifiedTime: st.ModTime(),
		Size:         st.Size(),
		MD5Sum:       newMD5Hex,
		ContentType:  contentType,
	}
	return newObject, nil
}
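End to end, a caller hashes the payload first and hands the raw digest bytes (not the hex string) to CreateObject; a nil digest skips verification, while a mismatch surfaces as BadDigest. A sketch with hypothetical names:

```
package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"log"

	"github.com/minio/minio/pkg/fs"
)

func main() {
	filesystem, err := fs.New("/tmp/minio-root") // hypothetical root path
	if err != nil {
		log.Fatalln(err)
	}
	if err := filesystem.MakeBucket("bucket"); err != nil {
		log.Fatalln(err)
	}
	data := []byte("hello, world")
	hasher := md5.New()
	hasher.Write(data)
	// CreateObject hex-encodes the digest and verifies the stored bytes
	// against it; passing nil instead would skip the verification.
	info, err := filesystem.CreateObject("bucket", "greeting.txt", int64(len(data)), bytes.NewReader(data), hasher.Sum(nil))
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(info.Name, info.Size, info.MD5Sum)
}
```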
// deleteObjectPath - delete the object path if it is empty.
func deleteObjectPath(basePath, deletePath, bucket, object string) *probe.Error {
	if basePath == deletePath {
		return nil
	}
	// Verify the path exists.
	pathSt, e := os.Stat(deletePath)
	if e != nil {
		if os.IsNotExist(e) {
			return probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
		}
		return probe.NewError(e)
	}
	if pathSt.IsDir() {
		// Verify the directory is empty.
		empty, e := isDirEmpty(deletePath)
		if e != nil {
			return probe.NewError(e)
		}
		if !empty {
			return nil
		}
	}
	// Attempt to remove the path.
	if e := os.Remove(deletePath); e != nil {
		return probe.NewError(e)
	}
	// Recurse into the parent path and delete again.
	if err := deleteObjectPath(basePath, filepath.Dir(deletePath), bucket, object); err != nil {
		return err.Trace(basePath, deletePath, bucket, object)
	}
	return nil
}
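The recursion walks upward: after the object itself is removed, each now-empty parent directory is deleted in turn until basePath (the bucket root) is reached or a non-empty ancestor stops the climb. A standalone sketch of that pruning idea, independent of the fs types and covering only the directory case:

```
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// pruneEmpty mirrors deleteObjectPath's upward walk for directories only.
func pruneEmpty(basePath, deletePath string) error {
	if basePath == deletePath {
		return nil // never delete the root itself
	}
	entries, err := ioutil.ReadDir(deletePath)
	if err != nil {
		return err
	}
	if len(entries) > 0 {
		return nil // stop at the first non-empty ancestor
	}
	if err := os.Remove(deletePath); err != nil {
		return err
	}
	return pruneEmpty(basePath, filepath.Dir(deletePath))
}

func main() {
	root, _ := ioutil.TempDir("", "prune")
	defer os.RemoveAll(root)
	leaf := filepath.Join(root, "a", "b", "c")
	os.MkdirAll(leaf, 0755)
	pruneEmpty(root, leaf)
	_, err := os.Stat(filepath.Join(root, "a"))
	fmt.Println(os.IsNotExist(err)) // true: the empty chain was pruned away
}
```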
// DeleteObject - delete object.
func (fs Filesystem) DeleteObject(bucket, object string) *probe.Error {
	// Check bucket name valid.
	if !IsValidBucketName(bucket) {
		return probe.NewError(BucketNameInvalid{Bucket: bucket})
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	// Check bucket exists.
	if _, e := os.Stat(bucketPath); e != nil {
		if os.IsNotExist(e) {
			return probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return probe.NewError(e)
	}

	// Verify object path legal.
	if !IsValidObjectName(object) {
		return probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	// Do not use filepath.Join() here: it strips the trailing '/' off object
	// names, so build the path statically to send a proper 'ObjectNotFound'
	// reply back upon os.Stat(). os.PathSeparator covers both platforms.
	objectPath := fs.path + string(os.PathSeparator) + bucket + string(os.PathSeparator) + object

	// Delete the object path if it is empty.
	err := deleteObjectPath(bucketPath, objectPath, bucket, object)
	if err != nil {
		if os.IsNotExist(err.ToGoError()) {
			return probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
		}
		return err.Trace(bucketPath, objectPath, bucket, object)
	}
	return nil
}
@@ -1,249 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fs

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"testing"
)
// Testing GetObjectInfo().
func TestGetObjectInfo(t *testing.T) {
	directory, e := ioutil.TempDir("", "minio-get-objinfo-test")
	if e != nil {
		t.Fatal(e)
	}
	defer os.RemoveAll(directory)

	// Create the filesystem.
	fs, err := New(directory)
	if err != nil {
		t.Fatal(err)
	}
	// This bucket is used for testing GetObjectInfo operations.
	err = fs.MakeBucket("test-getobjectinfo")
	if err != nil {
		t.Fatal(err)
	}
	_, err = fs.CreateObject("test-getobjectinfo", "Asia/asiapics.jpg", int64(len("asiapics")), bytes.NewBufferString("asiapics"), nil)
	if err != nil {
		t.Fatal(err)
	}
	resultCases := []ObjectInfo{
		// ObjectInfo - 1.
		// ObjectName set to an existing object in the test case (Test number 13).
		{Bucket: "test-getobjectinfo", Name: "Asia/asiapics.jpg", ContentType: "image/jpeg", IsDir: false},
	}
	testCases := []struct {
		rootPath   string
		bucketName string
		objectName string

		// Expected output of GetObjectInfo.
		result ObjectInfo
		err    error
		// Flag indicating whether the test is expected to pass or not.
		shouldPass bool
	}{
		// Test cases with invalid bucket names (Test numbers 1-4).
		{fs.path, ".test", "", ObjectInfo{}, BucketNameInvalid{Bucket: ".test"}, false},
		{fs.path, "Test", "", ObjectInfo{}, BucketNameInvalid{Bucket: "Test"}, false},
		{fs.path, "---", "", ObjectInfo{}, BucketNameInvalid{Bucket: "---"}, false},
		{fs.path, "ad", "", ObjectInfo{}, BucketNameInvalid{Bucket: "ad"}, false},
		// Test cases with valid but non-existing bucket names (Test numbers 5-6).
		{fs.path, "abcdefgh", "abc", ObjectInfo{}, BucketNotFound{Bucket: "abcdefgh"}, false},
		{fs.path, "ijklmnop", "efg", ObjectInfo{}, BucketNotFound{Bucket: "ijklmnop"}, false},
		// Test cases with valid but non-existing bucket names and invalid object names (Test numbers 7-8).
		{fs.path, "abcdefgh", "", ObjectInfo{}, ObjectNameInvalid{Bucket: "abcdefgh", Object: ""}, false},
		{fs.path, "ijklmnop", "", ObjectInfo{}, ObjectNameInvalid{Bucket: "ijklmnop", Object: ""}, false},
		// Test cases with non-existing object names in an existing bucket (Test numbers 9-11).
		{fs.path, "test-getobjectinfo", "Africa", ObjectInfo{}, ObjectNotFound{Bucket: "test-getobjectinfo", Object: "Africa"}, false},
		{fs.path, "test-getobjectinfo", "Antartica", ObjectInfo{}, ObjectNotFound{Bucket: "test-getobjectinfo", Object: "Antartica"}, false},
		{fs.path, "test-getobjectinfo", "Asia/myfile", ObjectInfo{}, ObjectNotFound{Bucket: "test-getobjectinfo", Object: "Asia/myfile"}, false},
		// Test case with existing bucket but object name set to a directory (Test number 12).
		{fs.path, "test-getobjectinfo", "Asia", ObjectInfo{}, ObjectNotFound{Bucket: "test-getobjectinfo", Object: "Asia"}, false},
		// Valid case with existing object (Test number 13).
		{fs.path, "test-getobjectinfo", "Asia/asiapics.jpg", resultCases[0], nil, true},
	}
	for i, testCase := range testCases {
		result, err := fs.GetObjectInfo(testCase.bucketName, testCase.objectName)
		if err != nil && testCase.shouldPass {
			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Cause.Error())
		}
		if err == nil && !testCase.shouldPass {
			t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
		}
		// Failed as expected, but did it fail for the expected reason?
		if err != nil && !testCase.shouldPass {
			if testCase.err.Error() != err.Cause.Error() {
				t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\"", i+1, testCase.err.Error(), err.Cause.Error())
			}
		}

		// Test passed as expected; verify the output values for correctness.
		if err == nil && testCase.shouldPass {
			if testCase.result.Bucket != result.Bucket {
				t.Fatalf("Test %d: Expected Bucket name to be '%s', but found '%s' instead", i+1, testCase.result.Bucket, result.Bucket)
			}
			if testCase.result.Name != result.Name {
				t.Errorf("Test %d: Expected Object name to be %s, but instead found it to be %s", i+1, testCase.result.Name, result.Name)
			}
			if testCase.result.ContentType != result.ContentType {
				t.Errorf("Test %d: Expected Content Type of the object to be %v, but instead found it to be %v", i+1, testCase.result.ContentType, result.ContentType)
			}
			if testCase.result.IsDir != result.IsDir {
				t.Errorf("Test %d: Expected IsDir flag of the object to be %v, but instead found it to be %v", i+1, testCase.result.IsDir, result.IsDir)
			}
		}
	}
}
// Testing getObjectInfo().
func TestGetObjectInfoCore(t *testing.T) {
	directory, e := ioutil.TempDir("", "minio-get-objinfo-test")
	if e != nil {
		t.Fatal(e)
	}
	defer os.RemoveAll(directory)

	// Create the filesystem.
	fs, err := New(directory)
	if err != nil {
		t.Fatal(err)
	}
	// This bucket is used for testing getObjectInfo operations.
	err = fs.MakeBucket("test-getobjinfo")
	if err != nil {
		t.Fatal(err)
	}
	_, err = fs.CreateObject("test-getobjinfo", "Asia/asiapics.jpg", int64(len("asiapics")), bytes.NewBufferString("asiapics"), nil)
	if err != nil {
		t.Fatal(err)
	}
	resultCases := []ObjectInfo{
		// ObjectInfo - 1.
		// ObjectName set to an existing directory in the test case.
		{Bucket: "test-getobjinfo", Name: "Asia", Size: 0, ContentType: "application/octet-stream", IsDir: true},
		// ObjectInfo - 2.
		// ObjectName set to an existing object in the test case.
		{Bucket: "test-getobjinfo", Name: "Asia/asiapics.jpg", Size: int64(len("asiapics")), ContentType: "image/jpeg", IsDir: false},
		// ObjectInfo - 3.
		// ObjectName set to a non-existing object in the test case.
		{Bucket: "test-getobjinfo", Name: "Africa", Size: 0, ContentType: "image/jpeg", IsDir: false},
	}
	testCases := []struct {
		rootPath   string
		bucketName string
		objectName string

		// Expected output of getObjectInfo.
		result ObjectInfo
		err    error

		// Flag indicating whether the test is expected to pass or not.
		shouldPass bool
	}{
		// Test case with object name set to an existing directory (Test number 1).
		{fs.path, "test-getobjinfo", "Asia", resultCases[0], nil, true},
		// ObjectName set to an existing object (Test number 2).
		{fs.path, "test-getobjinfo", "Asia/asiapics.jpg", resultCases[1], nil, true},
		// Object name set to a non-existing object (Test number 3).
		{fs.path, "test-getobjinfo", "Africa", resultCases[2], fmt.Errorf("%s", filepath.FromSlash("test-getobjinfo/Africa")), false},
	}
	for i, testCase := range testCases {
		result, err := getObjectInfo(testCase.rootPath, testCase.bucketName, testCase.objectName)
		if err != nil && testCase.shouldPass {
			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Cause.Error())
		}
		if err == nil && !testCase.shouldPass {
			t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
		}
		// Failed as expected, but did it fail for the expected reason?
		if err != nil && !testCase.shouldPass {
			if !strings.Contains(err.Cause.Error(), testCase.err.Error()) {
				t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\"", i+1, testCase.err.Error(), err.Cause.Error())
			}
		}

		// Test passed as expected; verify the output values for correctness.
		if err == nil && testCase.shouldPass {
			if testCase.result.Bucket != result.Bucket {
				t.Fatalf("Test %d: Expected Bucket name to be '%s', but found '%s' instead", i+1, testCase.result.Bucket, result.Bucket)
			}
			if testCase.result.Name != result.Name {
				t.Errorf("Test %d: Expected Object name to be %s, but instead found it to be %s", i+1, testCase.result.Name, result.Name)
			}
			if testCase.result.ContentType != result.ContentType {
				t.Errorf("Test %d: Expected Content Type of the object to be %v, but instead found it to be %v", i+1, testCase.result.ContentType, result.ContentType)
			}
			if testCase.result.IsDir != result.IsDir {
				t.Errorf("Test %d: Expected IsDir flag of the object to be %v, but instead found it to be %v", i+1, testCase.result.IsDir, result.IsDir)
			}
		}
	}
}
func BenchmarkGetObject(b *testing.B) {
	// Make a temporary directory to use as the filesystem.
	directory, e := ioutil.TempDir("", "minio-benchmark-getobject")
	if e != nil {
		b.Fatal(e)
	}
	defer os.RemoveAll(directory)

	// Create the filesystem.
	filesystem, err := New(directory)
	if err != nil {
		b.Fatal(err)
	}

	// Make a bucket and put in a few objects.
	err = filesystem.MakeBucket("bucket")
	if err != nil {
		b.Fatal(err)
	}

	text := "Jack and Jill went up the hill / To fetch a pail of water."
	hasher := md5.New()
	hasher.Write([]byte(text))
	for i := 0; i < 10; i++ {
		_, err = filesystem.CreateObject("bucket", "object"+strconv.Itoa(i), int64(len(text)), bytes.NewBufferString(text), hasher.Sum(nil))
		if err != nil {
			b.Fatal(err)
		}
	}

	var w bytes.Buffer
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		n, err := filesystem.GetObject(&w, "bucket", "object"+strconv.Itoa(i%10), 0, 0)
		if err != nil {
			b.Error(err)
		}
		if n != int64(len(text)) {
			b.Errorf("GetObject returned incorrect length %d (should be %d)\n", n, int64(len(text)))
		}
	}
}
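Note that the ten objects are created before b.ResetTimer(), so only the read path (open, seek, copy) is measured, round-robin across the objects. The benchmark can be run in isolation with `go test -run NONE -bench GetObject` from the package directory.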
@@ -1,65 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fs

import (
	"regexp"
	"unicode/utf8"
)
// validBucket regexp.
var validBucket = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)

// IsValidBucketName verifies a bucket name in accordance with Amazon's
// requirements. It must be 3-63 characters long, can contain dashes
// and periods, but must begin and end with a lowercase letter or a number.
// See: http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
func IsValidBucketName(bucket string) bool {
	if len(bucket) < 3 || len(bucket) > 63 {
		return false
	}
	if bucket[0] == '.' || bucket[len(bucket)-1] == '.' {
		return false
	}
	return validBucket.MatchString(bucket)
}

// IsValidObjectName verifies an object name in accordance with Amazon's
// requirements. It cannot exceed 1024 characters and must be a valid UTF-8
// string.
// See: http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
func IsValidObjectName(object string) bool {
	if len(object) > 1024 || len(object) == 0 {
		return false
	}
	if !utf8.ValidString(object) {
		return false
	}
	return true
}

// IsValidObjectPrefix verifies whether the prefix is a valid object name.
// It is valid to have an empty prefix.
func IsValidObjectPrefix(object string) bool {
	// Prefix can be empty.
	if object == "" {
		return true
	}
	// Verify whether the prefix is a valid object name.
	return IsValidObjectName(object)
}
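A quick tour of the three validators; a runnable sketch whose expected results follow directly from the rules and regexp above:

```
package main

import (
	"fmt"

	"github.com/minio/minio/pkg/fs"
)

func main() {
	fmt.Println(fs.IsValidBucketName("my-bucket.2016")) // true
	fmt.Println(fs.IsValidBucketName("Mixed-Case"))     // false: uppercase is rejected
	fmt.Println(fs.IsValidBucketName("ab"))             // false: shorter than 3 characters

	fmt.Println(fs.IsValidObjectName("photos/2016/march/sample.jpg")) // true: '/' is allowed
	fmt.Println(fs.IsValidObjectName(""))                             // false: empty names are rejected

	fmt.Println(fs.IsValidObjectPrefix("")) // true: an empty prefix is explicitly allowed
}
```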
@@ -1,114 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fs

import (
	"testing"
)

// Validates bucket name.
func ensureBucketName(t *testing.T, name string, testNum int, pass bool) {
	isValidBucketName := IsValidBucketName(name)
	if pass && !isValidBucketName {
		t.Errorf("Test case %d: Expected \"%s\" to be a valid bucket name", testNum, name)
	}
	if !pass && isValidBucketName {
		t.Errorf("Test case %d: Expected bucket name \"%s\" to be invalid", testNum, name)
	}
}

func TestIsValidBucketName(t *testing.T) {
	testCases := []struct {
		bucketName string
		shouldPass bool
	}{
		// Cases which should pass the test: valid bucket names.
		{"lol", true},
		{"1-this-is-valid", true},
		{"1-this-too-is-valid-1", true},
		{"this.works.too.1", true},
		{"1234567", true},
		{"123", true},
		{"s3-eu-west-1.amazonaws.com", true},
		{"ideas-are-more-powerful-than-guns", true},
		{"testbucket", true},
		{"1bucket", true},
		{"bucket1", true},
		// Cases for which the test should fail: invalid bucket names.
		{"------", false},
		{"$this-is-not-valid-too", false},
		{"contains-$-dollar", false},
		{"contains-^-carrot", false},
		{"contains-$-dollar", false},
		{"contains-$-dollar", false},
		{"......", false},
		{"", false},
		{"a", false},
		{"ab", false},
		{".starts-with-a-dot", false},
		{"ends-with-a-dot.", false},
		{"ends-with-a-dash-", false},
		{"-starts-with-a-dash", false},
		{"THIS-BEINGS-WITH-UPPERCASe", false},
		{"tHIS-ENDS-WITH-UPPERCASE", false},
		{"ThisBeginsAndEndsWithUpperCase", false},
		{"una ñina", false},
		{"lalalallalallalalalallalallalala-theString-size-is-greater-than-64", false},
	}

	for i, testCase := range testCases {
		ensureBucketName(t, testCase.bucketName, i+1, testCase.shouldPass)
	}
}

// Test for validating object name.
func ensureObjectName(t *testing.T, name string, testNum int, pass bool) {
	isValidObjectName := IsValidObjectName(name)
	if pass && !isValidObjectName {
		t.Errorf("Test case %d: Expected \"%s\" to be a valid object name", testNum, name)
	}
	if !pass && isValidObjectName {
		t.Errorf("Test case %d: Expected object name \"%s\" to be invalid", testNum, name)
	}
}

func TestIsValidObjectName(t *testing.T) {
	testCases := []struct {
		objectName string
		shouldPass bool
	}{
		// Cases which should pass the test: valid object names.
		{"object", true},
		{"The Shining Script <v1>.pdf", true},
		{"Cost Benefit Analysis (2009-2010).pptx", true},
		{"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", true},
		{"SHØRT", true},
		{"There are far too many object names, and far too few bucket names!", true},
		// Cases for which the test should fail: invalid object names.
		{"", false},
		{string([]byte{0xff, 0xfe, 0xfd}), false},
	}

	for i, testCase := range testCases {
		ensureObjectName(t, testCase.objectName, i+1, testCase.shouldPass)
	}
}
pkg/fs/fs.go
@@ -1,141 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fs

import (
	"os"
	"path/filepath"
	"sync"
	"time"

	"github.com/minio/minio/pkg/probe"
)

// ListObjectParams - list object params used for the list object map.
type ListObjectParams struct {
	bucket    string
	delimiter string
	marker    string
	prefix    string
}

// Filesystem - local filesystem object layer.
type Filesystem struct {
	path               string
	minFreeDisk        int64
	rwLock             *sync.RWMutex
	multiparts         *Multiparts
	listObjectMap      map[ListObjectParams][]ObjectInfoChannel
	listObjectMapMutex *sync.Mutex
}
// pushListObjectCh - save a partially consumed listing channel so that a
// continuation request with identical params can resume it.
func (fs *Filesystem) pushListObjectCh(params ListObjectParams, ch ObjectInfoChannel) {
	fs.listObjectMapMutex.Lock()
	defer fs.listObjectMapMutex.Unlock()

	channels := []ObjectInfoChannel{ch}
	if _, ok := fs.listObjectMap[params]; ok {
		channels = append(fs.listObjectMap[params], ch)
	}

	fs.listObjectMap[params] = channels
}

// popListObjectCh - fetch the first saved channel for params that has not
// timed out, dropping stale ones along the way; returns nil if none survive.
func (fs *Filesystem) popListObjectCh(params ListObjectParams) *ObjectInfoChannel {
	fs.listObjectMapMutex.Lock()
	defer fs.listObjectMapMutex.Unlock()

	if channels, ok := fs.listObjectMap[params]; ok {
		for i, channel := range channels {
			if !channel.IsTimedOut() {
				chs := channels[i+1:]
				if len(chs) > 0 {
					fs.listObjectMap[params] = chs
				} else {
					delete(fs.listObjectMap, params)
				}

				return &channel
			}
		}

		// As all channels have timed out, delete the map entry.
		delete(fs.listObjectMap, params)
	}

	return nil
}
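Together these two helpers cache a partially drained listing channel keyed by the exact (bucket, delimiter, marker, prefix) tuple, so a continuation request can resume the directory walk instead of restarting it. A minimal standalone sketch of that save/resume pattern, with the timeout check simplified to a boolean flag:

```
package main

import (
	"fmt"
	"sync"
)

type params struct{ bucket, prefix, marker string }

type savedCh struct {
	ch       chan string
	timedOut bool
}

type cache struct {
	mu sync.Mutex
	m  map[params][]savedCh
}

// push saves a partially drained channel for a later continuation.
func (c *cache) push(p params, s savedCh) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.m[p] = append(c.m[p], s)
}

// pop returns the first live channel for p, dropping timed-out ones.
func (c *cache) pop(p params) *savedCh {
	c.mu.Lock()
	defer c.mu.Unlock()
	for i, s := range c.m[p] {
		if !s.timedOut {
			if rest := c.m[p][i+1:]; len(rest) > 0 {
				c.m[p] = rest
			} else {
				delete(c.m, p)
			}
			return &s
		}
	}
	delete(c.m, p) // every saved channel had timed out
	return nil
}

func main() {
	c := &cache{m: make(map[params][]savedCh)}
	ch := make(chan string, 1)
	ch <- "b.txt" // pretend the walk had already produced up to "a.txt"
	p := params{bucket: "bucket", prefix: "photos/", marker: "a.txt"}
	c.push(p, savedCh{ch: ch})
	if s := c.pop(p); s != nil {
		fmt.Println("resumed listing at:", <-s.ch)
	}
}
```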
// MultipartSession holds active session information.
type MultipartSession struct {
	TotalParts int
	ObjectName string
	UploadID   string
	Initiated  time.Time
	Parts      []PartMetadata
}

// Multiparts - collection of many multipart sessions.
type Multiparts struct {
	Version       string                       `json:"version"`
	ActiveSession map[string]*MultipartSession `json:"activeSessions"`
}
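This registry is what gets persisted to $multiparts-session.json (see New just below). Only Multiparts carries JSON tags, so the nested session fields marshal under Go's default exported names. A self-contained sketch showing the resulting on-disk shape, using trimmed mirrors of the structs above:

```
package main

import (
	"encoding/json"
	"os"
	"time"
)

// Trimmed mirrors of the structs above, enough to show the JSON shape.
type MultipartSession struct {
	TotalParts int
	ObjectName string
	UploadID   string
	Initiated  time.Time
}

type Multiparts struct {
	Version       string                       `json:"version"`
	ActiveSession map[string]*MultipartSession `json:"activeSessions"`
}

func main() {
	m := Multiparts{
		Version: "1",
		ActiveSession: map[string]*MultipartSession{
			"upload-1": {TotalParts: 1, ObjectName: "a.jpg", UploadID: "upload-1", Initiated: time.Now()},
		},
	}
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	_ = enc.Encode(m)
	// Output shape:
	// {
	//   "version": "1",
	//   "activeSessions": {
	//     "upload-1": { "TotalParts": 1, "ObjectName": "a.jpg", ... }
	//   }
	// }
}
```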
// New - instantiate a new filesystem layer.
func New(rootPath string) (Filesystem, *probe.Error) {
	setFSMultipartsMetadataPath(filepath.Join(rootPath, "$multiparts-session.json"))

	var err *probe.Error
	// Load any multiparts session from disk; start fresh if none exists.
	var multiparts *Multiparts
	multiparts, err = loadMultipartsSession()
	if err != nil {
		if os.IsNotExist(err.ToGoError()) {
			multiparts = &Multiparts{
				Version:       "1",
				ActiveSession: make(map[string]*MultipartSession),
			}
			if err = saveMultipartsSession(*multiparts); err != nil {
				return Filesystem{}, err.Trace()
			}
		} else {
			return Filesystem{}, err.Trace()
		}
	}

	fs := Filesystem{
		rwLock: &sync.RWMutex{},
	}
	fs.path = rootPath
	fs.multiparts = multiparts

	/// Defaults

	// Minimum free disk (in percent) required for i/o operations to succeed.
	fs.minFreeDisk = 5

	fs.listObjectMap = make(map[ListObjectParams][]ObjectInfoChannel)
	fs.listObjectMapMutex = &sync.Mutex{}

	return fs, nil
}

// GetRootPath - get root path.
func (fs Filesystem) GetRootPath() string {
	return fs.path
}
@@ -1,52 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fs

import (
	"io/ioutil"
	"os"
	"testing"

	. "gopkg.in/check.v1"
)

func Test(t *testing.T) { TestingT(t) }

type MySuite struct{}

var _ = Suite(&MySuite{})

func (s *MySuite) TestAPISuite(c *C) {
	var storageList []string
	create := func() Filesystem {
		path, e := ioutil.TempDir(os.TempDir(), "minio-")
		c.Check(e, IsNil)
		storageList = append(storageList, path)
		store, err := New(path)
		c.Check(err, IsNil)
		return store
	}
	APITestSuite(c, create)
	defer removeRoots(c, storageList)
}

func removeRoots(c *C, roots []string) {
	for _, root := range roots {
		err := os.RemoveAll(root)
		c.Check(err, IsNil)
	}
}