Mirror of https://github.com/minio/minio.git
Commit: Implementing min-free-disk
@@ -1,3 +1,19 @@
+/*
+ * Minio Cloud Storage, (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package fs
 
 import (
@@ -6,7 +22,7 @@ import (
 )
 
 // IsPrivateBucket - is private bucket
-func (fs API) IsPrivateBucket(bucket string) bool {
+func (fs Filesystem) IsPrivateBucket(bucket string) bool {
 	fs.lock.Lock()
 	defer fs.lock.Unlock()
 	// get bucket path
@@ -19,7 +35,7 @@ func (fs API) IsPrivateBucket(bucket string) bool {
 }
 
 // IsPublicBucket - is public bucket
-func (fs API) IsPublicBucket(bucket string) bool {
+func (fs Filesystem) IsPublicBucket(bucket string) bool {
 	fs.lock.Lock()
 	defer fs.lock.Unlock()
 	// get bucket path
@@ -32,7 +48,7 @@ func (fs API) IsPublicBucket(bucket string) bool {
 }
 
 // IsReadOnlyBucket - is read only bucket
-func (fs API) IsReadOnlyBucket(bucket string) bool {
+func (fs Filesystem) IsReadOnlyBucket(bucket string) bool {
 	fs.lock.Lock()
 	defer fs.lock.Unlock()
 	// get bucket path

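These ACL predicates key off the bucket directory's permission bits, which this commit maps to ACL names (0700 private, 0500 public-read, 0777 public-read-write) in permToACL/aclToPerm further below. A minimal, self-contained sketch of that idea, assuming those mode conventions; the helper and path here are illustrative, not the repository's code:

package main

import (
	"fmt"
	"os"
)

// bucketACL derives an ACL name from a bucket directory's permission
// bits, mirroring the permToACL mapping introduced by this commit.
func bucketACL(bucketDir string) (string, error) {
	fi, err := os.Stat(bucketDir)
	if err != nil {
		return "", err
	}
	switch fi.Mode().Perm() {
	case 0500:
		return "public-read", nil
	case 0777:
		return "public-read-write", nil
	default: // 0700 and anything unrecognized is treated as private
		return "private", nil
	}
}

func main() {
	acl, err := bucketACL("/tmp/minio/bucket") // hypothetical path
	if err != nil {
		fmt.Println("stat failed:", err)
		return
	}
	fmt.Println("bucket acl:", acl)
}
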
@@ -32,7 +32,7 @@ import (
 )
 
 // APITestSuite - collection of API tests
-func APITestSuite(c *check.C, create func() CloudStorage) {
+func APITestSuite(c *check.C, create func() Filesystem) {
 	testMakeBucket(c, create)
 	testMultipleObjectCreation(c, create)
 	testPaging(c, create)
@@ -51,13 +51,13 @@ func APITestSuite(c *check.C, create func() CloudStorage) {
 	testMultipartObjectAbort(c, create)
 }
 
-func testMakeBucket(c *check.C, create func() CloudStorage) {
+func testMakeBucket(c *check.C, create func() Filesystem) {
 	fs := create()
 	err := fs.MakeBucket("bucket", "")
 	c.Assert(err, check.IsNil)
 }
 
-func testMultipartObjectCreation(c *check.C, create func() CloudStorage) {
+func testMultipartObjectCreation(c *check.C, create func() Filesystem) {
 	fs := create()
 	err := fs.MakeBucket("bucket", "")
 	c.Assert(err, check.IsNil)
@@ -95,7 +95,7 @@ func testMultipartObjectCreation(c *check.C, create func() CloudStorage) {
 	c.Assert(objectMetadata.Md5, check.Equals, finalExpectedmd5SumHex)
 }
 
-func testMultipartObjectAbort(c *check.C, create func() CloudStorage) {
+func testMultipartObjectAbort(c *check.C, create func() Filesystem) {
 	fs := create()
 	err := fs.MakeBucket("bucket", "")
 	c.Assert(err, check.IsNil)
@@ -126,7 +126,7 @@ func testMultipartObjectAbort(c *check.C, create func() CloudStorage) {
 	c.Assert(err, check.IsNil)
 }
 
-func testMultipleObjectCreation(c *check.C, create func() CloudStorage) {
+func testMultipleObjectCreation(c *check.C, create func() Filesystem) {
 	objects := make(map[string][]byte)
 	fs := create()
 	err := fs.MakeBucket("bucket", "")
@@ -162,7 +162,7 @@ func testMultipleObjectCreation(c *check.C, create func() CloudStorage) {
 	}
 }
 
-func testPaging(c *check.C, create func() CloudStorage) {
+func testPaging(c *check.C, create func() Filesystem) {
 	fs := create()
 	fs.MakeBucket("bucket", "")
 	resources := BucketResourcesMetadata{}
@@ -295,7 +295,7 @@ func testPaging(c *check.C, create func() CloudStorage) {
 	}
 }
 
-func testObjectOverwriteWorks(c *check.C, create func() CloudStorage) {
+func testObjectOverwriteWorks(c *check.C, create func() Filesystem) {
 	fs := create()
-	err := fs.MakeBucket("bucket", "")
-	c.Assert(err, check.IsNil)
+	fs.MakeBucket("bucket", "")
@@ -321,13 +320,13 @@ func testObjectOverwriteWorks(c *check.C, create func() CloudStorage) {
 	c.Assert(string(bytesBuffer.Bytes()), check.Equals, "three")
 }
 
-func testNonExistantBucketOperations(c *check.C, create func() CloudStorage) {
+func testNonExistantBucketOperations(c *check.C, create func() Filesystem) {
 	fs := create()
 	_, err := fs.CreateObject("bucket", "object", "", int64(len("one")), bytes.NewBufferString("one"), nil)
 	c.Assert(err, check.Not(check.IsNil))
 }
 
-func testBucketMetadata(c *check.C, create func() CloudStorage) {
+func testBucketMetadata(c *check.C, create func() Filesystem) {
 	fs := create()
-	err := fs.MakeBucket("string", "")
+	err := fs.MakeBucket("string", "private")
 	c.Assert(err, check.IsNil)
@@ -337,7 +337,7 @@ func testBucketMetadata(c *check.C, create func() CloudStorage) {
-	c.Assert(metadata.ACL, check.Equals, BucketACL("private"))
+	c.Assert(metadata.ACL, check.Equals, BucketACL("public-read-write"))
 }
 
-func testBucketRecreateFails(c *check.C, create func() CloudStorage) {
+func testBucketRecreateFails(c *check.C, create func() Filesystem) {
 	fs := create()
-	err := fs.MakeBucket("string", "")
+	err := fs.MakeBucket("string", "private")
 	c.Assert(err, check.IsNil)
@@ -345,7 +345,7 @@ func testBucketRecreateFails(c *check.C, create func() CloudStorage) {
 	c.Assert(err, check.Not(check.IsNil))
 }
 
-func testPutObjectInSubdir(c *check.C, create func() CloudStorage) {
+func testPutObjectInSubdir(c *check.C, create func() Filesystem) {
 	fs := create()
-	err := fs.MakeBucket("bucket", "")
+	err := fs.MakeBucket("bucket", "private")
 	c.Assert(err, check.IsNil)
@@ -365,7 +365,7 @@ func testPutObjectInSubdir(c *check.C, create func() CloudStorage) {
 	c.Assert(int64(len(bytesBuffer.Bytes())), check.Equals, length)
 }
 
-func testListBuckets(c *check.C, create func() CloudStorage) {
+func testListBuckets(c *check.C, create func() Filesystem) {
 	fs := create()
 
 	// test empty list
@@ -397,7 +397,7 @@ func testListBuckets(c *check.C, create func() CloudStorage) {
 	c.Assert(err, check.IsNil)
 }
 
-func testListBucketsOrder(c *check.C, create func() CloudStorage) {
+func testListBucketsOrder(c *check.C, create func() Filesystem) {
 	// if implementation contains a map, order of map keys will vary.
 	// this ensures they return in the same order each time
 	for i := 0; i < 10; i++ {
@@ -415,7 +415,7 @@ func testListBucketsOrder(c *check.C, create func() CloudStorage) {
 	}
 }
 
-func testListObjectsTestsForNonExistantBucket(c *check.C, create func() CloudStorage) {
+func testListObjectsTestsForNonExistantBucket(c *check.C, create func() Filesystem) {
 	fs := create()
 	resources := BucketResourcesMetadata{Prefix: "", Maxkeys: 1000}
 	objects, resources, err := fs.ListObjects("bucket", resources)
@@ -424,7 +424,7 @@ func testListObjectsTestsForNonExistantBucket(c *check.C, create func() CloudSto
 	c.Assert(len(objects), check.Equals, 0)
 }
 
-func testNonExistantObjectInBucket(c *check.C, create func() CloudStorage) {
+func testNonExistantObjectInBucket(c *check.C, create func() Filesystem) {
 	fs := create()
 	err := fs.MakeBucket("bucket", "")
 	c.Assert(err, check.IsNil)
@@ -442,7 +446,7 @@ func testNonExistantObjectInBucket(c *check.C, create func() CloudStorage) {
 	}
 }
 
-func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() CloudStorage) {
+func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() Filesystem) {
 	fs := create()
 	err := fs.MakeBucket("bucket", "")
 	c.Assert(err, check.IsNil)
@@ -477,7 +481,7 @@ func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() CloudStorag
 	c.Assert(len(byteBuffer2.Bytes()), check.Equals, 0)
 }
 
-func testDefaultContentType(c *check.C, create func() CloudStorage) {
+func testDefaultContentType(c *check.C, create func() Filesystem) {
 	fs := create()
 	err := fs.MakeBucket("bucket", "")
 	c.Assert(err, check.IsNil)
@@ -489,7 +493,7 @@ func testDefaultContentType(c *check.C, create func() CloudStorage) {
 	c.Assert(metadata.ContentType, check.Equals, "application/octet-stream")
 }
 
-func testContentMd5Set(c *check.C, create func() CloudStorage) {
+func testContentMd5Set(c *check.C, create func() Filesystem) {
 	fs := create()
 	err := fs.MakeBucket("bucket", "")
 	c.Assert(err, check.IsNil)

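Each helper above calls create() to get a fresh backend, so the tests stay independent of one another. A sketch of how a backend registers with this kind of factory-driven suite, assuming the gocheck framework (gopkg.in/check.v1) that the *check.C signatures come from; this mirrors the test diff near the end of the commit and is an illustration, not the repository's file:

package fs

import (
	"io/ioutil"
	"os"
	"testing"

	. "gopkg.in/check.v1"
)

func Test(t *testing.T) { TestingT(t) } // bridge go test to gocheck

type MySuite struct{}

var _ = Suite(&MySuite{})

func (s *MySuite) TestAPISuite(c *C) {
	create := func() Filesystem {
		// every invocation builds an isolated backend in a temp dir
		path, err := ioutil.TempDir(os.TempDir(), "minio-")
		c.Check(err, IsNil)
		store, perr := New()
		c.Check(perr, IsNil)
		store.SetRootPath(path)
		store.SetMinFreeDisk(0) // disable the free-disk guard for tests
		return store
	}
	APITestSuite(c, create)
}
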
@@ -65,6 +65,15 @@ func (e UnsupportedFilesystem) Error() string {
 	return "Unsupported filesystem: " + e.Type
 }
 
+// RootPathFull root path out of space
+type RootPathFull struct {
+	Path string
+}
+
+func (e RootPathFull) Error() string {
+	return "Root path " + e.Path + " reached its minimum free disk threshold."
+}
+
 // BucketNotFound bucket does not exist
 type BucketNotFound struct {
 	Bucket string

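Because RootPathFull is a distinct type rather than a bare string, callers can branch on it. A minimal, self-contained sketch with local stand-ins for the error types above (not the package's own code, which wraps errors in probe.Error):

package main

import "fmt"

// Local stand-ins for the typed errors added in this commit.
type RootPathFull struct{ Path string }

func (e RootPathFull) Error() string {
	return "Root path " + e.Path + " reached its minimum free disk threshold."
}

type BucketNotFound struct{ Bucket string }

func (e BucketNotFound) Error() string { return "Bucket not found: " + e.Bucket }

func handle(err error) {
	// A type switch keeps disk-full handling separate from client errors.
	switch e := err.(type) {
	case RootPathFull:
		fmt.Println("stop accepting writes, disk low at", e.Path)
	case BucketNotFound:
		fmt.Println("client error: bucket", e.Bucket, "does not exist")
	default:
		fmt.Println("unexpected:", err)
	}
}

func main() {
	handle(RootPathFull{Path: "/export"})
	handle(BucketNotFound{Bucket: "photos"})
}
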
pkg/fs/fs-bucket.go (new file, 434 lines)
@@ -0,0 +1,434 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fs

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/minio/minio-xl/pkg/probe"
	"github.com/minio/minio/pkg/disk"
)

/// Bucket Operations

// DeleteBucket - delete bucket
func (fs Filesystem) DeleteBucket(bucket string) *probe.Error {
	fs.lock.Lock()
	defer fs.lock.Unlock()
	// verify bucket path legal
	if !IsValidBucket(bucket) {
		return probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	bucketDir := filepath.Join(fs.path, bucket)
	// check bucket exists
	if _, err := os.Stat(bucketDir); os.IsNotExist(err) {
		return probe.NewError(BucketNotFound{Bucket: bucket})
	}
	if err := RemoveAllDirs(bucketDir); err != nil {
		if err == ErrDirNotEmpty || strings.Contains(err.Error(), "directory not empty") {
			return probe.NewError(BucketNotEmpty{Bucket: bucket})
		}
		return probe.NewError(err)
	}
	if err := os.Remove(bucketDir); err != nil {
		if strings.Contains(err.Error(), "directory not empty") {
			return probe.NewError(BucketNotEmpty{Bucket: bucket})
		}
		return probe.NewError(err)
	}
	return nil
}

// ListBuckets - Get service
func (fs Filesystem) ListBuckets() ([]BucketMetadata, *probe.Error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()

	files, err := ioutil.ReadDir(fs.path)
	if err != nil {
		return []BucketMetadata{}, probe.NewError(err)
	}

	var metadataList []BucketMetadata
	for _, file := range files {
		if !file.IsDir() {
			// if files found ignore them
			continue
		}
		if file.IsDir() {
			// if directories found with odd names, skip them too
			if !IsValidBucket(file.Name()) {
				continue
			}
		}
		metadata := BucketMetadata{
			Name:    file.Name(),
			Created: file.ModTime(),
		}
		metadataList = append(metadataList, metadata)
	}
	return metadataList, nil
}

// MakeBucket - PUT Bucket
func (fs Filesystem) MakeBucket(bucket, acl string) *probe.Error {
	fs.lock.Lock()
	defer fs.lock.Unlock()

	stfs, err := disk.Stat(fs.path)
	if err != nil {
		return probe.NewError(err)
	}

	if int64((float64(stfs.Free)/float64(stfs.Total))*100) <= fs.minFreeDisk {
		return probe.NewError(RootPathFull{Path: fs.path})
	}

	// verify bucket path legal
	if !IsValidBucket(bucket) {
		return probe.NewError(BucketNameInvalid{Bucket: bucket})
	}

	// get bucket path
	bucketDir := filepath.Join(fs.path, bucket)

	// check if bucket exists
	if _, err := os.Stat(bucketDir); err == nil {
		return probe.NewError(BucketExists{
			Bucket: bucket,
		})
	}

	// make bucket
	err = os.Mkdir(bucketDir, aclToPerm(acl))
	if err != nil {
		return probe.NewError(err)
	}
	return nil
}

// GetBucketMetadata -
func (fs Filesystem) GetBucketMetadata(bucket string) (BucketMetadata, *probe.Error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()
	if !IsValidBucket(bucket) {
		return BucketMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	// get bucket path
	bucketDir := filepath.Join(fs.path, bucket)
	bucketMetadata := BucketMetadata{}
	fi, err := os.Stat(bucketDir)
	// check if bucket exists
	if os.IsNotExist(err) {
		return BucketMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
	}
	if err != nil {
		return BucketMetadata{}, probe.NewError(err)
	}

	bucketMetadata.Name = fi.Name()
	bucketMetadata.Created = fi.ModTime()
	bucketMetadata.ACL = permToACL(fi.Mode())
	return bucketMetadata, nil
}

// permToACL - convert perm to meaningful ACL
func permToACL(mode os.FileMode) BucketACL {
	switch mode.Perm() {
	case os.FileMode(0700):
		return BucketACL("private")
	case os.FileMode(0500):
		return BucketACL("public-read")
	case os.FileMode(0777):
		return BucketACL("public-read-write")
	default:
		return BucketACL("private")
	}
}

// aclToPerm - convert acl to filesystem mode
func aclToPerm(acl string) os.FileMode {
	switch acl {
	case "private":
		return os.FileMode(0700)
	case "public-read":
		return os.FileMode(0500)
	case "public-read-write":
		return os.FileMode(0777)
	default:
		return os.FileMode(0700)
	}
}

// SetBucketMetadata -
func (fs Filesystem) SetBucketMetadata(bucket string, metadata map[string]string) *probe.Error {
	fs.lock.Lock()
	defer fs.lock.Unlock()
	if !IsValidBucket(bucket) {
		return probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	acl := metadata["acl"]
	if !IsValidBucketACL(acl) {
		return probe.NewError(InvalidACL{ACL: acl})
	}
	// get bucket path
	bucketDir := filepath.Join(fs.path, bucket)
	err := os.Chmod(bucketDir, aclToPerm(acl))
	if err != nil {
		return probe.NewError(err)
	}
	return nil
}

// ListObjects - GET bucket (list objects)
func (fs Filesystem) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()
	if !IsValidBucket(bucket) {
		return nil, resources, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if resources.Prefix != "" && IsValidObjectName(resources.Prefix) == false {
		return nil, resources, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: resources.Prefix})
	}

	p := bucketDir{}
	rootPrefix := filepath.Join(fs.path, bucket)
	// check bucket exists
	if _, err := os.Stat(rootPrefix); os.IsNotExist(err) {
		return nil, resources, probe.NewError(BucketNotFound{Bucket: bucket})
	}

	p.root = rootPrefix
	/// automatically treat "/" delimiter as "\\" delimiter on windows due to its path constraints.
	if resources.Delimiter == "/" {
		if runtime.GOOS == "windows" {
			resources.Delimiter = string(os.PathSeparator)
		}
	}

	// if delimiter is supplied and not prefix then we are the very top level, list everything and move on.
	if resources.Delimiter != "" && resources.Prefix == "" {
		files, err := ioutil.ReadDir(rootPrefix)
		if err != nil {
			if os.IsNotExist(err) {
				return nil, resources, probe.NewError(BucketNotFound{Bucket: bucket})
			}
			return nil, resources, probe.NewError(err)
		}
		for _, fl := range files {
			p.files = append(p.files, contentInfo{
				Prefix:   fl.Name(),
				Size:     fl.Size(),
				Mode:     fl.Mode(),
				ModTime:  fl.ModTime(),
				FileInfo: fl,
			})
		}
	}

	// If delimiter and prefix is supplied make sure that paging doesn't go deep, treat it as simple directory listing.
	if resources.Delimiter != "" && resources.Prefix != "" {
		if !strings.HasSuffix(resources.Prefix, resources.Delimiter) {
			fl, err := os.Stat(filepath.Join(rootPrefix, resources.Prefix))
			if err != nil {
				if os.IsNotExist(err) {
					return nil, resources, probe.NewError(ObjectNotFound{Bucket: bucket, Object: resources.Prefix})
				}
				return nil, resources, probe.NewError(err)
			}
			p.files = append(p.files, contentInfo{
				Prefix:   resources.Prefix,
				Size:     fl.Size(),
				Mode:     os.ModeDir,
				ModTime:  fl.ModTime(),
				FileInfo: fl,
			})
		} else {
			files, err := ioutil.ReadDir(filepath.Join(rootPrefix, resources.Prefix))
			if err != nil {
				if os.IsNotExist(err) {
					return nil, resources, probe.NewError(ObjectNotFound{Bucket: bucket, Object: resources.Prefix})
				}
				return nil, resources, probe.NewError(err)
			}
			for _, fl := range files {
				prefix := fl.Name()
				if resources.Prefix != "" {
					prefix = filepath.Join(resources.Prefix, fl.Name())
				}
				p.files = append(p.files, contentInfo{
					Prefix:   prefix,
					Size:     fl.Size(),
					Mode:     fl.Mode(),
					ModTime:  fl.ModTime(),
					FileInfo: fl,
				})
			}
		}
	}
	if resources.Delimiter == "" {
		var files []contentInfo
		getAllFiles := func(fp string, fl os.FileInfo, err error) error {
			// If any error return back quickly
			if err != nil {
				return err
			}
			if strings.HasSuffix(fp, "$multiparts") {
				return nil
			}
			// if file pointer equals to rootPrefix - discard it
			if fp == p.root {
				return nil
			}
			if len(files) > resources.Maxkeys {
				return ErrSkipFile
			}
			// Split the root prefix from the incoming file pointer
			realFp := ""
			if runtime.GOOS == "windows" {
				if splits := strings.Split(fp, (p.root + string(os.PathSeparator))); len(splits) > 1 {
					realFp = splits[1]
				}
			} else {
				if splits := strings.Split(fp, (p.root + string(os.PathSeparator))); len(splits) > 1 {
					realFp = splits[1]
				}
			}
			// If path is a directory and has a prefix verify if the file pointer
			// has the prefix if it does not skip the directory.
			if fl.Mode().IsDir() {
				if resources.Prefix != "" {
					if !strings.HasPrefix(fp, filepath.Join(p.root, resources.Prefix)) {
						return ErrSkipDir
					}
				}
			}
			// If path is a directory and has a marker verify if the file split file pointer
			// is lesser than the Marker top level directory if yes skip it.
			if fl.Mode().IsDir() {
				if resources.Marker != "" {
					if realFp != "" {
						if runtime.GOOS == "windows" {
							if realFp < strings.Split(resources.Marker, string(os.PathSeparator))[0] {
								return ErrSkipDir
							}
						} else {
							if realFp < strings.Split(resources.Marker, string(os.PathSeparator))[0] {
								return ErrSkipDir
							}
						}
					}
				}
			}
			// If regular file verify
			if fl.Mode().IsRegular() {
				// If marker is present this will be used to check if filepointer is
				// lexically higher than then Marker
				if realFp != "" {
					if resources.Marker != "" {
						if realFp > resources.Marker {
							files = append(files, contentInfo{
								Prefix:   realFp,
								Size:     fl.Size(),
								Mode:     fl.Mode(),
								ModTime:  fl.ModTime(),
								FileInfo: fl,
							})
						}
					} else {
						files = append(files, contentInfo{
							Prefix:   realFp,
							Size:     fl.Size(),
							Mode:     fl.Mode(),
							ModTime:  fl.ModTime(),
							FileInfo: fl,
						})
					}
				}
			}
			// If file is a symlink follow it and populate values.
			if fl.Mode()&os.ModeSymlink == os.ModeSymlink {
				st, err := os.Stat(fp)
				if err != nil {
					return nil
				}
				// If marker is present this will be used to check if filepointer is
				// lexically higher than then Marker
				if realFp != "" {
					if resources.Marker != "" {
						if realFp > resources.Marker {
							files = append(files, contentInfo{
								Prefix:   realFp,
								Size:     st.Size(),
								Mode:     st.Mode(),
								ModTime:  st.ModTime(),
								FileInfo: st,
							})
						}
					} else {
						files = append(files, contentInfo{
							Prefix:   realFp,
							Size:     st.Size(),
							Mode:     st.Mode(),
							ModTime:  st.ModTime(),
							FileInfo: st,
						})
					}
				}
			}
			p.files = files
			return nil
		}
		// If no delimiter is specified, crawl through everything.
		err := Walk(rootPrefix, getAllFiles)
		if err != nil {
			if os.IsNotExist(err) {
				return nil, resources, probe.NewError(ObjectNotFound{Bucket: bucket, Object: resources.Prefix})
			}
			return nil, resources, probe.NewError(err)
		}
	}

	var metadataList []ObjectMetadata
	var metadata ObjectMetadata

	// Filter objects
	for _, content := range p.files {
		if len(metadataList) == resources.Maxkeys {
			resources.IsTruncated = true
			if resources.IsTruncated && resources.Delimiter != "" {
				resources.NextMarker = metadataList[len(metadataList)-1].Object
			}
			break
		}
		if content.Prefix > resources.Marker {
			var err *probe.Error
			metadata, resources, err = fs.filterObjects(bucket, content, resources)
			if err != nil {
				return nil, resources, err.Trace()
			}
			if metadata.Bucket != "" {
				metadataList = append(metadataList, metadata)
			}
		}
	}
	return metadataList, resources, nil
}

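MakeBucket above is where the commit's core guard lands: it refuses writes once the free fraction of the root path drops to fs.minFreeDisk percent. A sketch of the underlying computation, assuming disk.Stat reports free/total the way syscall.Statfs does on Linux; this is an illustration, not the minio/pkg/disk implementation:

package main

import (
	"fmt"
	"syscall"
)

// freeDiskPercent returns the percentage of the filesystem holding path
// that is still free, computed from block counts the same way the checks
// in MakeBucket/NewMultipartUpload/CreateObject do. Linux-only sketch.
func freeDiskPercent(path string) (int64, error) {
	var s syscall.Statfs_t
	if err := syscall.Statfs(path, &s); err != nil {
		return 0, err
	}
	free := s.Bavail * uint64(s.Bsize)  // bytes available to unprivileged users
	total := s.Blocks * uint64(s.Bsize) // total bytes on the volume
	return int64((float64(free) / float64(total)) * 100), nil
}

func main() {
	pct, err := freeDiskPercent("/")
	if err != nil {
		fmt.Println("statfs failed:", err)
		return
	}
	minFreeDisk := int64(5) // hypothetical 5% threshold
	if pct <= minFreeDisk {
		fmt.Println("refusing writes: only", pct, "% free")
		return
	}
	fmt.Println(pct, "% free, writes allowed")
}
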
@@ -22,7 +22,7 @@ import (
 	"github.com/minio/minio-xl/pkg/probe"
 )
 
-func (fs API) filterObjects(bucket string, content contentInfo, resources BucketResourcesMetadata) (ObjectMetadata, BucketResourcesMetadata, *probe.Error) {
+func (fs Filesystem) filterObjects(bucket string, content contentInfo, resources BucketResourcesMetadata) (ObjectMetadata, BucketResourcesMetadata, *probe.Error) {
 	var err *probe.Error
 	var metadata ObjectMetadata

@@ -39,9 +39,10 @@ import (
 	"github.com/minio/minio-xl/pkg/crypto/sha256"
 	"github.com/minio/minio-xl/pkg/crypto/sha512"
 	"github.com/minio/minio-xl/pkg/probe"
+	"github.com/minio/minio/pkg/disk"
 )
 
-func (fs API) isValidUploadID(object, uploadID string) bool {
+func (fs Filesystem) isValidUploadID(object, uploadID string) bool {
 	s, ok := fs.multiparts.ActiveSession[object]
 	if !ok {
 		return false
@@ -53,7 +54,7 @@ func (fs API) isValidUploadID(object, uploadID string) bool {
 }
 
 // ListMultipartUploads - list incomplete multipart sessions for a given BucketMultipartResourcesMetadata
-func (fs API) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error) {
+func (fs Filesystem) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error) {
 	fs.lock.Lock()
 	defer fs.lock.Unlock()
 	if !IsValidBucket(bucket) {
@@ -113,7 +114,7 @@ func (fs API) ListMultipartUploads(bucket string, resources BucketMultipartResou
 	return resources, nil
 }
 
-func (fs API) concatParts(parts *CompleteMultipartUpload, objectPath string, mw io.Writer) *probe.Error {
+func (fs Filesystem) concatParts(parts *CompleteMultipartUpload, objectPath string, mw io.Writer) *probe.Error {
 	for _, part := range parts.Part {
 		recvMD5 := part.ETag
 		partFile, err := os.OpenFile(objectPath+fmt.Sprintf("$%d", part.PartNumber), os.O_RDONLY, 0600)
@@ -143,9 +144,19 @@ func (fs API) concatParts(parts *CompleteMultipartUpload, objectPath string, mw
 }
 
 // NewMultipartUpload - initiate a new multipart session
-func (fs API) NewMultipartUpload(bucket, object string) (string, *probe.Error) {
+func (fs Filesystem) NewMultipartUpload(bucket, object string) (string, *probe.Error) {
 	fs.lock.Lock()
 	defer fs.lock.Unlock()
+
+	stfs, err := disk.Stat(fs.path)
+	if err != nil {
+		return "", probe.NewError(err)
+	}
+
+	if int64((float64(stfs.Free)/float64(stfs.Total))*100) <= fs.minFreeDisk {
+		return "", probe.NewError(RootPathFull{Path: fs.path})
+	}
+
 	if !IsValidBucket(bucket) {
 		return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
 	}
@@ -154,7 +165,7 @@ func (fs API) NewMultipartUpload(bucket, object string) (string, *probe.Error) {
 	}
 
 	bucketPath := filepath.Join(fs.path, bucket)
-	_, err := os.Stat(bucketPath)
+	_, err = os.Stat(bucketPath)
 	// check bucket exists
 	if os.IsNotExist(err) {
 		return "", probe.NewError(BucketNotFound{Bucket: bucket})
@@ -208,10 +219,19 @@ func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
 func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
 
 // CreateObjectPart - create a part in a multipart session
-func (fs API) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum string, partID int, size int64, data io.Reader, signature *Signature) (string, *probe.Error) {
+func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum string, partID int, size int64, data io.Reader, signature *Signature) (string, *probe.Error) {
 	fs.lock.Lock()
 	defer fs.lock.Unlock()
+
+	stfs, err := disk.Stat(fs.path)
+	if err != nil {
+		return "", probe.NewError(err)
+	}
+
+	if int64((float64(stfs.Free)/float64(stfs.Total))*100) <= fs.minFreeDisk {
+		return "", probe.NewError(RootPathFull{Path: fs.path})
+	}
+
 	if partID <= 0 {
 		return "", probe.NewError(errors.New("invalid part id, cannot be zero or less than zero"))
 	}
@@ -230,7 +250,8 @@ func (fs API) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum string,
 	}
 
 	if strings.TrimSpace(expectedMD5Sum) != "" {
-		expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
+		var expectedMD5SumBytes []byte
+		expectedMD5SumBytes, err = base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
 		if err != nil {
 			// pro-actively close the connection
 			return "", probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
@@ -239,7 +260,7 @@ func (fs API) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum string,
 	}
 
 	bucketPath := filepath.Join(fs.path, bucket)
-	if _, err := os.Stat(bucketPath); err != nil {
+	if _, err = os.Stat(bucketPath); err != nil {
 		// check bucket exists
 		if os.IsNotExist(err) {
 			return "", probe.NewError(BucketNotFound{Bucket: bucket})
@@ -321,7 +342,7 @@ func (fs API) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum string,
 }
 
 // CompleteMultipartUpload - complete a multipart upload and persist the data
-func (fs API) CompleteMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
+func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
 	fs.lock.Lock()
 	defer fs.lock.Unlock()
 
@@ -424,7 +445,7 @@ func (fs API) CompleteMultipartUpload(bucket, object, uploadID string, data io.R
 }
 
 // ListObjectParts - list parts from incomplete multipart session for a given ObjectResourcesMetadata
-func (fs API) ListObjectParts(bucket, object string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error) {
+func (fs Filesystem) ListObjectParts(bucket, object string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error) {
 	fs.lock.Lock()
 	defer fs.lock.Unlock()
 
@@ -493,7 +514,7 @@ func (fs API) ListObjectParts(bucket, object string, resources ObjectResourcesMe
 }
 
 // AbortMultipartUpload - abort an incomplete multipart session
-func (fs API) AbortMultipartUpload(bucket, object, uploadID string) *probe.Error {
+func (fs Filesystem) AbortMultipartUpload(bucket, object, uploadID string) *probe.Error {
 	fs.lock.Lock()
 	defer fs.lock.Unlock()

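Note how CreateObjectPart decodes the client-supplied expectedMD5Sum: the Content-MD5 header carries base64 of the raw digest, while the locally computed sum is raw bytes. A standalone sketch of that dance using only the standard library (names here are illustrative):

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"strings"
)

// verifyContentMD5 mimics the expectedMD5Sum handling above: decode the
// base64 header value, then compare against the freshly computed digest.
func verifyContentMD5(body []byte, header string) error {
	expected, err := base64.StdEncoding.DecodeString(strings.TrimSpace(header))
	if err != nil {
		// corresponds to the InvalidDigest error in the diff
		return fmt.Errorf("invalid digest: %v", err)
	}
	sum := md5.Sum(body)
	if string(expected) != string(sum[:]) {
		return fmt.Errorf("md5 mismatch")
	}
	return nil
}

func main() {
	body := []byte("hello, world")
	sum := md5.Sum(body)
	header := base64.StdEncoding.EncodeToString(sum[:])
	fmt.Println(verifyContentMD5(body, header)) // <nil>
}
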
@@ -32,12 +32,13 @@ import (
 	"github.com/minio/minio-xl/pkg/atomic"
 	"github.com/minio/minio-xl/pkg/crypto/sha256"
 	"github.com/minio/minio-xl/pkg/probe"
+	"github.com/minio/minio/pkg/disk"
 )
 
 /// Object Operations
 
 // GetObject - GET object
-func (fs API) GetObject(w io.Writer, bucket, object string, start, length int64) (int64, *probe.Error) {
+func (fs Filesystem) GetObject(w io.Writer, bucket, object string, start, length int64) (int64, *probe.Error) {
 	fs.lock.Lock()
 	defer fs.lock.Unlock()
 
@@ -91,7 +92,7 @@ func (fs API) GetObject(w io.Writer, bucket, object string, start, length int64)
 }
 
 // GetObjectMetadata - HEAD object
-func (fs API) GetObjectMetadata(bucket, object string) (ObjectMetadata, *probe.Error) {
+func (fs Filesystem) GetObjectMetadata(bucket, object string) (ObjectMetadata, *probe.Error) {
 	fs.lock.Lock()
 	defer fs.lock.Unlock()
 
@@ -161,16 +162,25 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) *probe.Error {
 }
 
 // CreateObject - PUT object
-func (fs API) CreateObject(bucket, object, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
+func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
 	fs.lock.Lock()
 	defer fs.lock.Unlock()
+
+	stfs, err := disk.Stat(fs.path)
+	if err != nil {
+		return ObjectMetadata{}, probe.NewError(err)
+	}
+
+	if int64((float64(stfs.Free)/float64(stfs.Total))*100) <= fs.minFreeDisk {
+		return ObjectMetadata{}, probe.NewError(RootPathFull{Path: fs.path})
+	}
+
 	// check bucket name valid
 	if !IsValidBucket(bucket) {
 		return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
 	}
 	// check bucket exists
-	if _, err := os.Stat(filepath.Join(fs.path, bucket)); os.IsNotExist(err) {
+	if _, err = os.Stat(filepath.Join(fs.path, bucket)); os.IsNotExist(err) {
 		return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
 	}
 	// verify object path legal
@@ -181,7 +191,8 @@ func (fs API) CreateObject(bucket, object, expectedMD5Sum string, size int64, da
 	// get object path
 	objectPath := filepath.Join(fs.path, bucket, object)
 	if strings.TrimSpace(expectedMD5Sum) != "" {
-		expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
+		var expectedMD5SumBytes []byte
+		expectedMD5SumBytes, err = base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
 		if err != nil {
 			// pro-actively close the connection
 			return ObjectMetadata{}, probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
@@ -252,7 +263,7 @@ func (fs API) CreateObject(bucket, object, expectedMD5Sum string, size int64, da
 }
 
 // DeleteObject - delete and object
-func (fs API) DeleteObject(bucket, object string) *probe.Error {
+func (fs Filesystem) DeleteObject(bucket, object string) *probe.Error {
 	fs.lock.Lock()
 	defer fs.lock.Unlock()

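CreateObject streams the request body to disk while accumulating digests as it goes (the diff's imports of sha256 and the atomic file writer serve that purpose). A generic sketch of the pattern with only the standard library; the function name and path are hypothetical, not the repository's code:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

// writeAndHash copies data to a file while feeding the same bytes to an
// md5 hasher through io.MultiWriter, the usual shape of CreateObject-style
// code: one pass over the stream, no buffering of the whole object.
func writeAndHash(path string, data io.Reader) (string, error) {
	f, err := os.Create(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	h := md5.New()
	if _, err := io.Copy(io.MultiWriter(f, h), data); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	sum, err := writeAndHash("/tmp/object", strings.NewReader("one")) // illustrative path
	if err != nil {
		fmt.Println("write failed:", err)
		return
	}
	fmt.Println("md5:", sum)
}
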
pkg/fs/fs.go (421 lines)
@@ -18,22 +18,18 @@ package fs
 
 import (
 	"os"
 	"runtime"
 	"strings"
 	"sync"
 	"time"
 
 	"io/ioutil"
 	"path/filepath"
 
 	"github.com/minio/minio-xl/pkg/probe"
 )
 
-// API - local variables
-type API struct {
-	path       string
-	lock       *sync.Mutex
-	multiparts *Multiparts
+// Filesystem - local variables
+type Filesystem struct {
+	path        string
+	minFreeDisk int64
+	lock        *sync.Mutex
+	multiparts  *Multiparts
 }
 
 // MultipartSession holds active session information
@@ -51,7 +47,7 @@ type Multiparts struct {
 }
 
 // New instantiate a new donut
-func New(path string) (CloudStorage, *probe.Error) {
+func New() (Filesystem, *probe.Error) {
 	var err *probe.Error
 	// load multiparts session from disk
 	var multiparts *Multiparts
@@ -63,412 +59,27 @@ func New(path string) (CloudStorage, *probe.Error) {
 			ActiveSession: make(map[string]*MultipartSession),
 		}
 		if err := SaveMultipartsSession(multiparts); err != nil {
-			return nil, err.Trace()
+			return Filesystem{}, err.Trace()
 		}
 	} else {
-		return nil, err.Trace()
+		return Filesystem{}, err.Trace()
 	}
 	}
-	a := API{
-		path: path,
-		lock: new(sync.Mutex),
-	}
+	a := Filesystem{lock: new(sync.Mutex)}
 	a.multiparts = multiparts
 	return a, nil
 }
 
-/// Bucket Operations
-
-// DeleteBucket - delete bucket
-func (fs API) DeleteBucket(bucket string) *probe.Error {
+// SetRootPath - set root path
+func (fs *Filesystem) SetRootPath(path string) {
 	fs.lock.Lock()
 	defer fs.lock.Unlock()
 
-	// verify bucket path legal
-	if !IsValidBucket(bucket) {
-		return probe.NewError(BucketNameInvalid{Bucket: bucket})
-	}
-	bucketDir := filepath.Join(fs.path, bucket)
-	// check bucket exists
-	if _, err := os.Stat(bucketDir); os.IsNotExist(err) {
-		return probe.NewError(BucketNotFound{Bucket: bucket})
-	}
-	if err := RemoveAllDirs(bucketDir); err != nil {
-		if err == ErrDirNotEmpty || strings.Contains(err.Error(), "directory not empty") {
-			return probe.NewError(BucketNotEmpty{Bucket: bucket})
-		}
-		return probe.NewError(err)
-	}
-	if err := os.Remove(bucketDir); err != nil {
-		if strings.Contains(err.Error(), "directory not empty") {
-			return probe.NewError(BucketNotEmpty{Bucket: bucket})
-		}
-		return probe.NewError(err)
-	}
-	return nil
+	fs.path = path
 }
 
-// ListBuckets - Get service
-func (fs API) ListBuckets() ([]BucketMetadata, *probe.Error) {
+// SetMinFreeDisk - set min free disk
+func (fs *Filesystem) SetMinFreeDisk(minFreeDisk int64) {
 	fs.lock.Lock()
 	defer fs.lock.Unlock()
 
-	files, err := ioutil.ReadDir(fs.path)
-	if err != nil {
-		return []BucketMetadata{}, probe.NewError(err)
-	}
-
-	var metadataList []BucketMetadata
-	for _, file := range files {
-		if !file.IsDir() {
-			// if files found ignore them
-			continue
-		}
-		if file.IsDir() {
-			// if directories found with odd names, skip them too
-			if !IsValidBucket(file.Name()) {
-				continue
-			}
-		}
-		metadata := BucketMetadata{
-			Name:    file.Name(),
-			Created: file.ModTime(),
-		}
-		metadataList = append(metadataList, metadata)
-	}
-	return metadataList, nil
-}
-
-// MakeBucket - PUT Bucket
-func (fs API) MakeBucket(bucket, acl string) *probe.Error {
-	fs.lock.Lock()
-	defer fs.lock.Unlock()
-
-	// verify bucket path legal
-	if !IsValidBucket(bucket) {
-		return probe.NewError(BucketNameInvalid{Bucket: bucket})
-	}
-
-	// get bucket path
-	bucketDir := filepath.Join(fs.path, bucket)
-
-	// check if bucket exists
-	if _, err := os.Stat(bucketDir); err == nil {
-		return probe.NewError(BucketExists{
-			Bucket: bucket,
-		})
-	}
-
-	// make bucket
-	err := os.Mkdir(bucketDir, aclToPerm(acl))
-	if err != nil {
-		return probe.NewError(err)
-	}
-	return nil
-}
-
-// GetBucketMetadata -
-func (fs API) GetBucketMetadata(bucket string) (BucketMetadata, *probe.Error) {
-	fs.lock.Lock()
-	defer fs.lock.Unlock()
-	if !IsValidBucket(bucket) {
-		return BucketMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
-	}
-	// get bucket path
-	bucketDir := filepath.Join(fs.path, bucket)
-	bucketMetadata := BucketMetadata{}
-	fi, err := os.Stat(bucketDir)
-	// check if bucket exists
-	if os.IsNotExist(err) {
-		return BucketMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
-	}
-	if err != nil {
-		return BucketMetadata{}, probe.NewError(err)
-	}
-
-	bucketMetadata.Name = fi.Name()
-	bucketMetadata.Created = fi.ModTime()
-	bucketMetadata.ACL = permToACL(fi.Mode())
-	return bucketMetadata, nil
-}
-
-// permToACL - convert perm to meaningful ACL
-func permToACL(mode os.FileMode) BucketACL {
-	switch mode.Perm() {
-	case os.FileMode(0700):
-		return BucketACL("private")
-	case os.FileMode(0500):
-		return BucketACL("public-read")
-	case os.FileMode(0777):
-		return BucketACL("public-read-write")
-	default:
-		return BucketACL("private")
-	}
-}
-
-// aclToPerm - convert acl to filesystem mode
-func aclToPerm(acl string) os.FileMode {
-	switch acl {
-	case "private":
-		return os.FileMode(0700)
-	case "public-read":
-		return os.FileMode(0500)
-	case "public-read-write":
-		return os.FileMode(0777)
-	default:
-		return os.FileMode(0700)
-	}
-}
-
-// SetBucketMetadata -
-func (fs API) SetBucketMetadata(bucket string, metadata map[string]string) *probe.Error {
-	fs.lock.Lock()
-	defer fs.lock.Unlock()
-	if !IsValidBucket(bucket) {
-		return probe.NewError(BucketNameInvalid{Bucket: bucket})
-	}
-	acl := metadata["acl"]
-	if !IsValidBucketACL(acl) {
-		return probe.NewError(InvalidACL{ACL: acl})
-	}
-	// get bucket path
-	bucketDir := filepath.Join(fs.path, bucket)
-	err := os.Chmod(bucketDir, aclToPerm(acl))
-	if err != nil {
-		return probe.NewError(err)
-	}
-	return nil
-}
-
-// ListObjects - GET bucket (list objects)
-func (fs API) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error) {
-	if !IsValidBucket(bucket) {
-		return nil, resources, probe.NewError(BucketNameInvalid{Bucket: bucket})
-	}
-	if resources.Prefix != "" && IsValidObjectName(resources.Prefix) == false {
-		return nil, resources, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: resources.Prefix})
-	}
-
-	p := bucketDir{}
-	rootPrefix := filepath.Join(fs.path, bucket)
-	// check bucket exists
-	if _, err := os.Stat(rootPrefix); os.IsNotExist(err) {
-		return nil, resources, probe.NewError(BucketNotFound{Bucket: bucket})
-	}
-
-	p.root = rootPrefix
-	/// automatically treat "/" delimiter as "\\" delimiter on windows due to its path constraints.
-	if resources.Delimiter == "/" {
-		if runtime.GOOS == "windows" {
-			resources.Delimiter = string(os.PathSeparator)
-		}
-	}
-
-	// if delimiter is supplied and not prefix then we are the very top level, list everything and move on.
-	if resources.Delimiter != "" && resources.Prefix == "" {
-		files, err := ioutil.ReadDir(rootPrefix)
-		if err != nil {
-			if os.IsNotExist(err) {
-				return nil, resources, probe.NewError(BucketNotFound{Bucket: bucket})
-			}
-			return nil, resources, probe.NewError(err)
-		}
-		for _, fl := range files {
-			p.files = append(p.files, contentInfo{
-				Prefix:   fl.Name(),
-				Size:     fl.Size(),
-				Mode:     fl.Mode(),
-				ModTime:  fl.ModTime(),
-				FileInfo: fl,
-			})
-		}
-	}
-
-	// If delimiter and prefix is supplied make sure that paging doesn't go deep, treat it as simple directory listing.
-	if resources.Delimiter != "" && resources.Prefix != "" {
-		if !strings.HasSuffix(resources.Prefix, resources.Delimiter) {
-			fl, err := os.Stat(filepath.Join(rootPrefix, resources.Prefix))
-			if err != nil {
-				if os.IsNotExist(err) {
-					return nil, resources, probe.NewError(ObjectNotFound{Bucket: bucket, Object: resources.Prefix})
-				}
-				return nil, resources, probe.NewError(err)
-			}
-			p.files = append(p.files, contentInfo{
-				Prefix:   resources.Prefix,
-				Size:     fl.Size(),
-				Mode:     os.ModeDir,
-				ModTime:  fl.ModTime(),
-				FileInfo: fl,
-			})
-		} else {
-			files, err := ioutil.ReadDir(filepath.Join(rootPrefix, resources.Prefix))
-			if err != nil {
-				if os.IsNotExist(err) {
-					return nil, resources, probe.NewError(ObjectNotFound{Bucket: bucket, Object: resources.Prefix})
-				}
-				return nil, resources, probe.NewError(err)
-			}
-			for _, fl := range files {
-				prefix := fl.Name()
-				if resources.Prefix != "" {
-					prefix = filepath.Join(resources.Prefix, fl.Name())
-				}
-				p.files = append(p.files, contentInfo{
-					Prefix:   prefix,
-					Size:     fl.Size(),
-					Mode:     fl.Mode(),
-					ModTime:  fl.ModTime(),
-					FileInfo: fl,
-				})
-			}
-		}
-	}
-	if resources.Delimiter == "" {
-		var files []contentInfo
-		getAllFiles := func(fp string, fl os.FileInfo, err error) error {
-			// If any error return back quickly
-			if err != nil {
-				return err
-			}
-			if strings.HasSuffix(fp, "$multiparts") {
-				return nil
-			}
-			// if file pointer equals to rootPrefix - discard it
-			if fp == p.root {
-				return nil
-			}
-			if len(files) > resources.Maxkeys {
-				return ErrSkipFile
-			}
-			// Split the root prefix from the incoming file pointer
-			realFp := ""
-			if runtime.GOOS == "windows" {
-				if splits := strings.Split(fp, (p.root + string(os.PathSeparator))); len(splits) > 1 {
-					realFp = splits[1]
-				}
-			} else {
-				if splits := strings.Split(fp, (p.root + string(os.PathSeparator))); len(splits) > 1 {
-					realFp = splits[1]
-				}
-			}
-			// If path is a directory and has a prefix verify if the file pointer
-			// has the prefix if it does not skip the directory.
-			if fl.Mode().IsDir() {
-				if resources.Prefix != "" {
-					if !strings.HasPrefix(fp, filepath.Join(p.root, resources.Prefix)) {
-						return ErrSkipDir
-					}
-				}
-			}
-			// If path is a directory and has a marker verify if the file split file pointer
-			// is lesser than the Marker top level directory if yes skip it.
-			if fl.Mode().IsDir() {
-				if resources.Marker != "" {
-					if realFp != "" {
-						if runtime.GOOS == "windows" {
-							if realFp < strings.Split(resources.Marker, string(os.PathSeparator))[0] {
-								return ErrSkipDir
-							}
-						} else {
-							if realFp < strings.Split(resources.Marker, string(os.PathSeparator))[0] {
-								return ErrSkipDir
-							}
-						}
-					}
-				}
-			}
-			// If regular file verify
-			if fl.Mode().IsRegular() {
-				// If marker is present this will be used to check if filepointer is
-				// lexically higher than then Marker
-				if realFp != "" {
-					if resources.Marker != "" {
-						if realFp > resources.Marker {
-							files = append(files, contentInfo{
-								Prefix:   realFp,
-								Size:     fl.Size(),
-								Mode:     fl.Mode(),
-								ModTime:  fl.ModTime(),
-								FileInfo: fl,
-							})
-						}
-					} else {
-						files = append(files, contentInfo{
-							Prefix:   realFp,
-							Size:     fl.Size(),
-							Mode:     fl.Mode(),
-							ModTime:  fl.ModTime(),
-							FileInfo: fl,
-						})
-					}
-				}
-			}
-			// If file is a symlink follow it and populate values.
-			if fl.Mode()&os.ModeSymlink == os.ModeSymlink {
-				st, err := os.Stat(fp)
-				if err != nil {
-					return nil
-				}
-				// If marker is present this will be used to check if filepointer is
-				// lexically higher than then Marker
-				if realFp != "" {
-					if resources.Marker != "" {
-						if realFp > resources.Marker {
-							files = append(files, contentInfo{
-								Prefix:   realFp,
-								Size:     st.Size(),
-								Mode:     st.Mode(),
-								ModTime:  st.ModTime(),
-								FileInfo: st,
-							})
-						}
-					} else {
-						files = append(files, contentInfo{
-							Prefix:   realFp,
-							Size:     st.Size(),
-							Mode:     st.Mode(),
-							ModTime:  st.ModTime(),
-							FileInfo: st,
-						})
-					}
-				}
-			}
-			p.files = files
-			return nil
-		}
-		// If no delimiter is specified, crawl through everything.
-		err := Walk(rootPrefix, getAllFiles)
-		if err != nil {
-			if os.IsNotExist(err) {
-				return nil, resources, probe.NewError(ObjectNotFound{Bucket: bucket, Object: resources.Prefix})
-			}
-			return nil, resources, probe.NewError(err)
-		}
-	}
-
-	var metadataList []ObjectMetadata
-	var metadata ObjectMetadata
-
-	// Filter objects
-	for _, content := range p.files {
-		if len(metadataList) == resources.Maxkeys {
-			resources.IsTruncated = true
-			if resources.IsTruncated && resources.Delimiter != "" {
-				resources.NextMarker = metadataList[len(metadataList)-1].Object
-			}
-			break
-		}
-		if content.Prefix > resources.Marker {
-			var err *probe.Error
-			metadata, resources, err = fs.filterObjects(bucket, content, resources)
-			if err != nil {
-				return nil, resources, err.Trace()
-			}
-			if metadata.Bucket != "" {
-				metadataList = append(metadataList, metadata)
-			}
-		}
-	}
-	return metadataList, resources, nil
+	fs.minFreeDisk = minFreeDisk
 }

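Note the receiver split introduced here: SetRootPath and SetMinFreeDisk take *Filesystem so the field assignments survive the call, while the operational methods keep value receivers; copies still serialize against each other because the mutex is held by pointer. A minimal sketch of why that works (stand-in types, not the package's code):

package main

import (
	"fmt"
	"sync"
)

type Filesystem struct {
	path        string
	minFreeDisk int64
	lock        *sync.Mutex // pointer: every copy locks the same mutex
}

// Setters need a pointer receiver so the field writes stick.
func (fs *Filesystem) SetMinFreeDisk(min int64) {
	fs.lock.Lock()
	defer fs.lock.Unlock()
	fs.minFreeDisk = min
}

// Value receiver: fs is a copy, but fs.lock points at the shared mutex,
// so concurrent operations on copies still exclude one another.
func (fs Filesystem) MinFreeDisk() int64 {
	fs.lock.Lock()
	defer fs.lock.Unlock()
	return fs.minFreeDisk
}

func main() {
	fs := Filesystem{lock: new(sync.Mutex)}
	fs.SetMinFreeDisk(10)
	fmt.Println(fs.MinFreeDisk()) // 10
}
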
@@ -33,14 +33,16 @@ var _ = Suite(&MySuite{})
 
 func (s *MySuite) TestAPISuite(c *C) {
 	var storageList []string
-	create := func() CloudStorage {
+	create := func() Filesystem {
 		configPath, err := ioutil.TempDir(os.TempDir(), "minio-")
 		c.Check(err, IsNil)
 		path, err := ioutil.TempDir(os.TempDir(), "minio-")
 		c.Check(err, IsNil)
 		SetFSMultipartsConfigPath(filepath.Join(configPath, "multiparts.json"))
 		storageList = append(storageList, path)
-		store, perr := New(path)
+		store, perr := New()
+		store.SetRootPath(path)
+		store.SetMinFreeDisk(0)
 		c.Check(perr, IsNil)
 		return store
 	}

@@ -1,66 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package fs

import (
	"io"

	"github.com/minio/minio-xl/pkg/probe"
)

// CloudStorage is a fs cloud storage interface
type CloudStorage interface {
	// Storage service operations
	GetBucketMetadata(bucket string) (BucketMetadata, *probe.Error)
	SetBucketMetadata(bucket string, metadata map[string]string) *probe.Error
	ListBuckets() ([]BucketMetadata, *probe.Error)
	MakeBucket(bucket, acl string) *probe.Error
	DeleteBucket(bucket string) *probe.Error

	// Bucket operations
	ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error)

	// Object operations
	GetObject(w io.Writer, bucket, object string, start, length int64) (int64, *probe.Error)
	GetObjectMetadata(bucket, object string) (ObjectMetadata, *probe.Error)
	// bucket, object, expectedMD5Sum, size, reader, metadata, signature
	CreateObject(bucket, object, md5sum string, size int64, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error)
	DeleteObject(bucket, object string) *probe.Error

	// Multipart API
	Multipart

	// ACL API
	ACL
}

// Multipart API
type Multipart interface {
	NewMultipartUpload(bucket, object string) (string, *probe.Error)
	AbortMultipartUpload(bucket, object, uploadID string) *probe.Error
	CreateObjectPart(bucket, object, uploadID, md5sum string, partID int, size int64, data io.Reader, signature *Signature) (string, *probe.Error)
	CompleteMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error)
	ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error)
	ListObjectParts(bucket, object string, objectResources ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error)
}

// ACL API
type ACL interface {
	IsPublicBucket(bucket string) bool
	IsPrivateBucket(bucket string) bool
	IsReadOnlyBucket(bucket string) bool
}