Mirror of https://github.com/minio/minio.git
Merge pull request #1314 from minio/split-fs
Split fs code into storage layer and object layer.
Commit b2ec7da9f9
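
For orientation, a minimal sketch of the two layers this split implies, based only on the types visible in the diff below: the storage layer traffics in plain files (FileInfo, carrying an os.FileMode), while the object layer keeps bucket/object semantics (ObjectInfo, with content type and MD5 sum). The struct fields mirror usage in the hunks below; the interface is an assumed summary, not code from the commit.

package main

import (
	"io"
	"os"
	"time"
)

// Storage-layer vocabulary: plain files. Fields mirror the FileInfo
// usage visible in the treeWalk hunks below.
type FileInfo struct {
	Name    string
	Size    int64
	ModTime time.Time
	Mode    os.FileMode
}

// Object-layer vocabulary: buckets and objects. Fields mirror the
// ObjectInfo usage in the handler hunks below.
type ObjectInfo struct {
	Bucket      string
	Name        string
	Size        int64
	ModTime     time.Time
	ContentType string
	MD5Sum      string
	IsDir       bool
}

// objectLayer is an assumed summary of what the HTTP handlers need;
// PutObject returning only an md5Sum string matches the
// PostPolicyBucketHandler change below.
type objectLayer interface {
	PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string) (string, error)
}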
@@ -63,7 +63,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, contentRange *h
 	setCommonHeaders(w)

 	// set object-related metadata headers
-	lastModified := objInfo.ModifiedTime.UTC().Format(http.TimeFormat)
+	lastModified := objInfo.ModTime.UTC().Format(http.TimeFormat)
 	w.Header().Set("Last-Modified", lastModified)

 	w.Header().Set("Content-Type", objInfo.ContentType)
@@ -260,7 +260,7 @@ func generateListObjectsResponse(bucket, prefix, marker, delimiter string, maxKe
 			continue
 		}
 		content.Key = object.Name
-		content.LastModified = object.ModifiedTime.UTC().Format(timeFormatAMZ)
+		content.LastModified = object.ModTime.UTC().Format(timeFormatAMZ)
 		if object.MD5Sum != "" {
 			content.ETag = "\"" + object.MD5Sum + "\""
 		}
@@ -18,13 +18,13 @@ package main

 import router "github.com/gorilla/mux"

-// objectStorageAPI container for S3 compatible API.
-type objectStorageAPI struct {
-	ObjectAPI ObjectAPI
+// objectAPIHandlers implements and provides http handlers for S3 API.
+type objectAPIHandlers struct {
+	ObjectAPI *objectAPI
 }

 // registerAPIRouter - registers S3 compatible APIs.
-func registerAPIRouter(mux *router.Router, api objectStorageAPI) {
+func registerAPIRouter(mux *router.Router, api objectAPIHandlers) {
 	// API Router
 	apiRouter := mux.NewRoute().PathPrefix("/").Subrouter()
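
The handler type rename above changes how the API router gets wired up. A hedged sketch of that wiring; only registerAPIRouter and objectAPIHandlers come from this diff, while newObjectAPI and the disk path are hypothetical stand-ins:

	// Hypothetical wiring; newObjectAPI is a stand-in constructor, not from this commit.
	mux := router.NewRouter()
	api := objectAPIHandlers{ObjectAPI: newObjectAPI("/mnt/data")}
	registerAPIRouter(mux, api)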
@@ -74,7 +74,7 @@ func enforceBucketPolicy(action string, bucket string, reqURL *url.URL) (s3Error
 // GetBucketLocationHandler - GET Bucket location.
 // -------------------------
 // This operation returns bucket location.
-func (api objectStorageAPI) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
+func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]

@@ -152,7 +152,7 @@ func (api objectStorageAPI) GetBucketLocationHandler(w http.ResponseWriter, r *h
 // completed or aborted. This operation returns at most 1,000 multipart
 // uploads in the response.
 //
-func (api objectStorageAPI) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
+func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]

@@ -223,7 +223,7 @@ func (api objectStorageAPI) ListMultipartUploadsHandler(w http.ResponseWriter, r
 // of the objects in a bucket. You can use the request parameters as selection
 // criteria to return a subset of the objects in a bucket.
 //
-func (api objectStorageAPI) ListObjectsHandler(w http.ResponseWriter, r *http.Request) {
+func (api objectAPIHandlers) ListObjectsHandler(w http.ResponseWriter, r *http.Request) {
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]

@@ -289,8 +289,6 @@ func (api objectStorageAPI) ListObjectsHandler(w http.ResponseWriter, r *http.Re
 			writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
 		case BucketNotFound:
 			writeErrorResponse(w, r, ErrNoSuchBucket, r.URL.Path)
-		case ObjectNotFound:
-			writeErrorResponse(w, r, ErrNoSuchKey, r.URL.Path)
 		case ObjectNameInvalid:
 			writeErrorResponse(w, r, ErrNoSuchKey, r.URL.Path)
 		default:
@@ -303,7 +301,7 @@ func (api objectStorageAPI) ListObjectsHandler(w http.ResponseWriter, r *http.Re
 // -----------
 // This implementation of the GET operation returns a list of all buckets
 // owned by the authenticated sender of the request.
-func (api objectStorageAPI) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
+func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
 	// List buckets does not support bucket policies.
 	switch getRequestAuthType(r) {
 	default:
@@ -354,7 +352,7 @@ func (api objectStorageAPI) ListBucketsHandler(w http.ResponseWriter, r *http.Re
 }

 // DeleteMultipleObjectsHandler - deletes multiple objects.
-func (api objectStorageAPI) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
+func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]

@@ -465,7 +463,7 @@ func (api objectStorageAPI) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 // PutBucketHandler - PUT Bucket
 // ----------
 // This implementation of the PUT operation creates a new bucket for authenticated request
-func (api objectStorageAPI) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
+func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]

@@ -530,7 +528,7 @@ func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]stri
 // ----------
 // This implementation of the POST operation handles object creation with a specified
 // signature policy in multipart/form-data
-func (api objectStorageAPI) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
+func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
 	// Here the parameter is the size of the form data that should
 	// be loaded in memory, the remaining being put in temporary files.
 	reader, e := r.MultipartReader()
@@ -560,7 +558,7 @@ func (api objectStorageAPI) PostPolicyBucketHandler(w http.ResponseWriter, r *ht
 		writeErrorResponse(w, r, apiErr, r.URL.Path)
 		return
 	}
-	objInfo, err := api.ObjectAPI.PutObject(bucket, object, -1, fileBody, nil)
+	md5Sum, err := api.ObjectAPI.PutObject(bucket, object, -1, fileBody, nil)
 	if err != nil {
 		errorIf(err.Trace(), "PutObject failed.", nil)
 		switch err.ToGoError().(type) {
@@ -579,8 +577,8 @@ func (api objectStorageAPI) PostPolicyBucketHandler(w http.ResponseWriter, r *ht
 		}
 		return
 	}
-	if objInfo.MD5Sum != "" {
-		w.Header().Set("ETag", "\""+objInfo.MD5Sum+"\"")
+	if md5Sum != "" {
+		w.Header().Set("ETag", "\""+md5Sum+"\"")
 	}
 	writeSuccessResponse(w, nil)
 }
@@ -591,7 +589,7 @@ func (api objectStorageAPI) PostPolicyBucketHandler(w http.ResponseWriter, r *ht
 // The operation returns a 200 OK if the bucket exists and you
 // have permission to access it. Otherwise, the operation might
 // return responses such as 404 Not Found and 403 Forbidden.
-func (api objectStorageAPI) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
+func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]

@@ -630,7 +628,7 @@ func (api objectStorageAPI) HeadBucketHandler(w http.ResponseWriter, r *http.Req
 }

 // DeleteBucketHandler - Delete bucket
-func (api objectStorageAPI) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
+func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]

@@ -127,7 +127,7 @@ func bucketPolicyConditionMatch(conditions map[string]string, statement policySt
 // -----------------
 // This implementation of the PUT operation uses the policy
 // subresource to add to or replace a policy on a bucket
-func (api objectStorageAPI) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
+func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]

@@ -201,7 +201,7 @@ func (api objectStorageAPI) PutBucketPolicyHandler(w http.ResponseWriter, r *htt
 // -----------------
 // This implementation of the DELETE operation uses the policy
 // subresource to add to remove a policy on a bucket.
-func (api objectStorageAPI) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
+func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]

@@ -238,7 +238,7 @@ func (api objectStorageAPI) DeleteBucketPolicyHandler(w http.ResponseWriter, r *
 // -----------------
 // This operation uses the policy
 // subresource to return the policy of a specified bucket.
-func (api objectStorageAPI) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
+func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]

@@ -1,182 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015-2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/minio/minio/pkg/probe"
)

const (
	// listObjectsLimit - maximum list objects limit.
	listObjectsLimit = 1000
)

// isDirExist - returns whether given directory is exist or not.
func isDirExist(dirname string) (bool, error) {
	fi, e := os.Lstat(dirname)
	if e != nil {
		if os.IsNotExist(e) {
			return false, nil
		}
		return false, e
	}
	return fi.IsDir(), nil
}

func (fs *Filesystem) saveTreeWalk(params listObjectParams, walker *treeWalker) {
	fs.listObjectMapMutex.Lock()
	defer fs.listObjectMapMutex.Unlock()

	walkers, _ := fs.listObjectMap[params]
	walkers = append(walkers, walker)

	fs.listObjectMap[params] = walkers
}

func (fs *Filesystem) lookupTreeWalk(params listObjectParams) *treeWalker {
	fs.listObjectMapMutex.Lock()
	defer fs.listObjectMapMutex.Unlock()

	if walkChs, ok := fs.listObjectMap[params]; ok {
		for i, walkCh := range walkChs {
			if !walkCh.timedOut {
				newWalkChs := walkChs[i+1:]
				if len(newWalkChs) > 0 {
					fs.listObjectMap[params] = newWalkChs
				} else {
					delete(fs.listObjectMap, params)
				}
				return walkCh
			}
		}
		// As all channels are timed out, delete the map entry
		delete(fs.listObjectMap, params)
	}
	return nil
}

// ListObjects - lists all objects for a given prefix, returns up to
// maxKeys number of objects per call.
func (fs Filesystem) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, *probe.Error) {
	result := ListObjectsInfo{}

	// Input validation.
	bucket, e := fs.checkBucketArg(bucket)
	if e != nil {
		return result, probe.NewError(e)
	}
	bucketDir := filepath.Join(fs.diskPath, bucket)

	if !IsValidObjectPrefix(prefix) {
		return result, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: prefix})
	}

	// Verify if delimiter is anything other than '/', which we do not support.
	if delimiter != "" && delimiter != "/" {
		return result, probe.NewError(fmt.Errorf("delimiter '%s' is not supported. Only '/' is supported", delimiter))
	}

	// Verify if marker has prefix.
	if marker != "" {
		if !strings.HasPrefix(marker, prefix) {
			return result, probe.NewError(fmt.Errorf("Invalid combination of marker '%s' and prefix '%s'", marker, prefix))
		}
	}

	// Return empty response for a valid request when maxKeys is 0.
	if maxKeys == 0 {
		return result, nil
	}

	// Over flowing maxkeys - reset to listObjectsLimit.
	if maxKeys < 0 || maxKeys > listObjectsLimit {
		maxKeys = listObjectsLimit
	}

	// Verify if prefix exists.
	prefixDir := filepath.Dir(filepath.FromSlash(prefix))
	rootDir := filepath.Join(bucketDir, prefixDir)
	if status, e := isDirExist(rootDir); !status {
		if e == nil {
			// Prefix does not exist, not an error just respond empty
			// list response.
			return result, nil
		}
		// Rest errors should be treated as failure.
		return result, probe.NewError(e)
	}

	recursive := true
	if delimiter == "/" {
		recursive = false
	}

	// Maximum 1000 objects returned in a single call to listObjects.
	// Further calls will set right marker value to continue reading the rest of the objectList.
	// popTreeWalker returns nil if the call to ListObject is done for the first time.
	// On further calls to ListObjects to retrieve more objects within the timeout period,
	// popTreeWalker returns the channel from which rest of the objects can be retrieved.
	walker := fs.lookupTreeWalk(listObjectParams{bucket, delimiter, marker, prefix})
	if walker == nil {
		walker = startTreeWalk(fs.diskPath, bucket, filepath.FromSlash(prefix), filepath.FromSlash(marker), recursive)
	}

	nextMarker := ""
	for i := 0; i < maxKeys; {
		walkResult, ok := <-walker.ch
		if !ok {
			// Closed channel.
			return result, nil
		}
		// For any walk error return right away.
		if walkResult.err != nil {
			return ListObjectsInfo{}, probe.NewError(walkResult.err)
		}
		objInfo := walkResult.objectInfo
		objInfo.Name = filepath.ToSlash(objInfo.Name)

		// Skip temporary files.
		if strings.Contains(objInfo.Name, "$multiparts") || strings.Contains(objInfo.Name, "$tmpobject") {
			continue
		}

		// For objects being directory and delimited we set Prefixes.
		if objInfo.IsDir {
			result.Prefixes = append(result.Prefixes, objInfo.Name)
		} else {
			result.Objects = append(result.Objects, objInfo)
		}

		// We have listed everything return.
		if walkResult.end {
			return result, nil
		}
		nextMarker = objInfo.Name
		i++
	}
	// We haven't exhausted the list yet, set IsTruncated to 'true' so
	// that the client can send another request.
	result.IsTruncated = true
	result.NextMarker = nextMarker
	fs.saveTreeWalk(listObjectParams{bucket, delimiter, nextMarker, prefix}, walker)
	return result, nil
}
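
The IsTruncated/NextMarker contract in the removed ListObjects above implies a simple pagination loop on the caller's side. A sketch under the assumption of an initialized Filesystem value fs; the bucket and prefix names are illustrative:

	marker := ""
	for {
		result, err := fs.ListObjects("mybucket", "photos/", marker, "/", 1000)
		if err != nil {
			break // handle the *probe.Error appropriately
		}
		for _, obj := range result.Objects {
			fmt.Println(obj.Name)
		}
		if !result.IsTruncated {
			break // listing exhausted
		}
		marker = result.NextMarker // resume where this call stopped
	}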
fs-bucket.go
@@ -1,163 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"

	"github.com/minio/minio/pkg/probe"
)

/// Bucket Operations

// DeleteBucket - delete a bucket.
func (fs Filesystem) DeleteBucket(bucket string) *probe.Error {
	// Verify bucket is valid.
	if !IsValidBucketName(bucket) {
		return probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	bucket = getActualBucketname(fs.diskPath, bucket)
	bucketDir := filepath.Join(fs.diskPath, bucket)
	if e := os.Remove(bucketDir); e != nil {
		// Error if there was no bucket in the first place.
		if os.IsNotExist(e) {
			return probe.NewError(BucketNotFound{Bucket: bucket})
		}
		// On windows the string is slightly different, handle it here.
		if strings.Contains(e.Error(), "directory is not empty") {
			return probe.NewError(BucketNotEmpty{Bucket: bucket})
		}
		// Hopefully for all other operating systems, this is
		// assumed to be consistent.
		if strings.Contains(e.Error(), "directory not empty") {
			return probe.NewError(BucketNotEmpty{Bucket: bucket})
		}
		return probe.NewError(e)
	}
	return nil
}

// ListBuckets - Get service.
func (fs Filesystem) ListBuckets() ([]BucketInfo, *probe.Error) {
	files, e := ioutil.ReadDir(fs.diskPath)
	if e != nil {
		return []BucketInfo{}, probe.NewError(e)
	}
	var buckets []BucketInfo
	for _, file := range files {
		if !file.IsDir() {
			// If not directory, ignore all file types.
			continue
		}
		// If directories are found with odd names, skip them.
		dirName := strings.ToLower(file.Name())
		if !IsValidBucketName(dirName) {
			continue
		}
		bucket := BucketInfo{
			Name:    dirName,
			Created: file.ModTime(),
		}
		buckets = append(buckets, bucket)
	}
	// Remove duplicated entries.
	buckets = removeDuplicateBuckets(buckets)
	return buckets, nil
}

// removeDuplicateBuckets - remove duplicate buckets.
func removeDuplicateBuckets(buckets []BucketInfo) []BucketInfo {
	length := len(buckets) - 1
	for i := 0; i < length; i++ {
		for j := i + 1; j <= length; j++ {
			if buckets[i].Name == buckets[j].Name {
				if buckets[i].Created.Sub(buckets[j].Created) > 0 {
					buckets[i] = buckets[length]
				} else {
					buckets[j] = buckets[length]
				}
				buckets = buckets[0:length]
				length--
				j--
			}
		}
	}
	return buckets
}

// MakeBucket - PUT Bucket
func (fs Filesystem) MakeBucket(bucket string) *probe.Error {
	if _, e := fs.checkBucketArg(bucket); e == nil {
		return probe.NewError(BucketExists{Bucket: bucket})
	} else if _, ok := e.(BucketNameInvalid); ok {
		return probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	bucketDir := filepath.Join(fs.diskPath, bucket)

	// Make bucket.
	if e := os.Mkdir(bucketDir, 0700); e != nil {
		return probe.NewError(e)
	}
	return nil
}

// getActualBucketname - will convert incoming bucket names to
// corresponding actual bucketnames on the backend in a platform
// compatible way for all operating systems.
func getActualBucketname(fsPath, bucket string) string {
	fd, e := os.Open(fsPath)
	if e != nil {
		return bucket
	}
	buckets, e := fd.Readdirnames(-1)
	if e != nil {
		return bucket
	}
	for _, b := range buckets {
		// Verify if lowercase version of the bucket is equal
		// to the incoming bucket, then use the proper name.
		if strings.ToLower(b) == bucket {
			return b
		}
	}
	return bucket
}

// GetBucketInfo - get bucket metadata.
func (fs Filesystem) GetBucketInfo(bucket string) (BucketInfo, *probe.Error) {
	if !IsValidBucketName(bucket) {
		return BucketInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	bucket = getActualBucketname(fs.diskPath, bucket)
	// Get bucket path.
	bucketDir := filepath.Join(fs.diskPath, bucket)
	fi, e := os.Stat(bucketDir)
	if e != nil {
		// Check if bucket exists.
		if os.IsNotExist(e) {
			return BucketInfo{}, probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return BucketInfo{}, probe.NewError(e)
	}
	bucketMetadata := BucketInfo{}
	bucketMetadata.Name = fi.Name()
	bucketMetadata.Created = fi.ModTime()
	return bucketMetadata, nil
}
@@ -1,256 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"io/ioutil"
	"os"
	"strconv"
	"strings"
	"testing"
)

// The test not just includes asserting the correctness of the output,
// But also includes test cases for which the function should fail.
// For those cases for which it fails, its also asserted whether the function fails as expected.
func TestGetBucketInfo(t *testing.T) {
	// Make a temporary directory to use as the fs.
	directory, e := ioutil.TempDir("", "minio-metadata-test")
	if e != nil {
		t.Fatal(e)
	}
	defer os.RemoveAll(directory)

	// Create the fs.
	fs, err := newFS(directory)
	if err != nil {
		t.Fatal(err)
	}

	// Creating few buckets.
	for i := 0; i < 4; i++ {
		err = fs.MakeBucket("meta-test-bucket." + strconv.Itoa(i))
		if err != nil {
			t.Fatal(err)
		}
	}
	testCases := []struct {
		bucketName string
		metaData   BucketInfo
		e          error
		shouldPass bool
	}{
		// Test cases with invalid bucket names.
		{".test", BucketInfo{}, BucketNameInvalid{Bucket: ".test"}, false},
		{"Test", BucketInfo{}, BucketNameInvalid{Bucket: "Test"}, false},
		{"---", BucketInfo{}, BucketNameInvalid{Bucket: "---"}, false},
		{"ad", BucketInfo{}, BucketNameInvalid{Bucket: "ad"}, false},
		// Test cases with non-existent buckets.
		{"volatile-bucket-1", BucketInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
		{"volatile-bucket-2", BucketInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
		// Test cases with existing buckets.
		{"meta-test-bucket.0", BucketInfo{Name: "meta-test-bucket.0"}, nil, true},
		{"meta-test-bucket.1", BucketInfo{Name: "meta-test-bucket.1"}, nil, true},
		{"meta-test-bucket.2", BucketInfo{Name: "meta-test-bucket.2"}, nil, true},
		{"meta-test-bucket.3", BucketInfo{Name: "meta-test-bucket.3"}, nil, true},
	}
	for i, testCase := range testCases {
		// The err returned is of type *probe.Error.
		bucketInfo, err := fs.GetBucketInfo(testCase.bucketName)

		if err != nil && testCase.shouldPass {
			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Cause.Error())
		}
		if err == nil && !testCase.shouldPass {
			t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.e.Error())
		}
		// Failed as expected, but does it fail for the expected reason.
		if err != nil && !testCase.shouldPass {
			if testCase.e.Error() != err.Cause.Error() {
				t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.e.Error(), err.Cause.Error())
			}
		}
		// Since there are cases for which GetBucketInfo fails, this is necessary.
		// Test passes as expected, but the output values are verified for correctness here.
		if err == nil && testCase.shouldPass {
			if testCase.bucketName != bucketInfo.Name {
				t.Errorf("Test %d: Expected the bucket name to be \"%s\", but found \"%s\" instead", i+1, testCase.bucketName, bucketInfo.Name)
			}
		}
	}
}

func TestListBuckets(t *testing.T) {
	// Make a temporary directory to use as the fs.
	directory, e := ioutil.TempDir("", "minio-benchmark")
	if e != nil {
		t.Fatal(e)
	}
	defer os.RemoveAll(directory)

	// Create the fs.
	fs, err := newFS(directory)
	if err != nil {
		t.Fatal(err)
	}

	// Create a few buckets.
	for i := 0; i < 10; i++ {
		err = fs.MakeBucket("testbucket." + strconv.Itoa(i))
		if err != nil {
			t.Fatal(err)
		}
	}

	// List, and ensure that they are all there.
	metadatas, err := fs.ListBuckets()
	if err != nil {
		t.Fatal(err)
	}

	if len(metadatas) != 10 {
		t.Errorf("incorrect length of metadatas (%d)\n", len(metadatas))
	}

	// Iterate over the buckets, ensuring that the name is correct.
	for i := 0; i < len(metadatas); i++ {
		if !strings.Contains(metadatas[i].Name, "testbucket") {
			t.Fail()
		}
	}
}

func TestDeleteBucket(t *testing.T) {
	// Make a temporary directory to use as the fs.
	directory, e := ioutil.TempDir("", "minio-benchmark")
	if e != nil {
		t.Fatal(e)
	}
	defer os.RemoveAll(directory)

	// Create the fs.
	fs, err := newFS(directory)
	if err != nil {
		t.Fatal(err)
	}

	// Deleting a bucket that doesn't exist should error.
	err = fs.DeleteBucket("bucket")
	if !strings.Contains(err.Cause.Error(), "Bucket not found:") {
		t.Fail()
	}
}

func BenchmarkListBuckets(b *testing.B) {
	// Make a temporary directory to use as the fs.
	directory, e := ioutil.TempDir("", "minio-benchmark")
	if e != nil {
		b.Fatal(e)
	}
	defer os.RemoveAll(directory)

	// Create the fs.
	fs, err := newFS(directory)
	if err != nil {
		b.Fatal(err)
	}

	// Create a few buckets.
	for i := 0; i < 20; i++ {
		err = fs.MakeBucket("bucket." + strconv.Itoa(i))
		if err != nil {
			b.Fatal(err)
		}
	}

	b.ResetTimer()

	// List the buckets over and over and over.
	for i := 0; i < b.N; i++ {
		_, err = fs.ListBuckets()
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkDeleteBucket(b *testing.B) {
	// Make a temporary directory to use as the fs.
	directory, e := ioutil.TempDir("", "minio-benchmark")
	if e != nil {
		b.Fatal(e)
	}
	defer os.RemoveAll(directory)

	// Create the fs.
	fs, err := newFS(directory)
	if err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		// Creating buckets takes time, so stop and start the timer.
		b.StopTimer()

		// Create and delete the bucket over and over.
		err = fs.MakeBucket("bucket")
		if err != nil {
			b.Fatal(err)
		}

		b.StartTimer()

		err = fs.DeleteBucket("bucket")
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkGetBucketInfo(b *testing.B) {
	// Make a temporary directory to use as the fs.
	directory, e := ioutil.TempDir("", "minio-benchmark")
	if e != nil {
		b.Fatal(e)
	}
	defer os.RemoveAll(directory)

	// Create the fs.
	fs, err := newFS(directory)
	if err != nil {
		b.Fatal(err)
	}

	// Put up a bucket with some metadata.
	err = fs.MakeBucket("bucket")
	if err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		// Retrieve the metadata!
		_, err := fs.GetBucketInfo("bucket")
		if err != nil {
			b.Fatal(err)
		}
	}
}
@@ -54,7 +54,8 @@ func (d byDirentName) Len() int { return len(d) }
 func (d byDirentName) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
 func (d byDirentName) Less(i, j int) bool { return d[i].name < d[j].name }

-// Using sort.Search() internally to jump to the file entry containing the prefix.
+// Using sort.Search() internally to jump to the file entry containing
+// the prefix.
 func searchDirents(dirents []fsDirent, x string) int {
 	processFunc := func(i int) bool {
 		return dirents[i].name >= x
@@ -64,7 +65,7 @@ func searchDirents(dirents []fsDirent, x string) int {

 // Tree walk result carries results of tree walking.
 type treeWalkResult struct {
-	objectInfo ObjectInfo
+	fileInfo   FileInfo
 	err        error
 	end        bool
 }
@@ -77,42 +78,42 @@ type treeWalker struct {
 	timedOut bool
 }

-// treeWalk walks FS directory tree recursively pushing ObjectInfo into the channel as and when it encounters files.
+// treeWalk walks FS directory tree recursively pushing fileInfo into the channel as and when it encounters files.
 func treeWalk(bucketDir, prefixDir, entryPrefixMatch, marker string, recursive bool, send func(treeWalkResult) bool, count *int) bool {
 	// Example:
 	// if prefixDir="one/two/three/" and marker="four/five.txt" treeWalk is recursively
 	// called with prefixDir="one/two/three/four/" and marker="five.txt"

-	// Convert dirent to ObjectInfo
-	direntToObjectInfo := func(dirent fsDirent) (ObjectInfo, error) {
-		objectInfo := ObjectInfo{}
+	// Convert dirent to FileInfo
+	direntToFileInfo := func(dirent fsDirent) (FileInfo, error) {
+		fileInfo := FileInfo{}
 		// Convert to full object name.
-		objectInfo.Name = filepath.Join(prefixDir, dirent.name)
+		fileInfo.Name = filepath.Join(prefixDir, dirent.name)
 		if dirent.modTime.IsZero() && dirent.size == 0 {
 			// ModifiedTime and Size are zero, Stat() and figure out
 			// the actual values that need to be set.
 			fi, err := os.Stat(filepath.Join(bucketDir, prefixDir, dirent.name))
 			if err != nil {
-				return ObjectInfo{}, err
+				return FileInfo{}, err
 			}
 			// Fill size and modtime.
-			objectInfo.ModifiedTime = fi.ModTime()
-			objectInfo.Size = fi.Size()
-			objectInfo.IsDir = fi.IsDir()
+			fileInfo.ModTime = fi.ModTime()
+			fileInfo.Size = fi.Size()
+			fileInfo.Mode = fi.Mode()
 		} else {
-			// If ModifiedTime or Size are set then use them
+			// If ModTime or Size are set then use them
 			// without attempting another Stat operation.
-			objectInfo.ModifiedTime = dirent.modTime
-			objectInfo.Size = dirent.size
-			objectInfo.IsDir = dirent.IsDir()
+			fileInfo.ModTime = dirent.modTime
+			fileInfo.Size = dirent.size
+			fileInfo.Mode = dirent.mode
 		}
-		if objectInfo.IsDir {
+		if fileInfo.Mode.IsDir() {
 			// Add os.PathSeparator suffix again for directories as
 			// filepath.Join would have removed it.
-			objectInfo.Size = 0
-			objectInfo.Name += string(os.PathSeparator)
+			fileInfo.Size = 0
+			fileInfo.Name += string(os.PathSeparator)
 		}
-		return objectInfo, nil
+		return fileInfo, nil
 	}

 	var markerBase, markerDir string
@@ -126,12 +127,25 @@ func treeWalk(bucketDir, prefixDir, entryPrefixMatch, marker string, recursive b
 		}
 	}

-	// readDirAll returns entries that begins with entryPrefixMatch
-	dirents, err := readDirAll(filepath.Join(bucketDir, prefixDir), entryPrefixMatch)
+	// Entry prefix match function.
+	prefixMatchFn := func(dirent fsDirent) bool {
+		if dirent.IsDir() || dirent.IsRegular() {
+			// Does dirent name has reserved prefixes or suffixes.
+			hasReserved := hasReservedPrefix(dirent.name) || hasReservedSuffix(dirent.name)
+			// All dirents which match prefix and do not have reserved
+			// keywords in them are valid entries.
+			return strings.HasPrefix(dirent.name, entryPrefixMatch) && !hasReserved
+		}
+		return false
+	}
+
+	// scandir returns entries that begins with entryPrefixMatch
+	dirents, err := scandir(filepath.Join(bucketDir, prefixDir), prefixMatchFn, true)
 	if err != nil {
 		send(treeWalkResult{err: err})
 		return false
 	}

 	// example:
 	// If markerDir="four/" searchDirents() returns the index of "four/" in the sorted
 	// dirents list. We skip all the dirent entries till "four/"
@@ -158,13 +172,13 @@ func treeWalk(bucketDir, prefixDir, entryPrefixMatch, marker string, recursive b
 			}
 			continue
 		}
-		objectInfo, err := direntToObjectInfo(dirent)
+		fileInfo, err := direntToFileInfo(dirent)
 		if err != nil {
 			send(treeWalkResult{err: err})
 			return false
 		}
 		*count--
-		if !send(treeWalkResult{objectInfo: objectInfo}) {
+		if !send(treeWalkResult{fileInfo: fileInfo}) {
 			return false
 		}
 	}
@@ -182,7 +196,7 @@ func startTreeWalk(fsPath, bucket, prefix, marker string, recursive bool) *treeW
 	// if prefix is "one/two/th" and marker is "one/two/three/four/five.txt"
 	// treeWalk is called with prefixDir="one/two/" and marker="three/four/five.txt"
 	// and entryPrefixMatch="th"
-	ch := make(chan treeWalkResult, listObjectsLimit)
+	ch := make(chan treeWalkResult, fsListLimit)
 	walkNotify := treeWalker{ch: ch}
 	entryPrefixMatch := prefix
 	prefixDir := ""
@@ -196,8 +210,6 @@ func startTreeWalk(fsPath, bucket, prefix, marker string, recursive bool) *treeW
 	go func() {
 		defer close(ch)
 		send := func(walkResult treeWalkResult) bool {
-			// Add the bucket.
-			walkResult.objectInfo.Bucket = bucket
 			if count == 0 {
 				walkResult.end = true
 			}
@@ -23,7 +23,6 @@ import (
 	"path/filepath"
 	"runtime"
 	"sort"
-	"strings"
 	"syscall"
 	"unsafe"
 )
@@ -48,7 +47,7 @@ func clen(n []byte) int {

 // parseDirents - inspired from
 // https://golang.org/src/syscall/syscall_<os>.go
-func parseDirents(buf []byte) []fsDirent {
+func parseDirents(dirPath string, buf []byte) []fsDirent {
 	bufidx := 0
 	dirents := []fsDirent{}
 	for bufidx < len(buf) {
@@ -87,8 +86,16 @@ func parseDirents(buf []byte) []fsDirent {
 		case syscall.DT_SOCK:
 			mode = os.ModeSocket
 		case syscall.DT_UNKNOWN:
+			// On Linux XFS does not implement d_type for on disk
+			// format << v5. Fall back to Stat().
+			if fi, err := os.Stat(filepath.Join(dirPath, name)); err == nil {
+				mode = fi.Mode()
+			} else {
+				// Caller listing would fail, if Stat failed but we
+				// won't crash the server.
 			mode = 0xffffffff
+			}
 		}

 		dirents = append(dirents, fsDirent{
 			name: name,
@@ -98,37 +105,6 @@ func parseDirents(dirPath string, buf []byte) []fsDirent {
 	return dirents
 }

-// Read all directory entries, returns a list of lexically sorted entries.
-func readDirAll(readDirPath, entryPrefixMatch string) ([]fsDirent, error) {
-	buf := make([]byte, readDirentBufSize)
-	f, err := os.Open(readDirPath)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	dirents := []fsDirent{}
-	for {
-		nbuf, err := syscall.ReadDirent(int(f.Fd()), buf)
-		if err != nil {
-			return nil, err
-		}
-		if nbuf <= 0 {
-			break
-		}
-		for _, dirent := range parseDirents(buf[:nbuf]) {
-			if dirent.IsDir() {
-				dirent.name += string(os.PathSeparator)
-				dirent.size = 0
-			}
-			if strings.HasPrefix(dirent.name, entryPrefixMatch) {
-				dirents = append(dirents, dirent)
-			}
-		}
-	}
-	sort.Sort(byDirentName(dirents))
-	return dirents, nil
-}
-
 // scans the directory dirPath, calling filter() on each directory
 // entry. Entries for which filter() returns true are stored, lexically
 // sorted using sort.Sort(). If filter is NULL, all entries are selected.
@@ -151,12 +127,13 @@ func scandir(dirPath string, filter func(fsDirent) bool, namesOnly bool) ([]fsDirent, error) {
 		if nbuf <= 0 {
 			break
 		}
-		for _, dirent := range parseDirents(buf[:nbuf]) {
+		for _, dirent := range parseDirents(dirPath, buf[:nbuf]) {
 			if !namesOnly {
 				dirent.name = filepath.Join(dirPath, dirent.name)
 			}
 			if dirent.IsDir() {
 				dirent.name += string(os.PathSeparator)
 				dirent.size = 0
 			}
 			if filter == nil || filter(dirent) {
 				dirents = append(dirents, dirent)
@@ -165,6 +142,5 @@ func scandir(dirPath string, filter func(fsDirent) bool, namesOnly bool) ([]fsDirent, error) {
 	}

 	sort.Sort(byDirentName(dirents))
-
 	return dirents, nil
 }
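
For reference, the filter-based scandir contract shown above can be exercised like this; a sketch with an assumed directory path, selecting directories only:

	// namesOnly=false joins entry names to full paths; directory names
	// come back with a trailing path separator, per scandir above.
	dirents, err := scandir("/tmp", func(d fsDirent) bool {
		return d.IsDir()
	}, false)
	if err != nil {
		log.Fatalln(err)
	}
	for _, d := range dirents {
		fmt.Println(d.name)
	}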
@@ -23,46 +23,8 @@ import (
 	"os"
 	"path/filepath"
 	"sort"
-	"strings"
 )

-// Read all directory entries, returns a list of lexically sorted entries.
-func readDirAll(readDirPath, entryPrefixMatch string) ([]fsDirent, error) {
-	f, err := os.Open(readDirPath)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	var dirents []fsDirent
-	for {
-		fis, err := f.Readdir(1000)
-		if err != nil {
-			if err == io.EOF {
-				break
-			}
-			return nil, err
-		}
-		for _, fi := range fis {
-			dirent := fsDirent{
-				name:    fi.Name(),
-				modTime: fi.ModTime(),
-				size:    fi.Size(),
-				mode:    fi.Mode(),
-			}
-			if dirent.IsDir() {
-				dirent.name += string(os.PathSeparator)
-				dirent.size = 0
-			}
-			if strings.HasPrefix(fi.Name(), entryPrefixMatch) {
-				dirents = append(dirents, dirent)
-			}
-		}
-	}
-	// Sort dirents.
-	sort.Sort(byDirentName(dirents))
-	return dirents, nil
-}
-
 // scans the directory dirPath, calling filter() on each directory
 // entry. Entries for which filter() returns true are stored, lexically
 // sorted using sort.Sort(). If filter is NULL, all entries are selected.
@@ -103,6 +65,5 @@ func scandir(dirPath string, filter func(fsDirent) bool, namesOnly bool) ([]fsDirent, error) {
 	}

 	sort.Sort(byDirentName(dirents))
-
 	return dirents, nil
 }
@@ -1,280 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"errors"
	"os"
	"path/filepath"
	"strings"
	"time"
)

func scanMultipartDir(bucketDir, prefixPath, markerPath, uploadIDMarker string, recursive bool) <-chan multipartObjectInfo {
	objectInfoCh := make(chan multipartObjectInfo, listObjectsLimit)

	// TODO: check if bucketDir is absolute path
	scanDir := bucketDir
	dirDepth := bucketDir

	if prefixPath != "" {
		if !filepath.IsAbs(prefixPath) {
			tmpPrefixPath := filepath.Join(bucketDir, prefixPath)
			if strings.HasSuffix(prefixPath, string(os.PathSeparator)) {
				tmpPrefixPath += string(os.PathSeparator)
			}
			prefixPath = tmpPrefixPath
		}

		// TODO: check if prefixPath starts with bucketDir

		// Case #1: if prefixPath is /mnt/mys3/mybucket/2012/photos/paris, then
		//          dirDepth is /mnt/mys3/mybucket/2012/photos
		// Case #2: if prefixPath is /mnt/mys3/mybucket/2012/photos/, then
		//          dirDepth is /mnt/mys3/mybucket/2012/photos
		dirDepth = filepath.Dir(prefixPath)
		scanDir = dirDepth
	} else {
		prefixPath = bucketDir
	}

	if markerPath != "" {
		if !filepath.IsAbs(markerPath) {
			tmpMarkerPath := filepath.Join(bucketDir, markerPath)
			if strings.HasSuffix(markerPath, string(os.PathSeparator)) {
				tmpMarkerPath += string(os.PathSeparator)
			}

			markerPath = tmpMarkerPath
		}

		// TODO: check markerPath must be a file
		if uploadIDMarker != "" {
			markerPath = filepath.Join(markerPath, uploadIDMarker+multipartUploadIDSuffix)
		}

		// TODO: check if markerPath starts with bucketDir
		// TODO: check if markerPath starts with prefixPath

		// Case #1: if markerPath is /mnt/mys3/mybucket/2012/photos/gophercon.png, then
		//          scanDir is /mnt/mys3/mybucket/2012/photos
		// Case #2: if markerPath is /mnt/mys3/mybucket/2012/photos/gophercon.png/1fbd117a-268a-4ed0-85c9-8cc3888cbf20.uploadid, then
		//          scanDir is /mnt/mys3/mybucket/2012/photos/gophercon.png
		// Case #3: if markerPath is /mnt/mys3/mybucket/2012/photos/, then
		//          scanDir is /mnt/mys3/mybucket/2012/photos

		scanDir = filepath.Dir(markerPath)
	} else {
		markerPath = bucketDir
	}

	// Have bucketDir ends with os.PathSeparator
	if !strings.HasSuffix(bucketDir, string(os.PathSeparator)) {
		bucketDir += string(os.PathSeparator)
	}

	// Remove os.PathSeparator if scanDir ends with
	if strings.HasSuffix(scanDir, string(os.PathSeparator)) {
		scanDir = filepath.Dir(scanDir)
	}

	// goroutine - retrieves directory entries, makes ObjectInfo and sends into the channel.
	go func() {
		defer close(objectInfoCh)

		// send function - returns true if ObjectInfo is sent
		// within (time.Second * 15) else false on timeout.
		send := func(oi multipartObjectInfo) bool {
			timer := time.After(time.Second * 15)
			select {
			case objectInfoCh <- oi:
				return true
			case <-timer:
				return false
			}
		}

		// filter function - filters directory entries matching multipart uploadids, prefix and marker
		direntFilterFn := func(dirent fsDirent) bool {
			// check if dirent is a directory (or) dirent is a regular file and it's name ends with Upload ID suffix string
			if dirent.IsDir() || (dirent.IsRegular() && strings.HasSuffix(dirent.name, multipartUploadIDSuffix)) {
				// return if dirent's name starts with prefixPath and lexically higher than markerPath
				return strings.HasPrefix(dirent.name, prefixPath) && dirent.name > markerPath
			}
			return false
		}

		// filter function - filters directory entries matching multipart uploadids
		subDirentFilterFn := func(dirent fsDirent) bool {
			// check if dirent is a directory (or) dirent is a regular file and it's name ends with Upload ID suffix string
			return dirent.IsDir() || (dirent.IsRegular() && strings.HasSuffix(dirent.name, multipartUploadIDSuffix))
		}

		// lastObjInfo is used to save last object info which is sent at last with End=true
		var lastObjInfo *multipartObjectInfo

		sendError := func(err error) {
			if lastObjInfo != nil {
				if !send(*lastObjInfo) {
					// as we got error sending lastObjInfo, we can't send the error
					return
				}
			}

			send(multipartObjectInfo{Err: err, End: true})
			return
		}

		for {
			dirents, err := scandir(scanDir, direntFilterFn, false)
			if err != nil {
				sendError(err)
				return
			}

			var dirent fsDirent
			for len(dirents) > 0 {
				dirent, dirents = dirents[0], dirents[1:]
				if dirent.IsRegular() {
					// Handle uploadid file
					name := strings.Replace(filepath.Dir(dirent.name), bucketDir, "", 1)
					if name == "" {
						// This should not happen ie uploadid file should not be in bucket directory
						sendError(errors.New("Corrupted metadata"))
						return
					}

					uploadID := strings.Split(filepath.Base(dirent.name), multipartUploadIDSuffix)[0]

					// Solaris and older unixes have modTime to be
					// empty, fallback to os.Stat() to fill missing values.
					if dirent.modTime.IsZero() {
						if fi, e := os.Stat(dirent.name); e == nil {
							dirent.modTime = fi.ModTime()
						} else {
							sendError(e)
							return
						}
					}

					objInfo := multipartObjectInfo{
						Name:         name,
						UploadID:     uploadID,
						ModifiedTime: dirent.modTime,
					}

					// as we got new object info, send last object info and keep new object info as last object info
					if lastObjInfo != nil {
						if !send(*lastObjInfo) {
							return
						}
					}
					lastObjInfo = &objInfo

					continue
				}

				// Fetch sub dirents.
				subDirents, err := scandir(dirent.name, subDirentFilterFn, false)
				if err != nil {
					sendError(err)
					return
				}

				subDirFound := false
				uploadIDDirents := []fsDirent{}
				// If subDirents has a directory, then current dirent needs to be sent
				for _, subdirent := range subDirents {
					if subdirent.IsDir() {
						subDirFound = true

						if recursive {
							break
						}
					}

					if !recursive && subdirent.IsRegular() {
						uploadIDDirents = append(uploadIDDirents, subdirent)
					}
				}

				// Send directory only for non-recursive listing
				if !recursive && (subDirFound || len(subDirents) == 0) {
					// Solaris and older unixes have modTime to be
					// empty, fallback to os.Stat() to fill missing values.
					if dirent.modTime.IsZero() {
						if fi, e := os.Stat(dirent.name); e == nil {
							dirent.modTime = fi.ModTime()
						} else {
							sendError(e)
							return
						}
					}

					objInfo := multipartObjectInfo{
						Name:         strings.Replace(dirent.name, bucketDir, "", 1),
						ModifiedTime: dirent.modTime,
						IsDir:        true,
					}

					// as we got new object info, send last object info and keep new object info as last object info
					if lastObjInfo != nil {
						if !send(*lastObjInfo) {
							return
						}
					}
					lastObjInfo = &objInfo
				}

				if recursive {
					dirents = append(subDirents, dirents...)
				} else {
					dirents = append(uploadIDDirents, dirents...)
				}
			}

			if !recursive {
				break
			}

			markerPath = scanDir + string(os.PathSeparator)
			if scanDir = filepath.Dir(scanDir); scanDir < dirDepth {
				break
			}
		}

		if lastObjInfo != nil {
			// we got last object
			lastObjInfo.End = true
			if !send(*lastObjInfo) {
				return
			}
		}
	}()

	return objectInfoCh
}

// multipartObjectInfo - Multipart object info
type multipartObjectInfo struct {
	Name         string
	UploadID     string
	ModifiedTime time.Time
	IsDir        bool
	Err          error
	End          bool
}
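
Consumers drain the channel scanMultipartDir returns until an entry carries End=true (or Err is set); both are fields of multipartObjectInfo above. A sketch, with bucketDir assumed to be an absolute bucket path:

	for info := range scanMultipartDir(bucketDir, "", "", "", true) {
		if info.Err != nil {
			log.Fatalln(info.Err) // the sender reports errors in-band
		}
		fmt.Println(info.Name, info.UploadID, info.IsDir)
		if info.End {
			break // final entry is flagged End=true
		}
	}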
fs-multipart.go
@ -1,685 +0,0 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2015,2016 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio/pkg/mimedb"
|
||||
"github.com/minio/minio/pkg/probe"
|
||||
"github.com/minio/minio/pkg/safe"
|
||||
"github.com/skyrings/skyring-common/tools/uuid"
|
||||
)
|
||||
|
||||
const (
|
||||
minioMetaDir = ".minio"
|
||||
multipartUploadIDSuffix = ".uploadid"
|
||||
)
|
||||
|
||||
// Removes files and its parent directories up to a given level.
|
||||
func removeFileTree(fileName string, level string) error {
|
||||
if e := os.Remove(fileName); e != nil {
|
||||
return e
|
||||
}
|
||||
|
||||
for fileDir := filepath.Dir(fileName); fileDir > level; fileDir = filepath.Dir(fileDir) {
|
||||
if status, e := isDirEmpty(fileDir); e != nil {
|
||||
return e
|
||||
} else if !status {
|
||||
break
|
||||
}
|
||||
if e := os.Remove(fileDir); e != nil {
|
||||
return e
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Takes an input stream and safely writes to disk, additionally
|
||||
// verifies checksum.
|
||||
func safeWriteFile(fileName string, data io.Reader, size int64, md5sum string) error {
|
||||
safeFile, e := safe.CreateFileWithSuffix(fileName, "-")
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
|
||||
md5Hasher := md5.New()
|
||||
multiWriter := io.MultiWriter(md5Hasher, safeFile)
|
||||
if size > 0 {
|
||||
if _, e = io.CopyN(multiWriter, data, size); e != nil {
|
||||
// Closes the file safely and removes it in a single atomic operation.
|
||||
safeFile.CloseAndRemove()
|
||||
return e
|
||||
}
|
||||
} else {
|
||||
if _, e = io.Copy(multiWriter, data); e != nil {
|
||||
// Closes the file safely and removes it in a single atomic operation.
|
||||
safeFile.CloseAndRemove()
|
||||
return e
|
||||
}
|
||||
}
|
||||
|
||||
dataMd5sum := hex.EncodeToString(md5Hasher.Sum(nil))
|
||||
if md5sum != "" && !isMD5SumEqual(md5sum, dataMd5sum) {
|
||||
// Closes the file safely and removes it in a single atomic operation.
|
||||
safeFile.CloseAndRemove()
|
||||
return BadDigest{ExpectedMD5: md5sum, CalculatedMD5: dataMd5sum}
|
||||
}
|
||||
|
||||
// Safely close the file and atomically renames it the actual filePath.
|
||||
safeFile.Close()
|
||||
|
||||
// Safely wrote the file.
|
||||
return nil
|
||||
}
|
||||
|
||||
func isFileExist(filename string) (bool, error) {
|
||||
fi, e := os.Lstat(filename)
|
||||
if e != nil {
|
||||
if os.IsNotExist(e) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return false, e
|
||||
}
|
||||
|
||||
return fi.Mode().IsRegular(), nil
|
||||
}
|
||||
|
||||
// Create an s3 compatible MD5sum for complete multipart transaction.
|
||||
func makeS3MD5(md5Strs ...string) (string, *probe.Error) {
|
||||
var finalMD5Bytes []byte
|
||||
for _, md5Str := range md5Strs {
|
||||
md5Bytes, e := hex.DecodeString(md5Str)
|
||||
if e != nil {
|
||||
return "", probe.NewError(e)
|
||||
}
|
||||
finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
|
||||
}
|
||||
md5Hasher := md5.New()
|
||||
md5Hasher.Write(finalMD5Bytes)
|
||||
s3MD5 := fmt.Sprintf("%s-%d", hex.EncodeToString(md5Hasher.Sum(nil)), len(md5Strs))
|
||||
return s3MD5, nil
|
||||
}
|
||||
|
||||
func (fs Filesystem) newUploadID(bucket, object string) (string, error) {
|
||||
metaObjectDir := filepath.Join(fs.diskPath, minioMetaDir, bucket, object)
|
||||
|
||||
// create metaObjectDir if not exist
|
||||
if status, e := isDirExist(metaObjectDir); e != nil {
|
||||
return "", e
|
||||
} else if !status {
|
||||
if e := os.MkdirAll(metaObjectDir, 0755); e != nil {
|
||||
return "", e
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
uuid, e := uuid.New()
|
||||
if e != nil {
|
||||
return "", e
|
||||
}
|
||||
|
||||
uploadID := uuid.String()
|
||||
uploadIDFile := filepath.Join(metaObjectDir, uploadID+multipartUploadIDSuffix)
|
||||
if _, e := os.Lstat(uploadIDFile); e != nil {
|
||||
if !os.IsNotExist(e) {
|
||||
return "", e
|
||||
}
|
||||
|
||||
// uploadIDFile doesn't exist, so create empty file to reserve the name
|
||||
if e := ioutil.WriteFile(uploadIDFile, []byte{}, 0644); e != nil {
|
||||
return "", e
|
||||
}
|
||||
|
||||
return uploadID, nil
|
||||
}
|
||||
// uploadIDFile already exists.
|
||||
// loop again to try with different uuid generated.
|
||||
}
|
||||
}
|
||||
|
||||
func (fs Filesystem) isUploadIDExist(bucket, object, uploadID string) (bool, error) {
|
||||
return isFileExist(filepath.Join(fs.diskPath, minioMetaDir, bucket, object, uploadID+multipartUploadIDSuffix))
|
||||
}
|
||||
|
||||
func (fs Filesystem) cleanupUploadID(bucket, object, uploadID string) error {
|
||||
metaObjectDir := filepath.Join(fs.diskPath, minioMetaDir, bucket, object)
|
||||
uploadIDPrefix := uploadID + "."
|
||||
|
||||
dirents, e := scandir(metaObjectDir,
|
||||
func(dirent fsDirent) bool {
|
||||
return dirent.IsRegular() && strings.HasPrefix(dirent.name, uploadIDPrefix)
|
||||
},
|
||||
true)
|
||||
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
|
||||
for _, dirent := range dirents {
|
||||
if e := os.Remove(filepath.Join(metaObjectDir, dirent.name)); e != nil {
|
||||
return e
|
||||
}
|
||||
}
|
||||
|
||||
if status, e := isDirEmpty(metaObjectDir); e != nil {
|
||||
return e
|
||||
} else if status {
|
||||
if e := removeFileTree(metaObjectDir, filepath.Join(fs.diskPath, minioMetaDir, bucket)); e != nil {
|
||||
return e
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs Filesystem) checkMultipartArgs(bucket, object string) (string, error) {
|
||||
bucket, e := fs.checkBucketArg(bucket)
|
||||
if e != nil {
|
||||
return "", e
|
||||
}
|
||||
|
||||
if !IsValidObjectName(object) {
|
||||
return "", ObjectNameInvalid{Object: object}
|
||||
}
|
||||
|
||||
return bucket, nil
|
||||
}
|
||||
|
||||
// NewMultipartUpload - initiate a new multipart session
|
||||
func (fs Filesystem) NewMultipartUpload(bucket, object string) (string, *probe.Error) {
|
||||
if bucketDirName, e := fs.checkMultipartArgs(bucket, object); e == nil {
|
||||
bucket = bucketDirName
|
||||
} else {
|
||||
return "", probe.NewError(e)
|
||||
}
|
||||
|
||||
if e := checkDiskFree(fs.diskPath, fs.minFreeDisk); e != nil {
|
||||
return "", probe.NewError(e)
|
||||
}
|
||||
|
||||
uploadID, e := fs.newUploadID(bucket, object)
|
||||
if e != nil {
|
||||
return "", probe.NewError(e)
|
||||
}
|
||||
|
||||
return uploadID, nil
|
||||
}

// PutObjectPart - create a part in a multipart session
func (fs Filesystem) PutObjectPart(bucket, object, uploadID string, partNumber int, size int64, data io.Reader, md5Hex string) (string, *probe.Error) {
	if bucketDirName, e := fs.checkMultipartArgs(bucket, object); e == nil {
		bucket = bucketDirName
	} else {
		return "", probe.NewError(e)
	}

	if status, e := fs.isUploadIDExist(bucket, object, uploadID); e != nil {
		//return "", probe.NewError(InternalError{Err: err})
		return "", probe.NewError(e)
	} else if !status {
		return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	// Part number must be a positive integer.
	if partNumber <= 0 {
		return "", probe.NewError(errors.New("invalid part number, cannot be zero or negative"))
	}

	if partNumber > 10000 {
		return "", probe.NewError(errors.New("invalid part number, cannot be greater than 10000"))
	}

	if e := checkDiskFree(fs.diskPath, fs.minFreeDisk); e != nil {
		return "", probe.NewError(e)
	}

	partSuffix := fmt.Sprintf("%s.%d.%s", uploadID, partNumber, md5Hex)
	partFilePath := filepath.Join(fs.diskPath, minioMetaDir, bucket, object, partSuffix)
	if e := safeWriteFile(partFilePath, data, size, md5Hex); e != nil {
		return "", probe.NewError(e)
	}
	return md5Hex, nil
}
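
Each part lands as its own file whose name encodes everything ListObjectParts and CompleteMultipartUpload later need. A hypothetical example of the UPLOADID.PARTNUMBER.MD5SUM layout (values invented for illustration):

// 4dc0d5a6-ffaf-....5.9e107d9d372bb6826bd81d3542a419d6
// |               |  |
// uploadID        |  md5sum of the part payload
//                 part number (1-10000)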

// AbortMultipartUpload - abort an incomplete multipart session
func (fs Filesystem) AbortMultipartUpload(bucket, object, uploadID string) *probe.Error {
	if bucketDirName, e := fs.checkMultipartArgs(bucket, object); e == nil {
		bucket = bucketDirName
	} else {
		return probe.NewError(e)
	}

	if status, e := fs.isUploadIDExist(bucket, object, uploadID); e != nil {
		//return probe.NewError(InternalError{Err: err})
		return probe.NewError(e)
	} else if !status {
		return probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	if e := fs.cleanupUploadID(bucket, object, uploadID); e != nil {
		return probe.NewError(e)
	}

	return nil
}

// CompleteMultipartUpload - complete a multipart upload and persist the data
func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, parts []completePart) (ObjectInfo, *probe.Error) {
	if bucketDirName, e := fs.checkMultipartArgs(bucket, object); e == nil {
		bucket = bucketDirName
	} else {
		return ObjectInfo{}, probe.NewError(e)
	}

	if status, e := fs.isUploadIDExist(bucket, object, uploadID); e != nil {
		//return probe.NewError(InternalError{Err: err})
		return ObjectInfo{}, probe.NewError(e)
	} else if !status {
		return ObjectInfo{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	if e := checkDiskFree(fs.diskPath, fs.minFreeDisk); e != nil {
		return ObjectInfo{}, probe.NewError(e)
	}

	metaObjectDir := filepath.Join(fs.diskPath, minioMetaDir, bucket, object)

	var md5Sums []string
	for _, part := range parts {
		partNumber := part.PartNumber
		md5sum := strings.Trim(part.ETag, "\"")
		partFile := filepath.Join(metaObjectDir, uploadID+"."+strconv.Itoa(partNumber)+"."+md5sum)
		if status, err := isFileExist(partFile); err != nil {
			return ObjectInfo{}, probe.NewError(err)
		} else if !status {
			return ObjectInfo{}, probe.NewError(InvalidPart{})
		}
		md5Sums = append(md5Sums, md5sum)
	}

	// Compute and save the S3-style md5 (the multipart ETag).
	s3MD5, err := makeS3MD5(md5Sums...)
	if err != nil {
		return ObjectInfo{}, err.Trace(md5Sums...)
	}

	completeObjectFile := filepath.Join(metaObjectDir, uploadID+".complete.")
	safeFile, e := safe.CreateFileWithSuffix(completeObjectFile, "-")
	if e != nil {
		return ObjectInfo{}, probe.NewError(e)
	}
	for _, part := range parts {
		partNumber := part.PartNumber
		// Trim off the surrounding double quotes from the ETag.
		md5sum := strings.TrimPrefix(part.ETag, "\"")
		md5sum = strings.TrimSuffix(md5sum, "\"")
		partFileStr := filepath.Join(metaObjectDir, fmt.Sprintf("%s.%d.%s", uploadID, partNumber, md5sum))
		var partFile *os.File
		partFile, e = os.Open(partFileStr)
		if e != nil {
			// Remove the complete file safely.
			safeFile.CloseAndRemove()
			return ObjectInfo{}, probe.NewError(e)
		} else if _, e = io.Copy(safeFile, partFile); e != nil {
			// Remove the complete file safely.
			safeFile.CloseAndRemove()
			return ObjectInfo{}, probe.NewError(e)
		}
		partFile.Close() // Close part file after successful copy.
	}
	// All parts concatenated, safely close the temp file.
	safeFile.Close()

	// Stat to gather fresh stat info.
	objSt, e := os.Stat(completeObjectFile)
	if e != nil {
		return ObjectInfo{}, probe.NewError(e)
	}

	bucketPath := filepath.Join(fs.diskPath, bucket)
	objectPath := filepath.Join(bucketPath, object)
	if e = os.MkdirAll(filepath.Dir(objectPath), 0755); e != nil {
		os.Remove(completeObjectFile)
		return ObjectInfo{}, probe.NewError(e)
	}
	if e = os.Rename(completeObjectFile, objectPath); e != nil {
		os.Remove(completeObjectFile)
		return ObjectInfo{}, probe.NewError(e)
	}

	fs.cleanupUploadID(bucket, object, uploadID) // TODO: handle and log the error

	contentType := "application/octet-stream"
	if objectExt := filepath.Ext(objectPath); objectExt != "" {
		if content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]; ok {
			contentType = content.ContentType
		}
	}

	newObject := ObjectInfo{
		Bucket:       bucket,
		Name:         object,
		ModifiedTime: objSt.ModTime(),
		Size:         objSt.Size(),
		ContentType:  contentType,
		MD5Sum:       s3MD5,
	}

	return newObject, nil
}
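
The completed object's ETag comes from makeS3MD5, which is not shown in this hunk. A minimal sketch of the usual S3 multipart convention it presumably follows (md5 over the concatenated binary part digests, suffixed with the part count); the helper name and exact formatting here are assumptions, not the function from this change:

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// s3MD5Sketch mimics the conventional S3 multipart ETag computation.
func s3MD5Sketch(md5Strs ...string) (string, error) {
	var finalMD5Bytes []byte
	for _, md5Str := range md5Strs {
		md5Bytes, err := hex.DecodeString(md5Str)
		if err != nil {
			return "", err
		}
		finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
	}
	md5Hasher := md5.New()
	md5Hasher.Write(finalMD5Bytes)
	// "-N" records how many parts went into the digest.
	return fmt.Sprintf("%s-%d", hex.EncodeToString(md5Hasher.Sum(nil)), len(md5Strs)), nil
}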

func (fs *Filesystem) saveListMultipartObjectCh(params listMultipartObjectParams, ch <-chan multipartObjectInfo) {
	fs.listMultipartObjectMapMutex.Lock()
	defer fs.listMultipartObjectMapMutex.Unlock()

	channels := []<-chan multipartObjectInfo{ch}
	if _, ok := fs.listMultipartObjectMap[params]; ok {
		channels = append(fs.listMultipartObjectMap[params], ch)
	}

	fs.listMultipartObjectMap[params] = channels
}

func (fs *Filesystem) lookupListMultipartObjectCh(params listMultipartObjectParams) <-chan multipartObjectInfo {
	fs.listMultipartObjectMapMutex.Lock()
	defer fs.listMultipartObjectMapMutex.Unlock()

	if channels, ok := fs.listMultipartObjectMap[params]; ok {
		var channel <-chan multipartObjectInfo
		channel, channels = channels[0], channels[1:]
		if len(channels) > 0 {
			fs.listMultipartObjectMap[params] = channels
		} else {
			// Do not store an empty channel list.
			delete(fs.listMultipartObjectMap, params)
		}

		return channel
	}

	return nil
}

// ListMultipartUploads - list incomplete multipart sessions for a given BucketMultipartResourcesMetadata
func (fs Filesystem) ListMultipartUploads(bucket, objectPrefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, *probe.Error) {
	result := ListMultipartsInfo{}

	bucket, e := fs.checkBucketArg(bucket)
	if e != nil {
		return result, probe.NewError(e)
	}

	if !IsValidObjectPrefix(objectPrefix) {
		return result, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: objectPrefix})
	}

	prefixPath := filepath.FromSlash(objectPrefix)

	// Verify if delimiter is anything other than '/', which we do not support.
	if delimiter != "" && delimiter != "/" {
		return result, probe.NewError(fmt.Errorf("delimiter '%s' is not supported", delimiter))
	}

	if keyMarker != "" && !strings.HasPrefix(keyMarker, objectPrefix) {
		return result, probe.NewError(fmt.Errorf("Invalid combination of marker '%s' and prefix '%s'", keyMarker, objectPrefix))
	}

	markerPath := filepath.FromSlash(keyMarker)
	if uploadIDMarker != "" {
		if strings.HasSuffix(markerPath, string(os.PathSeparator)) {
			return result, probe.NewError(fmt.Errorf("Invalid combination of uploadID marker '%s' and marker '%s'", uploadIDMarker, keyMarker))
		}
		id, e := uuid.Parse(uploadIDMarker)
		if e != nil {
			return result, probe.NewError(e)
		}
		if id.IsZero() {
			return result, probe.NewError(fmt.Errorf("Invalid upload ID marker %s", uploadIDMarker))
		}
	}

	// Return an empty response if maxUploads is zero.
	if maxUploads == 0 {
		return result, nil
	}

	// Reset maxUploads to listObjectsLimit if it is out of range.
	if maxUploads < 0 || maxUploads > listObjectsLimit {
		maxUploads = listObjectsLimit
	}

	recursive := true
	if delimiter == "/" {
		recursive = false
	}

	metaBucketDir := filepath.Join(fs.diskPath, minioMetaDir, bucket)

	// Look up whether a listMultipartObject channel is already available
	// for the given parameters, else create a new one.
	savedChannel := true
	multipartObjectInfoCh := fs.lookupListMultipartObjectCh(listMultipartObjectParams{
		bucket:         bucket,
		delimiter:      delimiter,
		keyMarker:      markerPath,
		prefix:         prefixPath,
		uploadIDMarker: uploadIDMarker,
	})

	if multipartObjectInfoCh == nil {
		multipartObjectInfoCh = scanMultipartDir(metaBucketDir, objectPrefix, keyMarker, uploadIDMarker, recursive)
		savedChannel = false
	}

	var objInfo *multipartObjectInfo
	nextKeyMarker := ""
	nextUploadIDMarker := ""
	for i := 0; i < maxUploads; {
		// Read from the channel.
		if oi, ok := <-multipartObjectInfoCh; ok {
			objInfo = &oi
		} else {
			// The channel is closed.
			if i == 0 {
				// First read.
				if !savedChannel {
					// It's valid for a brand-new channel to be closed on first read.
					multipartObjectInfoCh = nil
					break
				}

				// The saved channel is invalid; create a new channel.
				multipartObjectInfoCh = scanMultipartDir(metaBucketDir, objectPrefix, keyMarker,
					uploadIDMarker, recursive)
			} else {
				// TODO: FIX: there is a chance of infinite loop if we get closed channel always
				// The channel got closed due to timeout; create a new channel.
				multipartObjectInfoCh = scanMultipartDir(metaBucketDir, objectPrefix, nextKeyMarker,
					nextUploadIDMarker, recursive)
			}

			// Treat it as a new channel from here on.
			savedChannel = false
			continue
		}

		if objInfo.Err != nil {
			if os.IsNotExist(objInfo.Err) {
				return ListMultipartsInfo{}, nil
			}

			return ListMultipartsInfo{}, probe.NewError(objInfo.Err)
		}

		// Backward compatibility check.
		if strings.Contains(objInfo.Name, "$multiparts") || strings.Contains(objInfo.Name, "$tmpobject") {
			continue
		}

		// Directories are listed only if recursive is false.
		if objInfo.IsDir {
			result.CommonPrefixes = append(result.CommonPrefixes, objInfo.Name)
		} else {
			result.Uploads = append(result.Uploads, uploadMetadata{
				Object:    objInfo.Name,
				UploadID:  objInfo.UploadID,
				Initiated: objInfo.ModifiedTime,
			})
		}

		nextKeyMarker = objInfo.Name
		nextUploadIDMarker = objInfo.UploadID
		i++

		if objInfo.End {
			// We received the last object; do not save this channel for later use.
			multipartObjectInfoCh = nil
			break
		}
	}

	if multipartObjectInfoCh != nil {
		// We haven't received the last object yet; the listing is truncated.
		result.IsTruncated = true
		result.NextKeyMarker = nextKeyMarker
		result.NextUploadIDMarker = nextUploadIDMarker

		// Save this channel for later use.
		fs.saveListMultipartObjectCh(listMultipartObjectParams{
			bucket:         bucket,
			delimiter:      delimiter,
			keyMarker:      nextKeyMarker,
			prefix:         objectPrefix,
			uploadIDMarker: nextUploadIDMarker,
		}, multipartObjectInfoCh)
	}

	return result, nil
}

// ListObjectParts - list parts from an incomplete multipart session for a given ObjectResourcesMetadata
func (fs Filesystem) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, *probe.Error) {
	if bucketDirName, err := fs.checkMultipartArgs(bucket, object); err == nil {
		bucket = bucketDirName
	} else {
		return ListPartsInfo{}, probe.NewError(err)
	}

	if status, err := fs.isUploadIDExist(bucket, object, uploadID); err != nil {
		//return probe.NewError(InternalError{Err: err})
		return ListPartsInfo{}, probe.NewError(err)
	} else if !status {
		return ListPartsInfo{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	// Return an empty ListPartsInfo when maxParts is zero.
	if maxParts == 0 {
		return ListPartsInfo{}, nil
	}

	if maxParts < 0 || maxParts > 1000 {
		maxParts = 1000
	}

	metaObjectDir := filepath.Join(fs.diskPath, minioMetaDir, bucket, object)
	uploadIDPrefix := uploadID + "."

	dirents, e := scandir(metaObjectDir,
		func(dirent fsDirent) bool {
			// A part file is a regular file whose name starts with 'UPLOADID.'.
			if !(dirent.IsRegular() && strings.HasPrefix(dirent.name, uploadIDPrefix)) {
				return false
			}

			// A valid part file name has the form 'UPLOADID.PARTNUMBER.MD5SUM'.
			tokens := strings.Split(dirent.name, ".")
			if len(tokens) != 3 {
				return false
			}

			if partNumber, err := strconv.Atoi(tokens[1]); err == nil {
				if partNumber >= 1 && partNumber <= 10000 && partNumber > partNumberMarker {
					return true
				}
			}

			return false
		},
		true)
	if e != nil {
		return ListPartsInfo{}, probe.NewError(e)
	}

	isTruncated := false
	nextPartNumberMarker := 0

	parts := []partInfo{}
	for i := range dirents {
		if i == maxParts {
			isTruncated = true
			break
		}

		// On some operating systems modTime comes back empty; use os.Stat() to fill in the missing values.
		if dirents[i].modTime.IsZero() {
			if fi, e := os.Stat(filepath.Join(metaObjectDir, dirents[i].name)); e == nil {
				dirents[i].modTime = fi.ModTime()
				dirents[i].size = fi.Size()
			} else {
				return ListPartsInfo{}, probe.NewError(e)
			}
		}

		tokens := strings.Split(dirents[i].name, ".")
		partNumber, _ := strconv.Atoi(tokens[1])
		md5sum := tokens[2]
		parts = append(parts, partInfo{
			PartNumber:   partNumber,
			LastModified: dirents[i].modTime,
			ETag:         md5sum,
			Size:         dirents[i].size,
		})
	}

	if isTruncated {
		// Resume the next listing after the last part returned.
		nextPartNumberMarker = parts[len(parts)-1].PartNumber
	}

	return ListPartsInfo{
		Bucket:               bucket,
		Object:               object,
		UploadID:             uploadID,
		PartNumberMarker:     partNumberMarker,
		NextPartNumberMarker: nextPartNumberMarker,
		MaxParts:             maxParts,
		IsTruncated:          isTruncated,
		Parts:                parts,
	}, nil
}

fs-object.go
@@ -1,338 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"io"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/minio/minio/pkg/mimedb"
	"github.com/minio/minio/pkg/probe"
	"github.com/minio/minio/pkg/safe"
)

/// Object Operations

// GetObject - GET object
func (fs Filesystem) GetObject(bucket, object string, startOffset int64) (io.ReadCloser, *probe.Error) {
	// Input validation.
	bucket, e := fs.checkBucketArg(bucket)
	if e != nil {
		return nil, probe.NewError(e)
	}
	if !IsValidObjectName(object) {
		return nil, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	objectPath := filepath.Join(fs.diskPath, bucket, object)
	file, e := os.Open(objectPath)
	if e != nil {
		// If the object doesn't exist, the bucket might not exist either. Stat for
		// the bucket and give a better error message if that is true.
		if os.IsNotExist(e) {
			_, e = os.Stat(filepath.Join(fs.diskPath, bucket))
			if os.IsNotExist(e) {
				return nil, probe.NewError(BucketNotFound{Bucket: bucket})
			}
			return nil, probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
		}
		return nil, probe.NewError(e)
	}
	// Initiate a cached stat operation on the file handler.
	st, e := file.Stat()
	if e != nil {
		return nil, probe.NewError(e)
	}
	// Object path is a directory prefix, return object not found error.
	if st.IsDir() {
		return nil, probe.NewError(ObjectExistsAsPrefix{
			Bucket: bucket,
			Prefix: object,
		})
	}

	// Seek to a starting offset.
	_, e = file.Seek(startOffset, os.SEEK_SET)
	if e != nil {
		// When the "handle is invalid", the file might be a directory on Windows.
		if runtime.GOOS == "windows" && strings.Contains(e.Error(), "handle is invalid") {
			return nil, probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
		}
		return nil, probe.NewError(e)
	}
	return file, nil
}

// GetObjectInfo - get object info.
func (fs Filesystem) GetObjectInfo(bucket, object string) (ObjectInfo, *probe.Error) {
	// Input validation.
	bucket, e := fs.checkBucketArg(bucket)
	if e != nil {
		return ObjectInfo{}, probe.NewError(e)
	}

	if !IsValidObjectName(object) {
		return ObjectInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	info, err := getObjectInfo(fs.diskPath, bucket, object)
	if err != nil {
		if os.IsNotExist(err.ToGoError()) {
			return ObjectInfo{}, probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
		}
		return ObjectInfo{}, err.Trace(bucket, object)
	}
	if info.IsDir {
		return ObjectInfo{}, probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
	}
	return info, nil
}

// getObjectInfo - get object stat info.
func getObjectInfo(rootPath, bucket, object string) (ObjectInfo, *probe.Error) {
	// Do not use filepath.Join() since filepath.Join strips off any
	// object names with '/', use them as is in a static manner so
	// that we can send a proper 'ObjectNotFound' reply back upon os.Stat().
	// os.PathSeparator is "\\" on Windows and "/" elsewhere, so the same
	// concatenation is correct on every platform.
	objectPath := rootPath + string(os.PathSeparator) + bucket + string(os.PathSeparator) + object
	stat, e := os.Stat(objectPath)
	if e != nil {
		return ObjectInfo{}, probe.NewError(e)
	}
	contentType := "application/octet-stream"
	if runtime.GOOS == "windows" {
		object = filepath.ToSlash(object)
	}

	if objectExt := filepath.Ext(object); objectExt != "" {
		content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]
		if ok {
			contentType = content.ContentType
		}
	}
	objInfo := ObjectInfo{
		Bucket:       bucket,
		Name:         object,
		ModifiedTime: stat.ModTime(),
		Size:         stat.Size(),
		ContentType:  contentType,
		IsDir:        stat.Mode().IsDir(),
	}
	return objInfo, nil
}

// isMD5SumEqual - returns true if both hex-encoded md5sums decode successfully and match, false otherwise.
func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) bool {
	// Verify the md5sum.
	if expectedMD5Sum != "" && actualMD5Sum != "" {
		// Decode md5sum to bytes from their hexadecimal
		// representations.
		expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum)
		if err != nil {
			return false
		}
		actualMD5SumBytes, err := hex.DecodeString(actualMD5Sum)
		if err != nil {
			return false
		}
		// Verify md5sum bytes are equal after successful decoding.
		if !bytes.Equal(expectedMD5SumBytes, actualMD5SumBytes) {
			return false
		}
		return true
	}
	return false
}

// PutObject - create an object.
func (fs Filesystem) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string) (ObjectInfo, *probe.Error) {
	// Check bucket name is valid.
	bucket, e := fs.checkBucketArg(bucket)
	if e != nil {
		return ObjectInfo{}, probe.NewError(e)
	}

	bucketDir := filepath.Join(fs.diskPath, bucket)

	// Verify object path is legal.
	if !IsValidObjectName(object) {
		return ObjectInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	if e = checkDiskFree(fs.diskPath, fs.minFreeDisk); e != nil {
		return ObjectInfo{}, probe.NewError(e)
	}

	// Get object path.
	objectPath := filepath.Join(bucketDir, object)

	// md5Hex representation.
	var md5Hex string
	if len(metadata) != 0 {
		md5Hex = metadata["md5Sum"]
	}

	// Write object.
	safeFile, e := safe.CreateFileWithPrefix(objectPath, md5Hex+"$tmpobject")
	if e != nil {
		switch e := e.(type) {
		case *os.PathError:
			if e.Op == "mkdir" {
				if strings.Contains(e.Error(), "not a directory") {
					return ObjectInfo{}, probe.NewError(ObjectExistsAsPrefix{Bucket: bucket, Prefix: object})
				}
			}
			return ObjectInfo{}, probe.NewError(e)
		default:
			return ObjectInfo{}, probe.NewError(e)
		}
	}

	// Initialize md5 writer.
	md5Writer := md5.New()

	// Instantiate a new multi writer.
	multiWriter := io.MultiWriter(md5Writer, safeFile)

	// Copy the payload through the checksumming multi writer.
	if size > 0 {
		if _, e = io.CopyN(multiWriter, data, size); e != nil {
			safeFile.CloseAndRemove()
			return ObjectInfo{}, probe.NewError(e)
		}
	} else {
		if _, e = io.Copy(multiWriter, data); e != nil {
			safeFile.CloseAndRemove()
			return ObjectInfo{}, probe.NewError(e)
		}
	}

	newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
	if md5Hex != "" {
		if newMD5Hex != md5Hex {
			return ObjectInfo{}, probe.NewError(BadDigest{md5Hex, newMD5Hex})
		}
	}

	// Stat again to get the latest metadata.
	st, e := os.Stat(safeFile.Name())
	if e != nil {
		return ObjectInfo{}, probe.NewError(e)
	}

	contentType := "application/octet-stream"
	if objectExt := filepath.Ext(objectPath); objectExt != "" {
		content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]
		if ok {
			contentType = content.ContentType
		}
	}
	newObject := ObjectInfo{
		Bucket:       bucket,
		Name:         object,
		ModifiedTime: st.ModTime(),
		Size:         st.Size(),
		MD5Sum:       newMD5Hex,
		ContentType:  contentType,
	}

	// Safely close and atomically rename the file.
	safeFile.Close()

	return newObject, nil
}
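
The atomicity here rests on the pkg/safe file semantics, which this hunk uses but does not show. The contract assumed throughout this change, inferred from the call sites above:

// Assumed contract of pkg/safe (not shown in this diff):
// - CreateFileWithPrefix/CreateFileWithSuffix write to a temporary sibling file.
// - Close() atomically renames the temp file to the target path.
// - CloseAndRemove() discards the temp file, leaving the target untouched.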

// deleteObjectPath - delete object path if it's empty.
func deleteObjectPath(basePath, deletePath, bucket, object string) *probe.Error {
	if basePath == deletePath {
		return nil
	}
	// Verify if the path exists.
	pathSt, e := os.Stat(deletePath)
	if e != nil {
		if os.IsNotExist(e) {
			return probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
		}
		return probe.NewError(e)
	}
	if pathSt.IsDir() {
		// Verify if directory is empty.
		empty, e := isDirEmpty(deletePath)
		if e != nil {
			return probe.NewError(e)
		}
		if !empty {
			return nil
		}
	}
	// Attempt to remove path.
	if e := os.Remove(deletePath); e != nil {
		return probe.NewError(e)
	}
	// Recursively go down the next path and delete again.
	if err := deleteObjectPath(basePath, filepath.Dir(deletePath), bucket, object); err != nil {
		return err.Trace(basePath, deletePath, bucket, object)
	}
	return nil
}

// DeleteObject - delete object.
func (fs Filesystem) DeleteObject(bucket, object string) *probe.Error {
	// Check bucket name is valid.
	bucket, e := fs.checkBucketArg(bucket)
	if e != nil {
		return probe.NewError(e)
	}

	bucketDir := filepath.Join(fs.diskPath, bucket)

	// Verify object path is legal.
	if !IsValidObjectName(object) {
		return probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	// Do not use filepath.Join() since filepath.Join strips off any
	// object names with '/', use them as is in a static manner so
	// that we can send a proper 'ObjectNotFound' reply back upon os.Stat().
	// os.PathSeparator is "\\" on Windows and "/" elsewhere, so the same
	// concatenation is correct on every platform.
	objectPath := fs.diskPath + string(os.PathSeparator) + bucket + string(os.PathSeparator) + object
	// Delete object path if it's empty.
	err := deleteObjectPath(bucketDir, objectPath, bucket, object)
	if err != nil {
		if os.IsNotExist(err.ToGoError()) {
			return probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
		}
		return err.Trace(bucketDir, objectPath, bucket, object)
	}
	return nil
}

fs-utils.go
@@ -1,5 +1,5 @@
/*
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -18,48 +18,83 @@ package main

import (
	"regexp"
	"runtime"
	"strings"
	"unicode/utf8"
)

// validBucket regexp.
var validBucket = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
// validVolname regexp.
var validVolname = regexp.MustCompile(`^.{3,63}$`)

// IsValidBucketName verifies a bucket name in accordance with Amazon's
// requirements. It must be 3-63 characters long, can contain dashes
// and periods, but must begin and end with a lowercase letter or a number.
// See: http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
func IsValidBucketName(bucket string) bool {
	if len(bucket) < 3 || len(bucket) > 63 {
// isValidVolname verifies a volume name in accordance with object
// layer requirements.
func isValidVolname(volname string) bool {
	if !validVolname.MatchString(volname) {
		return false
	}
	if bucket[0] == '.' || bucket[len(bucket)-1] == '.' {
		return false
	switch runtime.GOOS {
	case "windows":
		// Volname shouldn't contain reserved characters on Windows.
		return !strings.ContainsAny(volname, "/\\:*?\"<>|")
	default:
		// Volname shouldn't have '/' in it.
		return !strings.ContainsAny(volname, "/")
	}
	return validBucket.MatchString(bucket)
}

// IsValidObjectName verifies an object name in accordance with Amazon's
// requirements. It cannot exceed 1024 characters and must be a valid UTF8
// string.
// See: http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
func IsValidObjectName(object string) bool {
	if len(object) > 1024 || len(object) == 0 {
// Keeping this as a lower-bound value that works across Linux, Darwin and Windows.
const pathMax = 4096

// isValidPath verifies if a path name is in accordance with FS limitations.
func isValidPath(path string) bool {
	// TODO: Make this FSType or Operating system specific.
	if len(path) > pathMax || len(path) == 0 {
		return false
	}
	if !utf8.ValidString(object) {
	if !utf8.ValidString(path) {
		return false
	}
	return true
}

// IsValidObjectPrefix verifies whether the prefix is a valid object name.
// It's valid to have an empty prefix.
func IsValidObjectPrefix(object string) bool {
// isValidPrefix verifies whether the prefix is a valid path.
func isValidPrefix(prefix string) bool {
	// Prefix can be empty.
	if object == "" {
	if prefix == "" {
		return true
	}
	// Verify if prefix is a valid object name.
	return IsValidObjectName(object)

	// Verify if prefix is a valid path.
	return isValidPath(prefix)
}

// List of reserved words for files, includes old and new ones.
var reservedKeywords = []string{
	"$multiparts",
	"$tmpobject",
	"$tmpfile",
	// Add new reserved words here if any are used in the future.
}

// hasReservedSuffix - returns true if name ends with a reserved keyword.
func hasReservedSuffix(name string) (isReserved bool) {
	for _, reservedKey := range reservedKeywords {
		if strings.HasSuffix(name, reservedKey) {
			isReserved = true
			break
		}
	}
	return isReserved
}

// hasReservedPrefix - returns true if name begins with a reserved keyword.
func hasReservedPrefix(name string) (isReserved bool) {
	for _, reservedKey := range reservedKeywords {
		if strings.HasPrefix(name, reservedKey) {
			isReserved = true
			break
		}
	}
	return isReserved
}

fs.go
@@ -1,5 +1,5 @@
/*
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -17,96 +17,529 @@
package main

import (
	"io"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"syscall"

	"github.com/minio/minio/pkg/disk"
	"github.com/minio/minio/pkg/probe"
	"github.com/minio/minio/pkg/safe"
)

// listObjectParams - list object params used for list object map
type listObjectParams struct {
const (
	fsListLimit = 1000
)

// listParams - list object params used for list object map
type listParams struct {
	bucket    string
	delimiter string
	recursive bool
	marker    string
	prefix    string
}

// listMultipartObjectParams - list multipart object params used for list multipart object map
type listMultipartObjectParams struct {
	bucket         string
	delimiter      string
	keyMarker      string
	prefix         string
	uploadIDMarker string
}

// Filesystem - local variables
type Filesystem struct {
// fsStorage - implements StorageAPI interface.
type fsStorage struct {
	diskPath    string
	diskInfo    disk.Info
	minFreeDisk int64
	rwLock      *sync.RWMutex
	listObjectMap map[listObjectParams][]*treeWalker
	listObjectMap map[listParams][]*treeWalker
	listObjectMapMutex *sync.Mutex
	listMultipartObjectMap map[listMultipartObjectParams][]<-chan multipartObjectInfo
	listMultipartObjectMapMutex *sync.Mutex
}

// newFS instantiates a new filesystem.
func newFS(diskPath string) (ObjectAPI, *probe.Error) {
	fs := &Filesystem{
		rwLock: &sync.RWMutex{},
// isDirEmpty - returns whether given directory is empty or not.
func isDirEmpty(dirname string) (status bool, err error) {
	f, err := os.Open(dirname)
	if err == nil {
		defer f.Close()
		if _, err = f.Readdirnames(1); err == io.EOF {
			status = true
			err = nil
		}
	fs.diskPath = diskPath
	}
	return status, err
}

/// Defaults
	// Minimum free disk required for i/o operations to succeed.
	fs.minFreeDisk = 5
// isDirExist - returns whether given directory exists or not.
func isDirExist(dirname string) (bool, error) {
	fi, e := os.Stat(dirname)
	if e != nil {
		if os.IsNotExist(e) {
			return false, nil
		}
		return false, e
	}
	return fi.IsDir(), nil
}

	// Initialize list object map.
	fs.listObjectMap = make(map[listObjectParams][]*treeWalker)
	fs.listObjectMapMutex = &sync.Mutex{}

	// Initialize list multipart map.
	fs.listMultipartObjectMap = make(map[listMultipartObjectParams][]<-chan multipartObjectInfo)
	fs.listMultipartObjectMapMutex = &sync.Mutex{}

	// Return here.
// Initialize a new storage disk.
func newFS(diskPath string) (StorageAPI, error) {
	if diskPath == "" {
		return nil, errInvalidArgument
	}
	st, e := os.Stat(diskPath)
	if e != nil {
		return nil, e
	}
	if !st.IsDir() {
		return nil, syscall.ENOTDIR
	}
	diskInfo, e := disk.GetInfo(diskPath)
	if e != nil {
		return nil, e
	}
	fs := fsStorage{
		diskPath:           diskPath,
		diskInfo:           diskInfo,
		minFreeDisk:        5, // Minimum 5% disk should be free.
		listObjectMap:      make(map[listParams][]*treeWalker),
		listObjectMapMutex: &sync.Mutex{},
	}
	return fs, nil
}

func (fs Filesystem) checkBucketArg(bucket string) (string, error) {
	if !IsValidBucketName(bucket) {
		return "", BucketNameInvalid{Bucket: bucket}
// checkDiskFree verifies if disk path has sufficient minimum free disk space.
func checkDiskFree(diskPath string, minFreeDisk int64) (err error) {
	di, err := disk.GetInfo(diskPath)
	if err != nil {
		return err
	}
	bucket = getActualBucketname(fs.diskPath, bucket)
	if status, e := isDirExist(filepath.Join(fs.diskPath, bucket)); !status {
		if e == nil {
			return "", BucketNotFound{Bucket: bucket}
		} else if os.IsNotExist(e) {
			return "", BucketNotFound{Bucket: bucket}
		} else {
			return "", e
		}
	}
	return bucket, nil
}

func checkDiskFree(diskPath string, minFreeDisk int64) error {
	di, e := disk.GetInfo(diskPath)
	if e != nil {
		return e
	}
	// Remove 5% from total space for cumulative disk space used for journalling, inodes etc.
	// Remove 5% from total space for cumulative disk
	// space used for journalling, inodes etc.
	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
	if int64(availableDiskSpace) <= minFreeDisk {
		return RootPathFull{Path: diskPath}
		return errDiskPathFull
	}

	// Success.
	return nil
}
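
To make the threshold concrete, a worked example with invented numbers:

// Total = 100 units, Free = 8 units.
// Usable total after the 5% journalling/inode reserve = 95 units, so
// availableDiskSpace = (8 / 95) * 100 ≈ 8.42 -> int64 gives 8 > 5, writes proceed.
// With Free = 4 units: (4 / 95) * 100 ≈ 4.21 -> 4 <= 5, the disk is reported full.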

// removeDuplicateVols - remove duplicate volumes.
func removeDuplicateVols(vols []VolInfo) []VolInfo {
	length := len(vols) - 1
	for i := 0; i < length; i++ {
		for j := i + 1; j <= length; j++ {
			if vols[i].Name == vols[j].Name {
				// Pick the latest volume, if there is a duplicate.
				if vols[i].Created.Sub(vols[j].Created) > 0 {
					vols[i] = vols[length]
				} else {
					vols[j] = vols[length]
				}
				vols = vols[0:length]
				length--
				j--
			}
		}
	}
	return vols
}
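
The dedup works in place: when a duplicate name is found, one of the two duplicate slots is overwritten by the current last element, the slice is shortened by one, and j steps back to re-examine the swapped-in entry. Quadratic, but allocation-free, which is fine for the small volume counts expected at a disk root. An illustrative trace with hypothetical values:

// [{a} {b} {a}] -> duplicate found at i=0, j=2: the losing slot takes the
// last element, the slice shrinks to [{a} {b}], and j re-checks the swap.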

// getAllUniqueVols - gets all the unique directories from diskPath.
func getAllUniqueVols(dirPath string) ([]VolInfo, error) {
	volumeFn := func(dirent fsDirent) bool {
		// Return all directories.
		return dirent.IsDir()
	}
	namesOnly := false // Returned dirent names are absolute.
	dirents, err := scandir(dirPath, volumeFn, namesOnly)
	if err != nil {
		return nil, err
	}
	var volsInfo []VolInfo
	for _, dirent := range dirents {
		fi, err := os.Stat(dirent.name)
		if err != nil {
			return nil, err
		}
		volsInfo = append(volsInfo, VolInfo{
			Name: fi.Name(),
			// As os.Stat() doesn't carry anything other than ModTime(),
			// use ModTime() as CreatedTime.
			Created: fi.ModTime(),
		})
	}
	volsInfo = removeDuplicateVols(volsInfo)
	return volsInfo, nil
}

// getVolumeDir - will convert incoming volume names to
// corresponding valid volume names on the backend in a platform
// compatible way for all operating systems. If volume is not found
// an error is generated.
func (s fsStorage) getVolumeDir(volume string) (string, error) {
	if !isValidVolname(volume) {
		return "", errInvalidArgument
	}
	volumeDir := filepath.Join(s.diskPath, volume)
	_, err := os.Stat(volumeDir)
	if err == nil {
		return volumeDir, nil
	}
	if os.IsNotExist(err) {
		var volsInfo []VolInfo
		volsInfo, err = getAllUniqueVols(s.diskPath)
		if err != nil {
			return volumeDir, errVolumeNotFound
		}
		for _, vol := range volsInfo {
			// Verify whether the lowercase version of the volume
			// equals the incoming volume; if so, use the proper name.
			if strings.ToLower(vol.Name) == volume {
				volumeDir = filepath.Join(s.diskPath, vol.Name)
				return volumeDir, nil
			}
		}
		return volumeDir, errVolumeNotFound
	} else if os.IsPermission(err) {
		return volumeDir, errVolumeAccessDenied
	}
	return volumeDir, err
}

// Make a volume entry.
func (s fsStorage) MakeVol(volume string) (err error) {
	volumeDir, err := s.getVolumeDir(volume)
	if err == nil {
		// Volume already exists, return error.
		return errVolumeExists
	}

	// Validate if disk has sufficient free space.
	if e := checkDiskFree(s.diskPath, s.minFreeDisk); e != nil {
		return e
	}

	// If volume is not found, create it.
	if err == errVolumeNotFound {
		// Make a volume entry.
		return os.Mkdir(volumeDir, 0700)
	}

	// For all other errors return here.
	return err
}

// ListVols - list volumes.
func (s fsStorage) ListVols() (volsInfo []VolInfo, err error) {
	volsInfo, err = getAllUniqueVols(s.diskPath)
	if err != nil {
		return nil, err
	}
	for i, vol := range volsInfo {
		// Volname on case sensitive fs backends can come in as
		// capitalized, but object layer cannot consume it
		// directly. Convert it as we see fit. Rewrite in place
		// rather than appending, so entries are not duplicated.
		volsInfo[i].Name = strings.ToLower(vol.Name)
	}
	return volsInfo, nil
}

// StatVol - get volume info.
func (s fsStorage) StatVol(volume string) (volInfo VolInfo, err error) {
	// Verify if volume is valid and it exists.
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		return VolInfo{}, err
	}
	// Stat a volume entry.
	var st os.FileInfo
	st, err = os.Stat(volumeDir)
	if err != nil {
		if os.IsNotExist(err) {
			return VolInfo{}, errVolumeNotFound
		}
		return VolInfo{}, err
	}
	// As os.Stat() doesn't carry anything other than ModTime(),
	// use ModTime() as CreatedTime.
	createdTime := st.ModTime()
	return VolInfo{
		Name:    volume,
		Created: createdTime,
	}, nil
}

// DeleteVol - delete a volume.
func (s fsStorage) DeleteVol(volume string) error {
	// Verify if volume is valid and it exists.
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		return err
	}
	err = os.Remove(volumeDir)
	if err != nil {
		if os.IsNotExist(err) {
			return errVolumeNotFound
		} else if strings.Contains(err.Error(), "directory is not empty") {
			// On Windows the string is slightly different; handle it here.
			return errVolumeNotEmpty
		} else if strings.Contains(err.Error(), "directory not empty") {
			// Hopefully for all other operating systems, this is
			// assumed to be consistent.
			return errVolumeNotEmpty
		}
		return err
	}
	return err
}

// Save the goroutine reference in the map.
func (s *fsStorage) saveTreeWalk(params listParams, walker *treeWalker) {
	s.listObjectMapMutex.Lock()
	defer s.listObjectMapMutex.Unlock()

	walkers := s.listObjectMap[params]
	walkers = append(walkers, walker)

	s.listObjectMap[params] = walkers
}

// Lookup the goroutine reference from the map.
func (s *fsStorage) lookupTreeWalk(params listParams) *treeWalker {
	s.listObjectMapMutex.Lock()
	defer s.listObjectMapMutex.Unlock()

	if walkChs, ok := s.listObjectMap[params]; ok {
		for i, walkCh := range walkChs {
			if !walkCh.timedOut {
				newWalkChs := walkChs[i+1:]
				if len(newWalkChs) > 0 {
					s.listObjectMap[params] = newWalkChs
				} else {
					delete(s.listObjectMap, params)
				}
				return walkCh
			}
		}
		// As all channels are timed out, delete the map entry.
		delete(s.listObjectMap, params)
	}
	return nil
}

// GetRootPath - get root path.
func (fs Filesystem) GetRootPath() string {
	return fs.diskPath
// List operation.
func (s fsStorage) ListFiles(volume, prefix, marker string, recursive bool, count int) ([]FileInfo, bool, error) {
	// Verify if volume is valid and it exists.
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		return nil, true, err
	}
	var fileInfos []FileInfo

	// Verify if marker has prefix.
	if marker != "" && !strings.HasPrefix(marker, prefix) {
		return nil, true, errInvalidArgument
	}

	// Return empty response for a valid request when count is 0.
	if count == 0 {
		return nil, true, nil
	}

	// Overflowing count - reset to fsListLimit.
	if count < 0 || count > fsListLimit {
		count = fsListLimit
	}

	// Verify if prefix exists.
	prefixDir := filepath.Dir(filepath.FromSlash(prefix))
	prefixRootDir := filepath.Join(volumeDir, prefixDir)
	if status, err := isDirExist(prefixRootDir); !status {
		if err == nil {
			// Prefix does not exist; not an error, just respond with an empty list.
			return nil, true, nil
		} else if strings.Contains(err.Error(), "not a directory") {
			// Prefix exists as a file.
			return nil, true, nil
		}
		// The rest of the errors are treated as failure.
		return nil, true, err
	}

	// Maximum 1000 files returned in a single call.
	// Further calls will set the right marker value to continue reading the rest of the files.
	// lookupTreeWalk returns nil if this call to ListFiles is the first one.
	// On further calls to ListFiles to retrieve more files within the timeout period,
	// lookupTreeWalk returns the channel from which the rest of the objects can be retrieved.
	walker := s.lookupTreeWalk(listParams{volume, recursive, marker, prefix})
	if walker == nil {
		walker = startTreeWalk(s.diskPath, volume, filepath.FromSlash(prefix), filepath.FromSlash(marker), recursive)
	}
	nextMarker := ""
	for i := 0; i < count; {
		walkResult, ok := <-walker.ch
		if !ok {
			// Closed channel.
			return fileInfos, true, nil
		}
		// For any walk error return right away.
		if walkResult.err != nil {
			return nil, true, walkResult.err
		}
		fileInfo := walkResult.fileInfo
		fileInfo.Name = filepath.ToSlash(fileInfo.Name)
		fileInfos = append(fileInfos, fileInfo)
		// We have listed everything, return.
		if walkResult.end {
			return fileInfos, true, nil
		}
		nextMarker = fileInfo.Name
		i++
	}
	s.saveTreeWalk(listParams{volume, recursive, nextMarker, prefix}, walker)
	return fileInfos, false, nil
}
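
A hedged usage sketch of the marker protocol from the caller's side; the storage value is assumed to hold a StorageAPI, and the volume and prefix names are invented for illustration:

// Page through every object under "photos/" in volume "testvolume".
marker := ""
for {
	fileInfos, eof, err := storage.ListFiles("testvolume", "photos/", marker, true, 1000)
	if err != nil {
		break // handle the error as appropriate
	}
	for _, fi := range fileInfos {
		fmt.Println(fi.Name)
	}
	if eof || len(fileInfos) == 0 {
		break
	}
	// Resume from the last name we saw; the storage layer may hand back
	// the still-running tree walk if we return within its timeout.
	marker = fileInfos[len(fileInfos)-1].Name
}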

// ReadFile - read a file at a given offset.
func (s fsStorage) ReadFile(volume string, path string, offset int64) (readCloser io.ReadCloser, err error) {
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		return nil, err
	}

	filePath := filepath.Join(volumeDir, filepath.FromSlash(path))
	file, err := os.Open(filePath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, errFileNotFound
		} else if os.IsPermission(err) {
			return nil, errFileAccessDenied
		}
		return nil, err
	}
	st, err := file.Stat()
	if err != nil {
		return nil, err
	}
	// Verify it is a regular file, since a subsequent Seek on anything else is undefined.
	if !st.Mode().IsRegular() {
		return nil, errIsNotRegular
	}
	// Seek to requested offset.
	_, err = file.Seek(offset, os.SEEK_SET)
	if err != nil {
		return nil, err
	}
	return file, nil
}

// CreateFile - create a file at path.
func (s fsStorage) CreateFile(volume, path string) (writeCloser io.WriteCloser, err error) {
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		return nil, err
	}
	if err := checkDiskFree(s.diskPath, s.minFreeDisk); err != nil {
		return nil, err
	}
	filePath := filepath.Join(volumeDir, path)
	// Verify if the file already exists and is not of regular type.
	if st, err := os.Stat(filePath); err == nil {
		if st.IsDir() {
			return nil, errIsNotRegular
		}
	}
	return safe.CreateFileWithPrefix(filePath, "$tmpfile")
}

// StatFile - get file info.
func (s fsStorage) StatFile(volume, path string) (file FileInfo, err error) {
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		return FileInfo{}, err
	}

	filePath := filepath.Join(volumeDir, filepath.FromSlash(path))
	st, err := os.Stat(filePath)
	if err != nil {
		// File is really not found.
		if os.IsNotExist(err) {
			return FileInfo{}, errFileNotFound
		}
		// File path cannot be verified since one of the parents is a file.
		if strings.Contains(err.Error(), "not a directory") {
			return FileInfo{}, errIsNotRegular
		}
		// Return all other errors here.
		return FileInfo{}, err
	}
	// If it's a directory, it's not a regular file.
	if st.Mode().IsDir() {
		return FileInfo{}, errIsNotRegular
	}
	file = FileInfo{
		Volume:  volume,
		Name:    path,
		ModTime: st.ModTime(),
		Size:    st.Size(),
		Mode:    st.Mode(),
	}
	return file, nil
}

// deleteFile - delete file path if it's empty.
func deleteFile(basePath, deletePath string) error {
	if basePath == deletePath {
		return nil
	}
	// Verify if the path exists.
	pathSt, err := os.Stat(deletePath)
	if err != nil {
		if os.IsNotExist(err) {
			return errFileNotFound
		} else if os.IsPermission(err) {
			return errFileAccessDenied
		}
		return err
	}
	if pathSt.IsDir() {
		// Verify if directory is empty.
		empty, err := isDirEmpty(deletePath)
		if err != nil {
			return err
		}
		if !empty {
			return nil
		}
	}
	// Attempt to remove path.
	if err := os.Remove(deletePath); err != nil {
		return err
	}
	// Recursively go down the next path and delete again.
	if err := deleteFile(basePath, filepath.Dir(deletePath)); err != nil {
		return err
	}
	return nil
}

// DeleteFile - delete a file at path.
func (s fsStorage) DeleteFile(volume, path string) error {
	volumeDir, err := s.getVolumeDir(volume)
	if err != nil {
		return err
	}

	// The following code is needed so that we retain any "/" suffix
	// present in the path argument.
	filePath := filepath.Join(volumeDir, filepath.FromSlash(path))
	if strings.HasSuffix(filepath.FromSlash(path), string(os.PathSeparator)) {
		filePath = filePath + string(os.PathSeparator)
	}

	// Delete the file, and also delete the parent directory if it's empty.
	return deleteFile(volumeDir, filePath)
}

fs_test.go
@@ -1,44 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"io/ioutil"
	"os"

	. "gopkg.in/check.v1"
)

func (s *MyAPISuite) TestAPISuite(c *C) {
	var storageList []string
	create := func() ObjectAPI {
		path, e := ioutil.TempDir(os.TempDir(), "minio-")
		c.Check(e, IsNil)
		storageList = append(storageList, path)
		store, err := newFS(path)
		c.Check(err, IsNil)
		return store
	}
	APITestSuite(c, create)
	defer removeRoots(c, storageList)
}

func removeRoots(c *C, roots []string) {
	for _, root := range roots {
		os.RemoveAll(root)
	}
}

@@ -58,10 +58,10 @@ func (h redirectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Re-direction handled specifically for browsers.
	if strings.Contains(r.Header.Get("User-Agent"), "Mozilla") {
		// '/' is redirected to 'locationPrefix/'
		// '/rpc' is redirected to 'locationPrefix/rpc'
		// '/webrpc' is redirected to 'locationPrefix/webrpc'
		// '/login' is redirected to 'locationPrefix/login'
		switch r.URL.Path {
		case "/", "/rpc", "/login", "/favicon.ico":
		case "/", "/webrpc", "/login", "/favicon.ico":
			location := h.locationPrefix + r.URL.Path
			// Redirect to new location.
			http.Redirect(w, r, location, http.StatusTemporaryRedirect)

network-fs.go (new file)
@@ -0,0 +1,243 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/rpc"
	"net/url"
	urlpath "path"
	"strconv"
	"strings"
	"time"
)

type networkFS struct {
	netScheme  string
	netAddr    string
	netPath    string
	rpcClient  *rpc.Client
	httpClient *http.Client
}

const (
	storageRPCPath = reservedBucket + "/rpc/storage"
)

// splits network path into its components Address and Path.
func splitNetPath(networkPath string) (netAddr, netPath string) {
	index := strings.LastIndex(networkPath, ":")
	netAddr = networkPath[:index]
	netPath = networkPath[index+1:]
	return netAddr, netPath
}
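
For a concrete picture of the split on the last ':' (values invented):

// splitNetPath("localhost:/export/disk1") -> ("localhost", "/export/disk1")
// splitNetPath("10.1.10.1:/mnt/backend")  -> ("10.1.10.1", "/mnt/backend")
// Note: with no ':' at all, LastIndex returns -1 and the slice expression
// would panic, which is why newNetworkFS must reject such input first.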

// Converts rpc.ServerError to its underlying error. This function is
// written so that the storageAPI errors are consistent across network
// disks as well.
func toStorageErr(err error) error {
	switch err.Error() {
	case errVolumeNotFound.Error():
		return errVolumeNotFound
	case errVolumeExists.Error():
		return errVolumeExists
	case errFileNotFound.Error():
		return errFileNotFound
	case errIsNotRegular.Error():
		return errIsNotRegular
	case errVolumeNotEmpty.Error():
		return errVolumeNotEmpty
	case errFileAccessDenied.Error():
		return errFileAccessDenied
	case errVolumeAccessDenied.Error():
		return errVolumeAccessDenied
	}
	return err
}

// Initialize new network file system.
func newNetworkFS(networkPath string) (StorageAPI, error) {
	// Input validation: the path must be non-empty and contain a ":" separator.
	if networkPath == "" || strings.LastIndex(networkPath, ":") == -1 {
		return nil, errInvalidArgument
	}

	// TODO validate netAddr and netPath.
	netAddr, netPath := splitNetPath(networkPath)

	// Dial minio rpc storage http path.
	rpcClient, err := rpc.DialHTTPPath("tcp", netAddr, storageRPCPath)
	if err != nil {
		return nil, err
	}

	// Initialize http client.
	httpClient := &http.Client{
		// Setting a sensible timeout of 6 minutes to wait for
		// response headers. The request is proactively cancelled
		// after 6 minutes if no response was received from the server.
		Timeout:   6 * time.Minute,
		Transport: http.DefaultTransport,
	}

	// Initialize network storage.
	ndisk := &networkFS{
		netScheme:  "http", // TODO: fix for ssl rpc support.
		netAddr:    netAddr,
		netPath:    netPath,
		rpcClient:  rpcClient,
		httpClient: httpClient,
	}

	// Returns successfully here.
	return ndisk, nil
}
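
A hedged construction example; the address and export path are invented:

// storage implements StorageAPI over the wire.
storage, err := newNetworkFS("localhost:/export/disk1")
if err != nil {
	// e.g. errInvalidArgument for a path without ":", or an RPC dial failure.
	log.Fatal(err)
}
err = storage.MakeVol("testvolume")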

// MakeVol - make a volume.
func (n networkFS) MakeVol(volume string) error {
	reply := GenericReply{}
	if err := n.rpcClient.Call("Storage.MakeVolHandler", volume, &reply); err != nil {
		return toStorageErr(err)
	}
	return nil
}

// ListVols - List all volumes.
func (n networkFS) ListVols() (vols []VolInfo, err error) {
	listVolsReply := ListVolsReply{}
	err = n.rpcClient.Call("Storage.ListVolsHandler", "", &listVolsReply)
	if err != nil {
		return nil, err
	}
	return listVolsReply.Vols, nil
}

// StatVol - get current Stat volume info.
func (n networkFS) StatVol(volume string) (volInfo VolInfo, err error) {
	if err = n.rpcClient.Call("Storage.StatVolHandler", volume, &volInfo); err != nil {
		return VolInfo{}, toStorageErr(err)
	}
	return volInfo, nil
}

// DeleteVol - Delete a volume.
func (n networkFS) DeleteVol(volume string) error {
	reply := GenericReply{}
	if err := n.rpcClient.Call("Storage.DeleteVolHandler", volume, &reply); err != nil {
		return toStorageErr(err)
	}
	return nil
}
|
||||
|
||||
// File operations.
|
||||
|
||||
// CreateFile - create file.
|
||||
func (n networkFS) CreateFile(volume, path string) (writeCloser io.WriteCloser, err error) {
|
||||
writeURL := new(url.URL)
|
||||
writeURL.Scheme = n.netScheme
|
||||
writeURL.Host = n.netAddr
|
||||
writeURL.Path = fmt.Sprintf("%s/upload/%s", storageRPCPath, urlpath.Join(volume, path))
|
||||
|
||||
contentType := "application/octet-stream"
|
||||
readCloser, writeCloser := io.Pipe()
|
||||
go func() {
|
||||
resp, err := n.httpClient.Post(writeURL.String(), contentType, readCloser)
|
||||
if err != nil {
|
||||
readCloser.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
readCloser.CloseWithError(errFileNotFound)
|
||||
return
|
||||
}
|
||||
readCloser.CloseWithError(errors.New("Invalid response."))
|
||||
return
|
||||
}
|
||||
// Close the reader.
|
||||
readCloser.Close()
|
||||
}
|
||||
}()
|
||||
return writeCloser, nil
|
||||
}
|
||||
|
||||
// StatFile - get latest Stat information for a file at path.
|
||||
func (n networkFS) StatFile(volume, path string) (fileInfo FileInfo, err error) {
|
||||
if err = n.rpcClient.Call("Storage.StatFileHandler", StatFileArgs{
|
||||
Vol: volume,
|
||||
Path: path,
|
||||
}, &fileInfo); err != nil {
|
||||
return FileInfo{}, toStorageErr(err)
|
||||
}
|
||||
return fileInfo, nil
|
||||
}
|
||||
|
||||
// ReadFile - reads a file.
|
||||
func (n networkFS) ReadFile(volume string, path string, offset int64) (reader io.ReadCloser, err error) {
|
||||
readURL := new(url.URL)
|
||||
readURL.Scheme = n.netScheme
|
||||
readURL.Host = n.netAddr
|
||||
readURL.Path = fmt.Sprintf("%s/download/%s", storageRPCPath, urlpath.Join(volume, path))
|
||||
readQuery := make(url.Values)
|
||||
readQuery.Set("offset", strconv.FormatInt(offset, 10))
|
||||
readURL.RawQuery = readQuery.Encode()
|
||||
resp, err := n.httpClient.Get(readURL.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
return nil, errFileNotFound
|
||||
}
|
||||
return nil, errors.New("Invalid response")
|
||||
}
|
||||
}
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
// ListFiles - List all files in a volume.
|
||||
func (n networkFS) ListFiles(volume, prefix, marker string, recursive bool, count int) (files []FileInfo, eof bool, err error) {
|
||||
listFilesReply := ListFilesReply{}
|
||||
if err = n.rpcClient.Call("Storage.ListFilesHandler", ListFilesArgs{
|
||||
Vol: volume,
|
||||
Prefix: prefix,
|
||||
Marker: marker,
|
||||
Recursive: recursive,
|
||||
Count: count,
|
||||
}, &listFilesReply); err != nil {
|
||||
return nil, true, toStorageErr(err)
|
||||
}
|
||||
// Return successfully unmarshalled results.
|
||||
return listFilesReply.Files, listFilesReply.EOF, nil
|
||||
}
|
||||
|
||||
// DeleteFile - Delete a file at path.
|
||||
func (n networkFS) DeleteFile(volume, path string) (err error) {
|
||||
reply := GenericReply{}
|
||||
if err = n.rpcClient.Call("Storage.DeleteFileHandler", DeleteFileArgs{
|
||||
Vol: volume,
|
||||
Path: path,
|
||||
}, &reply); err != nil {
|
||||
return toStorageErr(err)
|
||||
}
|
||||
return nil
|
||||
}
@@ -1,33 +0,0 @@
-package main
-
-import (
-	"io"
-
-	"github.com/minio/minio/pkg/probe"
-)
-
-// ObjectAPI interface.
-type ObjectAPI interface {
-	// Bucket resource API.
-	DeleteBucket(bucket string) *probe.Error
-	ListBuckets() ([]BucketInfo, *probe.Error)
-	MakeBucket(bucket string) *probe.Error
-	GetBucketInfo(bucket string) (BucketInfo, *probe.Error)
-
-	// Bucket query API.
-	ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, *probe.Error)
-	ListMultipartUploads(bucket, objectPrefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, *probe.Error)
-
-	// Object resource API.
-	GetObject(bucket, object string, startOffset int64) (io.ReadCloser, *probe.Error)
-	GetObjectInfo(bucket, object string) (ObjectInfo, *probe.Error)
-	PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string) (ObjectInfo, *probe.Error)
-	DeleteObject(bucket, object string) *probe.Error
-
-	// Object query API.
-	NewMultipartUpload(bucket, object string) (string, *probe.Error)
-	PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, *probe.Error)
-	ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, *probe.Error)
-	CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (ObjectInfo, *probe.Error)
-	AbortMultipartUpload(bucket, object, uploadID string) *probe.Error
-}
@@ -24,29 +24,34 @@ import (
	"strconv"
	"strings"
	"testing"
+
+	"github.com/minio/minio/pkg/probe"
)

func TestListObjects(t *testing.T) {
-	// Make a temporary directory to use as the fs.
+	// Make a temporary directory to use as the obj.
	directory, e := ioutil.TempDir("", "minio-list-object-test")
	if e != nil {
		t.Fatal(e)
	}
	defer os.RemoveAll(directory)

-	// Create the fs.
-	fs, err := newFS(directory)
-	if err != nil {
-		t.Fatal(err)
+	// Create the obj.
+	fs, e := newFS(directory)
+	if e != nil {
+		t.Fatal(e)
	}

+	obj := newObjectLayer(fs)
+	var err *probe.Error
	// This bucket is used for testing ListObject operations.
-	err = fs.MakeBucket("test-bucket-list-object")
+	err = obj.MakeBucket("test-bucket-list-object")
	if err != nil {
		t.Fatal(err)
	}
	// Will not store any objects in this bucket,
	// It's to test ListObjects on an empty bucket.
-	err = fs.MakeBucket("empty-bucket")
+	err = obj.MakeBucket("empty-bucket")
	if err != nil {
		t.Fatal(err)
	}
@@ -57,36 +62,36 @@ func TestListObjects(t *testing.T) {
	}
	defer os.Remove(tmpfile.Name()) // clean up

-	_, err = fs.PutObject("test-bucket-list-object", "Asia-maps", int64(len("asia-maps")), bytes.NewBufferString("asia-maps"), nil)
+	_, err = obj.PutObject("test-bucket-list-object", "Asia-maps", int64(len("asia-maps")), bytes.NewBufferString("asia-maps"), nil)
	if err != nil {
		t.Fatal(err)
	}

-	_, err = fs.PutObject("test-bucket-list-object", "Asia/India/India-summer-photos-1", int64(len("contentstring")), bytes.NewBufferString("contentstring"), nil)
+	_, err = obj.PutObject("test-bucket-list-object", "Asia/India/India-summer-photos-1", int64(len("contentstring")), bytes.NewBufferString("contentstring"), nil)
	if err != nil {
		t.Fatal(err)
	}

-	_, err = fs.PutObject("test-bucket-list-object", "Asia/India/Karnataka/Bangalore/Koramangala/pics", int64(len("contentstring")), bytes.NewBufferString("contentstring"), nil)
+	_, err = obj.PutObject("test-bucket-list-object", "Asia/India/Karnataka/Bangalore/Koramangala/pics", int64(len("contentstring")), bytes.NewBufferString("contentstring"), nil)
	if err != nil {
		t.Fatal(err)
	}

	for i := 0; i < 2; i++ {
		key := "newPrefix" + strconv.Itoa(i)
-		_, err = fs.PutObject("test-bucket-list-object", key, int64(len(key)), bytes.NewBufferString(key), nil)
+		_, err = obj.PutObject("test-bucket-list-object", key, int64(len(key)), bytes.NewBufferString(key), nil)
		if err != nil {
			t.Fatal(err)
		}
	}
-	_, err = fs.PutObject("test-bucket-list-object", "newzen/zen/recurse/again/again/again/pics", int64(len("recurse")), bytes.NewBufferString("recurse"), nil)
+	_, err = obj.PutObject("test-bucket-list-object", "newzen/zen/recurse/again/again/again/pics", int64(len("recurse")), bytes.NewBufferString("recurse"), nil)
	if err != nil {
		t.Fatal(err)
	}

	for i := 0; i < 3; i++ {
		key := "obj" + strconv.Itoa(i)
-		_, err = fs.PutObject("test-bucket-list-object", key, int64(len(key)), bytes.NewBufferString(key), nil)
+		_, err = obj.PutObject("test-bucket-list-object", key, int64(len(key)), bytes.NewBufferString(key), nil)
		if err != nil {
			t.Fatal(err)
		}
@@ -529,7 +534,7 @@ func TestListObjects(t *testing.T) {
	}

	for i, testCase := range testCases {
-		result, err := fs.ListObjects(testCase.bucketName, testCase.prefix, testCase.marker, testCase.delimeter, testCase.maxKeys)
+		result, err := obj.ListObjects(testCase.bucketName, testCase.prefix, testCase.marker, testCase.delimeter, testCase.maxKeys)
		if err != nil && testCase.shouldPass {
			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Cause.Error())
		}
@@ -565,28 +570,31 @@ func TestListObjects(t *testing.T) {
}

func BenchmarkListObjects(b *testing.B) {
-	// Make a temporary directory to use as the fs.
+	// Make a temporary directory to use as the obj.
	directory, e := ioutil.TempDir("", "minio-list-benchmark")
	if e != nil {
		b.Fatal(e)
	}
	defer os.RemoveAll(directory)

-	// Create the fs.
-	fs, err := newFS(directory)
-	if err != nil {
-		b.Fatal(err)
+	// Create the obj.
+	fs, e := newFS(directory)
+	if e != nil {
+		b.Fatal(e)
	}

+	obj := newObjectLayer(fs)
+	var err *probe.Error

	// Create a bucket.
-	err = fs.MakeBucket("ls-benchmark-bucket")
+	err = obj.MakeBucket("ls-benchmark-bucket")
	if err != nil {
		b.Fatal(err)
	}

	for i := 0; i < 20000; i++ {
		key := "obj" + strconv.Itoa(i)
-		_, err = fs.PutObject("ls-benchmark-bucket", key, int64(len(key)), bytes.NewBufferString(key), nil)
+		_, err = obj.PutObject("ls-benchmark-bucket", key, int64(len(key)), bytes.NewBufferString(key), nil)
		if err != nil {
			b.Fatal(err)
		}
@@ -596,7 +604,7 @@ func BenchmarkListObjects(b *testing.B) {

	// List the buckets over and over and over.
	for i := 0; i < b.N; i++ {
-		_, err = fs.ListObjects("ls-benchmark-bucket", "", "obj9000", "", -1)
+		_, err = obj.ListObjects("ls-benchmark-bucket", "", "obj9000", "", -1)
		if err != nil {
			b.Fatal(err)
		}
object-api-multipart.go (new file)
@@ -0,0 +1,559 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"path"
	"strconv"
	"strings"

	"github.com/minio/minio/pkg/probe"
	"github.com/skyrings/skyring-common/tools/uuid"
)

const (
	minioMetaVolume = ".minio"
)

// checkLeafDirectory - verifies if a given path is a leaf directory; if
// yes, returns all the files inside it.
func (o objectAPI) checkLeafDirectory(prefixPath string) (isLeaf bool, fis []FileInfo) {
	var allFileInfos []FileInfo
	marker := ""
	for {
		fileInfos, eof, e := o.storage.ListFiles(minioMetaVolume, prefixPath, marker, false, 1000)
		if e != nil {
			break
		}
		allFileInfos = append(allFileInfos, fileInfos...)
		if eof || len(fileInfos) == 0 {
			break
		}
		// Advance the marker so the next iteration resumes after the last entry.
		marker = fileInfos[len(fileInfos)-1].Name
	}
	for _, fileInfo := range allFileInfos {
		if fileInfo.Mode.IsDir() {
			isLeaf = false
			return isLeaf, nil
		}
		fileName := path.Base(fileInfo.Name)
		if !strings.Contains(fileName, ".") {
			fis = append(fis, fileInfo)
		}
	}
	isLeaf = true
	return isLeaf, fis
}
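
// Illustrative note (not part of this change; names hypothetical): under the
// ".minio" meta volume, an in-progress multipart upload for bucket/object is
// laid out as a directory of flat files,
//
//	.minio/mybucket/myobject/<uploadID>             (reserves the upload id)
//	.minio/mybucket/myobject/<uploadID>.1.<md5hex>  (part 1)
//	.minio/mybucket/myobject/<uploadID>.2.<md5hex>  (part 2)
//
// so checkLeafDirectory reports "mybucket/myobject/" as a leaf once only such
// files (and no subdirectories) remain beneath it, and the dot-free entries
// it returns are exactly the upload id reservation files.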

// ListMultipartUploads - list multipart uploads.
func (o objectAPI) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, *probe.Error) {
	result := ListMultipartsInfo{}
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return ListMultipartsInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectPrefix(prefix) {
		return ListMultipartsInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: prefix})
	}
	if _, e := o.storage.StatVol(minioMetaVolume); e != nil {
		if e == errVolumeNotFound {
			e = o.storage.MakeVol(minioMetaVolume)
			if e != nil {
				return ListMultipartsInfo{}, probe.NewError(e)
			}
		}
	}
	// Verify if delimiter is anything other than '/', which we do not support.
	if delimiter != "" && delimiter != slashPathSeparator {
		return ListMultipartsInfo{}, probe.NewError(UnsupportedDelimiter{
			Delimiter: delimiter,
		})
	}
	// Verify if marker has prefix.
	if keyMarker != "" && !strings.HasPrefix(keyMarker, prefix) {
		return ListMultipartsInfo{}, probe.NewError(InvalidMarkerPrefixCombination{
			Marker: keyMarker,
			Prefix: prefix,
		})
	}
	if uploadIDMarker != "" {
		if strings.HasSuffix(keyMarker, slashPathSeparator) {
			return result, probe.NewError(InvalidUploadIDKeyCombination{
				UploadIDMarker: uploadIDMarker,
				KeyMarker:      keyMarker,
			})
		}
		id, e := uuid.Parse(uploadIDMarker)
		if e != nil {
			return result, probe.NewError(e)
		}
		if id.IsZero() {
			return result, probe.NewError(MalformedUploadID{
				UploadID: uploadIDMarker,
			})
		}
	}

	recursive := true
	if delimiter == slashPathSeparator {
		recursive = false
	}
	result.IsTruncated = true
	result.MaxUploads = maxUploads
	newMaxUploads := 0
	// Not using path.Join() as it strips off the trailing '/'.
	// Also bucket should always be followed by '/' even if prefix is empty.
	prefixPath := pathJoin(bucket, prefix)
	if recursive {
		keyMarkerPath := ""
		if keyMarker != "" {
			keyMarkerPath = path.Join(bucket, keyMarker, uploadIDMarker)
		}
	outerLoop:
		for {
			fileInfos, eof, e := o.storage.ListFiles(minioMetaVolume, prefixPath, keyMarkerPath, recursive, maxUploads-newMaxUploads)
			if e != nil {
				return ListMultipartsInfo{}, probe.NewError(e)
			}
			for _, fi := range fileInfos {
				keyMarkerPath = fi.Name
				// fi.Name will look like bucket/object/uploadID, extract object and uploadID.
				uploadID := path.Base(fi.Name)
				objectName := strings.TrimPrefix(path.Dir(fi.Name), bucket+slashPathSeparator)
				if strings.Contains(uploadID, ".") {
					// Contains part number and md5sum info, skip this.
					continue
				}
				result.Uploads = append(result.Uploads, uploadMetadata{
					Object:    objectName,
					UploadID:  uploadID,
					Initiated: fi.ModTime,
				})
				result.NextKeyMarker = objectName
				result.NextUploadIDMarker = uploadID
				newMaxUploads++
				if newMaxUploads == maxUploads {
					if eof {
						result.IsTruncated = false
					}
					break outerLoop
				}
			}
			if eof {
				result.IsTruncated = false
				break
			}
		}
		if !result.IsTruncated {
			result.NextKeyMarker = ""
			result.NextUploadIDMarker = ""
		}
		return result, nil
	}

	var fileInfos []FileInfo
	// Read all the "fileInfos" in the prefix.
	marker := ""
	for {
		fis, eof, e := o.storage.ListFiles(minioMetaVolume, prefixPath, marker, recursive, 1000)
		if e != nil {
			return ListMultipartsInfo{}, probe.NewError(e)
		}
		for _, fi := range fis {
			marker = fi.Name
			if fi.Mode.IsDir() {
				fileInfos = append(fileInfos, fi)
			}
		}
		if eof {
			break
		}
	}
	// Create "uploads" slice from "fileInfos" slice.
	var uploads []uploadMetadata
	for _, fi := range fileInfos {
		leaf, entries := o.checkLeafDirectory(fi.Name)
		objectName := strings.TrimPrefix(fi.Name, bucket+slashPathSeparator)
		if leaf {
			for _, entry := range entries {
				if strings.Contains(entry.Name, ".") {
					continue
				}
				uploads = append(uploads, uploadMetadata{
					Object:    strings.TrimSuffix(objectName, slashPathSeparator),
					UploadID:  path.Base(entry.Name),
					Initiated: entry.ModTime,
				})
			}
			continue
		}
		uploads = append(uploads, uploadMetadata{
			Object: objectName,
		})
	}
	index := 0
	for i, upload := range uploads {
		index = i
		if upload.Object > keyMarker {
			break
		}
		if uploads[index].Object == keyMarker && upload.UploadID > uploadIDMarker {
			break
		}
	}
	for ; index < len(uploads); index++ {
		if (len(result.Uploads) + len(result.CommonPrefixes)) == maxUploads {
			break
		}
		result.NextKeyMarker = uploads[index].Object
		if strings.HasSuffix(uploads[index].Object, slashPathSeparator) {
			// For a directory entry.
			result.CommonPrefixes = append(result.CommonPrefixes, uploads[index].Object)
			continue
		}
		result.NextUploadIDMarker = uploads[index].UploadID
		result.Uploads = append(result.Uploads, uploads[index])
	}
	if index == len(uploads) {
		result.IsTruncated = false
		result.NextKeyMarker = ""
		result.NextUploadIDMarker = ""
	}
	return result, nil
}

func (o objectAPI) NewMultipartUpload(bucket, object string) (string, *probe.Error) {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(object) {
		return "", probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}
	if _, e := o.storage.StatVol(minioMetaVolume); e != nil {
		if e == errVolumeNotFound {
			e = o.storage.MakeVol(minioMetaVolume)
			if e != nil {
				return "", probe.NewError(e)
			}
		}
	}
	for {
		uuid, e := uuid.New()
		if e != nil {
			return "", probe.NewError(e)
		}
		uploadID := uuid.String()
		uploadIDPath := path.Join(bucket, object, uploadID)
		if _, e = o.storage.StatFile(minioMetaVolume, uploadIDPath); e != nil {
			if e != errFileNotFound {
				return "", probe.NewError(e)
			}
			// uploadIDPath doesn't exist, so create an empty file to reserve the name.
			var w io.WriteCloser
			if w, e = o.storage.CreateFile(minioMetaVolume, uploadIDPath); e == nil {
				if e = w.Close(); e != nil {
					return "", probe.NewError(e)
				}
			} else {
				return "", probe.NewError(e)
			}
			return uploadID, nil
		}
		// uploadIDPath already exists.
		// Loop again to try with a different uuid generated.
	}
}

// isUploadIDExists - verify if a given uploadID exists and is valid.
func (o objectAPI) isUploadIDExists(bucket, object, uploadID string) (bool, error) {
	uploadIDPath := path.Join(bucket, object, uploadID)
	st, e := o.storage.StatFile(minioMetaVolume, uploadIDPath)
	if e != nil {
		// Upload id does not exist.
		if e == errFileNotFound {
			return false, nil
		}
		return false, e
	}
	// Upload id exists and is a regular file.
	return st.Mode.IsRegular(), nil
}

func (o objectAPI) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, *probe.Error) {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(object) {
		return "", probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}
	if status, e := o.isUploadIDExists(bucket, object, uploadID); e != nil {
		return "", probe.NewError(e)
	} else if !status {
		return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	partSuffix := fmt.Sprintf("%s.%d.%s", uploadID, partID, md5Hex)
	fileWriter, e := o.storage.CreateFile(minioMetaVolume, path.Join(bucket, object, partSuffix))
	if e != nil {
		if e == errVolumeNotFound {
			return "", probe.NewError(BucketNotFound{
				Bucket: bucket,
			})
		} else if e == errIsNotRegular {
			return "", probe.NewError(ObjectExistsAsPrefix{
				Bucket: bucket,
				Prefix: object,
			})
		}
		return "", probe.NewError(e)
	}

	// Initialize md5 writer.
	md5Writer := md5.New()

	// Instantiate a new multi writer.
	multiWriter := io.MultiWriter(md5Writer, fileWriter)

	// Instantiate checksum hashers and create a multiwriter.
	if size > 0 {
		if _, e = io.CopyN(multiWriter, data, size); e != nil {
			safeCloseAndRemove(fileWriter)
			return "", probe.NewError(e)
		}
	} else {
		if _, e = io.Copy(multiWriter, data); e != nil {
			safeCloseAndRemove(fileWriter)
			return "", probe.NewError(e)
		}
	}

	newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
	if md5Hex != "" {
		if newMD5Hex != md5Hex {
			safeCloseAndRemove(fileWriter)
			return "", probe.NewError(BadDigest{md5Hex, newMD5Hex})
		}
	}
	e = fileWriter.Close()
	if e != nil {
		return "", probe.NewError(e)
	}
	return newMD5Hex, nil
}
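
// Illustrative flow (not part of this change; bucket, object, readers and
// sizes are hypothetical): a caller drives a multipart upload roughly as
//
//	uploadID, perr := obj.NewMultipartUpload("mybucket", "myobject")
//	etag1, perr := obj.PutObjectPart("mybucket", "myobject", uploadID, 1, size1, rd1, "")
//	etag2, perr := obj.PutObjectPart("mybucket", "myobject", uploadID, 2, size2, rd2, "")
//	finalETag, perr := obj.CompleteMultipartUpload("mybucket", "myobject", uploadID,
//		[]completePart{{PartNumber: 1, ETag: etag1}, {PartNumber: 2, ETag: etag2}})
//
// Passing "" as md5Hex skips the per-part digest verification above.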

func (o objectAPI) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, *probe.Error) {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return ListPartsInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(object) {
		return ListPartsInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}
	// Create minio meta volume, if it doesn't exist yet.
	if _, e := o.storage.StatVol(minioMetaVolume); e != nil {
		if e == errVolumeNotFound {
			e = o.storage.MakeVol(minioMetaVolume)
			if e != nil {
				return ListPartsInfo{}, probe.NewError(e)
			}
		}
	}
	if status, e := o.isUploadIDExists(bucket, object, uploadID); e != nil {
		return ListPartsInfo{}, probe.NewError(e)
	} else if !status {
		return ListPartsInfo{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
	}
	result := ListPartsInfo{}
	marker := ""
	nextPartNumberMarker := 0
	uploadIDPath := path.Join(bucket, object, uploadID)
	// Figure out the marker for the next subsequent calls, if the
	// partNumberMarker is already set.
	if partNumberMarker > 0 {
		fileInfos, _, e := o.storage.ListFiles(minioMetaVolume, uploadIDPath+"."+strconv.Itoa(partNumberMarker)+".", "", false, 1)
		if e != nil {
			return result, probe.NewError(e)
		}
		if len(fileInfos) == 0 {
			return result, probe.NewError(InvalidPart{})
		}
		marker = fileInfos[0].Name
	}
	fileInfos, eof, e := o.storage.ListFiles(minioMetaVolume, uploadIDPath+".", marker, false, maxParts)
	if e != nil {
		return result, probe.NewError(InvalidPart{})
	}
	for _, fileInfo := range fileInfos {
		fileName := path.Base(fileInfo.Name)
		splitResult := strings.Split(fileName, ".")
		partNum, e := strconv.Atoi(splitResult[1])
		if e != nil {
			return result, probe.NewError(e)
		}
		md5sum := splitResult[2]
		result.Parts = append(result.Parts, partInfo{
			PartNumber:   partNum,
			LastModified: fileInfo.ModTime,
			ETag:         md5sum,
			Size:         fileInfo.Size,
		})
		nextPartNumberMarker = partNum
	}
	result.Bucket = bucket
	result.Object = object
	result.UploadID = uploadID
	result.PartNumberMarker = partNumberMarker
	result.NextPartNumberMarker = nextPartNumberMarker
	result.MaxParts = maxParts
	result.IsTruncated = !eof
	return result, nil
}

// Create an s3 compatible MD5sum for complete multipart transaction.
func makeS3MD5(md5Strs ...string) (string, *probe.Error) {
	var finalMD5Bytes []byte
	for _, md5Str := range md5Strs {
		md5Bytes, e := hex.DecodeString(md5Str)
		if e != nil {
			return "", probe.NewError(e)
		}
		finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
	}
	md5Hasher := md5.New()
	md5Hasher.Write(finalMD5Bytes)
	s3MD5 := fmt.Sprintf("%s-%d", hex.EncodeToString(md5Hasher.Sum(nil)), len(md5Strs))
	return s3MD5, nil
}
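
// Illustrative example (not part of this change): for a two-part upload the
// S3-style ETag is the md5 of the concatenated binary part digests with the
// part count appended, e.g.
//
//	s3MD5, _ := makeS3MD5(
//		"5d41402abc4b2a76b9719d911017c592", // md5 of part 1
//		"7d793037a0760186574b0282f2f435e7", // md5 of part 2
//	)
//	// s3MD5 == hex(md5(digest1 || digest2)) + "-2"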

func (o objectAPI) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (string, *probe.Error) {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(object) {
		return "", probe.NewError(ObjectNameInvalid{
			Bucket: bucket,
			Object: object,
		})
	}
	if status, e := o.isUploadIDExists(bucket, object, uploadID); e != nil {
		return "", probe.NewError(e)
	} else if !status {
		return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
	}
	fileWriter, e := o.storage.CreateFile(bucket, object)
	if e != nil {
		if e == errVolumeNotFound {
			return "", probe.NewError(BucketNotFound{
				Bucket: bucket,
			})
		} else if e == errIsNotRegular {
			return "", probe.NewError(ObjectExistsAsPrefix{
				Bucket: bucket,
				Prefix: object,
			})
		}
		return "", probe.NewError(e)
	}

	var md5Sums []string
	for _, part := range parts {
		partSuffix := fmt.Sprintf("%s.%d.%s", uploadID, part.PartNumber, part.ETag)
		var fileReader io.ReadCloser
		fileReader, e = o.storage.ReadFile(minioMetaVolume, path.Join(bucket, object, partSuffix), 0)
		if e != nil {
			safeCloseAndRemove(fileWriter)
			if e == errFileNotFound {
				return "", probe.NewError(InvalidPart{})
			}
			return "", probe.NewError(e)
		}
		_, e = io.Copy(fileWriter, fileReader)
		if e != nil {
			safeCloseAndRemove(fileWriter)
			return "", probe.NewError(e)
		}
		e = fileReader.Close()
		if e != nil {
			safeCloseAndRemove(fileWriter)
			return "", probe.NewError(e)
		}
		md5Sums = append(md5Sums, part.ETag)
	}
	e = fileWriter.Close()
	if e != nil {
		return "", probe.NewError(e)
	}

	// Save the s3 md5.
	s3MD5, err := makeS3MD5(md5Sums...)
	if err != nil {
		return "", err.Trace(md5Sums...)
	}

	// Cleanup all the parts.
	o.removeMultipartUpload(bucket, object, uploadID)

	return s3MD5, nil
}

func (o objectAPI) removeMultipartUpload(bucket, object, uploadID string) *probe.Error {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(object) {
		return probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}
	marker := ""
	for {
		uploadIDPath := path.Join(bucket, object, uploadID)
		fileInfos, eof, e := o.storage.ListFiles(minioMetaVolume, uploadIDPath, marker, false, 1000)
		if e != nil {
			return probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
		}
		for _, fileInfo := range fileInfos {
			o.storage.DeleteFile(minioMetaVolume, fileInfo.Name)
			marker = fileInfo.Name
		}
		if eof {
			break
		}
	}
	return nil
}

func (o objectAPI) AbortMultipartUpload(bucket, object, uploadID string) *probe.Error {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(object) {
		return probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}
	if status, e := o.isUploadIDExists(bucket, object, uploadID); e != nil {
		return probe.NewError(e)
	} else if !status {
		return probe.NewError(InvalidUploadID{UploadID: uploadID})
	}
	err := o.removeMultipartUpload(bucket, object, uploadID)
	if err != nil {
		return err.Trace(bucket, object, uploadID)
	}
	return nil
}
object-api.go (new file)
@@ -0,0 +1,336 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"crypto/md5"
	"encoding/hex"
	"errors"
	"io"
	"path/filepath"
	"strings"

	"github.com/minio/minio/pkg/mimedb"
	"github.com/minio/minio/pkg/probe"
	"github.com/minio/minio/pkg/safe"
)

type objectAPI struct {
	storage StorageAPI
}

func newObjectLayer(storage StorageAPI) *objectAPI {
	return &objectAPI{storage}
}
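
// Illustrative wiring (not part of this change): the server is expected to
// compose a storage layer with this object layer and hand it to the S3
// handlers, roughly:
//
//	storage, e := newFS(exportDir) // or newNetworkFS("host:port:/path")
//	objAPI := newObjectLayer(storage)
//	registerAPIRouter(mux, objectAPIHandlers{ObjectAPI: objAPI})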

/// Bucket operations

// MakeBucket - make a bucket.
func (o objectAPI) MakeBucket(bucket string) *probe.Error {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if e := o.storage.MakeVol(bucket); e != nil {
		if e == errVolumeExists {
			return probe.NewError(BucketExists{Bucket: bucket})
		}
		return probe.NewError(e)
	}
	return nil
}

// GetBucketInfo - get bucket info.
func (o objectAPI) GetBucketInfo(bucket string) (BucketInfo, *probe.Error) {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return BucketInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	vi, e := o.storage.StatVol(bucket)
	if e != nil {
		if e == errVolumeNotFound {
			return BucketInfo{}, probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return BucketInfo{}, probe.NewError(e)
	}
	return BucketInfo{
		Name:    bucket,
		Created: vi.Created,
	}, nil
}

// ListBuckets - list buckets.
func (o objectAPI) ListBuckets() ([]BucketInfo, *probe.Error) {
	var bucketInfos []BucketInfo
	vols, e := o.storage.ListVols()
	if e != nil {
		return nil, probe.NewError(e)
	}
	for _, vol := range vols {
		// StorageAPI can send volume names which are incompatible
		// with buckets, handle it and skip them.
		if !IsValidBucketName(vol.Name) {
			continue
		}
		bucketInfos = append(bucketInfos, BucketInfo{
			Name:    vol.Name,
			Created: vol.Created,
		})
	}
	return bucketInfos, nil
}

// DeleteBucket - delete a bucket.
func (o objectAPI) DeleteBucket(bucket string) *probe.Error {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if e := o.storage.DeleteVol(bucket); e != nil {
		if e == errVolumeNotFound {
			return probe.NewError(BucketNotFound{Bucket: bucket})
		} else if e == errVolumeNotEmpty {
			return probe.NewError(BucketNotEmpty{Bucket: bucket})
		}
		return probe.NewError(e)
	}
	return nil
}

/// Object Operations

// GetObject - get an object.
func (o objectAPI) GetObject(bucket, object string, startOffset int64) (io.ReadCloser, *probe.Error) {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return nil, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	// Verify if object is valid.
	if !IsValidObjectName(object) {
		return nil, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}
	r, e := o.storage.ReadFile(bucket, object, startOffset)
	if e != nil {
		if e == errVolumeNotFound {
			return nil, probe.NewError(BucketNotFound{Bucket: bucket})
		} else if e == errFileNotFound {
			return nil, probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
		}
		return nil, probe.NewError(e)
	}
	return r, nil
}

// GetObjectInfo - get object info.
func (o objectAPI) GetObjectInfo(bucket, object string) (ObjectInfo, *probe.Error) {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return ObjectInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	// Verify if object is valid.
	if !IsValidObjectName(object) {
		return ObjectInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}
	fi, e := o.storage.StatFile(bucket, object)
	if e != nil {
		if e == errVolumeNotFound {
			return ObjectInfo{}, probe.NewError(BucketNotFound{Bucket: bucket})
		} else if e == errFileNotFound || e == errIsNotRegular {
			return ObjectInfo{}, probe.NewError(ObjectNotFound{Bucket: bucket, Object: object})
			// Handle more lower level errors if needed.
		} else {
			return ObjectInfo{}, probe.NewError(e)
		}
	}
	contentType := "application/octet-stream"
	if objectExt := filepath.Ext(object); objectExt != "" {
		content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]
		if ok {
			contentType = content.ContentType
		}
	}
	return ObjectInfo{
		Bucket:      bucket,
		Name:        object,
		ModTime:     fi.ModTime,
		Size:        fi.Size,
		IsDir:       fi.Mode.IsDir(),
		ContentType: contentType,
		MD5Sum:      "", // Read from metadata.
	}, nil
}

// safeCloseAndRemove - safely closes and removes underlying temporary
// file writer if possible.
func safeCloseAndRemove(writer io.WriteCloser) error {
	// If writer is a safe file, attempt to close and remove.
	safeWriter, ok := writer.(*safe.File)
	if ok {
		return safeWriter.CloseAndRemove()
	}
	pipeWriter, ok := writer.(*io.PipeWriter)
	if ok {
		return pipeWriter.CloseWithError(errors.New("Close and error out."))
	}
	return nil
}

func (o objectAPI) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string) (string, *probe.Error) {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(object) {
		return "", probe.NewError(ObjectNameInvalid{
			Bucket: bucket,
			Object: object,
		})
	}
	fileWriter, e := o.storage.CreateFile(bucket, object)
	if e != nil {
		if e == errVolumeNotFound {
			return "", probe.NewError(BucketNotFound{
				Bucket: bucket,
			})
		} else if e == errIsNotRegular {
			return "", probe.NewError(ObjectExistsAsPrefix{
				Bucket: bucket,
				Prefix: object,
			})
		}
		return "", probe.NewError(e)
	}

	// Initialize md5 writer.
	md5Writer := md5.New()

	// Instantiate a new multi writer.
	multiWriter := io.MultiWriter(md5Writer, fileWriter)

	// Instantiate checksum hashers and create a multiwriter.
	if size > 0 {
		if _, e = io.CopyN(multiWriter, data, size); e != nil {
			safeCloseAndRemove(fileWriter)
			return "", probe.NewError(e)
		}
	} else {
		if _, e = io.Copy(multiWriter, data); e != nil {
			safeCloseAndRemove(fileWriter)
			return "", probe.NewError(e)
		}
	}

	newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
	// md5Hex representation.
	var md5Hex string
	if len(metadata) != 0 {
		md5Hex = metadata["md5Sum"]
	}
	if md5Hex != "" {
		if newMD5Hex != md5Hex {
			safeCloseAndRemove(fileWriter)
			return "", probe.NewError(BadDigest{md5Hex, newMD5Hex})
		}
	}
	e = fileWriter.Close()
	if e != nil {
		return "", probe.NewError(e)
	}

	// Return md5sum.
	return newMD5Hex, nil
}

func (o objectAPI) DeleteObject(bucket, object string) *probe.Error {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(object) {
		return probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}
	if e := o.storage.DeleteFile(bucket, object); e != nil {
		if e == errVolumeNotFound {
			return probe.NewError(BucketNotFound{Bucket: bucket})
		}
		if e == errFileNotFound {
			return probe.NewError(ObjectNotFound{
				Bucket: bucket,
				Object: object,
			})
		}
		return probe.NewError(e)
	}
	return nil
}

func (o objectAPI) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, *probe.Error) {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return ListObjectsInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectPrefix(prefix) {
		return ListObjectsInfo{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: prefix})
	}
	// Verify if delimiter is anything other than '/', which we do not support.
	if delimiter != "" && delimiter != slashPathSeparator {
		return ListObjectsInfo{}, probe.NewError(UnsupportedDelimiter{
			Delimiter: delimiter,
		})
	}
	// Verify if marker has prefix.
	if marker != "" {
		if !strings.HasPrefix(marker, prefix) {
			return ListObjectsInfo{}, probe.NewError(InvalidMarkerPrefixCombination{
				Marker: marker,
				Prefix: prefix,
			})
		}
	}
	recursive := true
	if delimiter == slashPathSeparator {
		recursive = false
	}
	fileInfos, eof, e := o.storage.ListFiles(bucket, prefix, marker, recursive, maxKeys)
	if e != nil {
		if e == errVolumeNotFound {
			return ListObjectsInfo{}, probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return ListObjectsInfo{}, probe.NewError(e)
	}
	if maxKeys == 0 {
		return ListObjectsInfo{}, nil
	}
	result := ListObjectsInfo{IsTruncated: !eof}
	for _, fileInfo := range fileInfos {
		// With delimiter set we fill in NextMarker and Prefixes.
		if delimiter == slashPathSeparator {
			result.NextMarker = fileInfo.Name
			if fileInfo.Mode.IsDir() {
				result.Prefixes = append(result.Prefixes, fileInfo.Name)
				continue
			}
		}
		result.Objects = append(result.Objects, ObjectInfo{
			Name:    fileInfo.Name,
			ModTime: fileInfo.ModTime,
			Size:    fileInfo.Size,
			IsDir:   fileInfo.Mode.IsDir(),
		})
	}
	return result, nil
}
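
// Illustrative behavior (not part of this change; names hypothetical): with
// delimiter "/" the walk is non-recursive, so ListObjects("mybucket",
// "photos/", "", "/", 100) returns the immediate objects under "photos/" in
// Objects and one entry per subdirectory, such as "photos/2016/", in
// Prefixes; with an empty delimiter the walk is fully recursive.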
@@ -20,14 +20,13 @@ import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
-	"fmt"
	"io"
	"io/ioutil"
	"os"
-	"path/filepath"
	"strconv"
	"strings"
	"testing"
+
+	"github.com/minio/minio/pkg/probe"
)

// Testing GetObjectInfo().
@@ -38,17 +37,21 @@ func TestGetObjectInfo(t *testing.T) {
	}
	defer os.RemoveAll(directory)

-	// Create the fs.
-	fs, err := newFS(directory)
-	if err != nil {
-		t.Fatal(err)
+	// Create the obj.
+	fs, e := newFS(directory)
+	if e != nil {
+		t.Fatal(e)
	}

+	obj := newObjectLayer(fs)
+	var err *probe.Error
+
	// This bucket is used for testing getObjectInfo operations.
-	err = fs.MakeBucket("test-getobjectinfo")
+	err = obj.MakeBucket("test-getobjectinfo")
	if err != nil {
		t.Fatal(err)
	}
-	_, err = fs.PutObject("test-getobjectinfo", "Asia/asiapics.jpg", int64(len("asiapics")), bytes.NewBufferString("asiapics"), nil)
+	_, err = obj.PutObject("test-getobjectinfo", "Asia/asiapics.jpg", int64(len("asiapics")), bytes.NewBufferString("asiapics"), nil)
	if err != nil {
		t.Fatal(err)
	}
@@ -76,8 +79,8 @@ func TestGetObjectInfo(t *testing.T) {
	{"abcdefgh", "abc", ObjectInfo{}, BucketNotFound{Bucket: "abcdefgh"}, false},
	{"ijklmnop", "efg", ObjectInfo{}, BucketNotFound{Bucket: "ijklmnop"}, false},
	// Test cases with valid but non-existing bucket names and invalid object name (Test number 8-9).
-	{"test-getobjectinfo", "", ObjectInfo{}, ObjectNameInvalid{Bucket: "test-getobjectinfo", Object: ""}, false},
+	{"test-getobjectinfo", "", ObjectInfo{}, ObjectNameInvalid{Bucket: "test-getobjectinfo", Object: ""}, false},
	{"abcdefgh", "", ObjectInfo{}, ObjectNameInvalid{Bucket: "abcdefgh", Object: ""}, false},
	{"ijklmnop", "", ObjectInfo{}, ObjectNameInvalid{Bucket: "ijklmnop", Object: ""}, false},
	// Test cases with non-existing object name with existing bucket (Test number 10-12).
	{"test-getobjectinfo", "Africa", ObjectInfo{}, ObjectNotFound{Bucket: "test-getobjectinfo", Object: "Africa"}, false},
	{"test-getobjectinfo", "Antartica", ObjectInfo{}, ObjectNotFound{Bucket: "test-getobjectinfo", Object: "Antartica"}, false},
@@ -88,7 +91,7 @@ func TestGetObjectInfo(t *testing.T) {
	{"test-getobjectinfo", "Asia/asiapics.jpg", resultCases[0], nil, true},
	}
	for i, testCase := range testCases {
-		result, err := fs.GetObjectInfo(testCase.bucketName, testCase.objectName)
+		result, err := obj.GetObjectInfo(testCase.bucketName, testCase.objectName)
		if err != nil && testCase.shouldPass {
			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Cause.Error())
		}
@@ -120,107 +123,25 @@ func TestGetObjectInfo(t *testing.T) {
		}
	}
}

-// Testing getObjectInfo().
-func TestGetObjectInfoCore(t *testing.T) {
-	directory, e := ioutil.TempDir("", "minio-get-objinfo-test")
-	if e != nil {
-		t.Fatal(e)
-	}
-	defer os.RemoveAll(directory)
-
-	// Create the fs.
-	fs, err := newFS(directory)
-	if err != nil {
-		t.Fatal(err)
-	}
-	// This bucket is used for testing getObjectInfo operations.
-	err = fs.MakeBucket("test-getobjinfo")
-	if err != nil {
-		t.Fatal(err)
-	}
-	_, err = fs.PutObject("test-getobjinfo", "Asia/asiapics.jpg", int64(len("asiapics")), bytes.NewBufferString("asiapics"), nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	resultCases := []ObjectInfo{
-		// ObjectInfo - 1.
-		// ObjectName object name set to a existing directory in the test case.
-		{Bucket: "test-getobjinfo", Name: "Asia", Size: 0, ContentType: "application/octet-stream", IsDir: true},
-		// ObjectInfo -2.
-		// ObjectName set to a existing object in the test case.
-		{Bucket: "test-getobjinfo", Name: "Asia/asiapics.jpg", Size: int64(len("asiapics")), ContentType: "image/jpeg", IsDir: false},
-		// ObjectInfo-3.
-		// Object name set to a non-existing object in the test case.
-		{Bucket: "test-getobjinfo", Name: "Africa", Size: 0, ContentType: "image/jpeg", IsDir: false},
-	}
-	testCases := []struct {
-		bucketName string
-		objectName string
-
-		// Expected output of getObjectInfo.
-		result ObjectInfo
-		err    error
-
-		// Flag indicating whether the test is expected to pass or not.
-		shouldPass bool
-	}{
-		// Testcase with object name set to a existing directory ( Test number 1).
-		{"test-getobjinfo", "Asia", resultCases[0], nil, true},
-		// ObjectName set to a existing object ( Test number 2).
-		{"test-getobjinfo", "Asia/asiapics.jpg", resultCases[1], nil, true},
-		// Object name set to a non-existing object. (Test number 3).
-		{"test-getobjinfo", "Africa", resultCases[2], fmt.Errorf("%s", filepath.FromSlash("test-getobjinfo/Africa")), false},
-	}
-	rootPath := fs.(*Filesystem).GetRootPath()
-	for i, testCase := range testCases {
-		result, err := getObjectInfo(rootPath, testCase.bucketName, testCase.objectName)
-		if err != nil && testCase.shouldPass {
-			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Cause.Error())
-		}
-		if err == nil && !testCase.shouldPass {
-			t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
-		}
-		// Failed as expected, but does it fail for the expected reason.
-		if err != nil && !testCase.shouldPass {
-			if !strings.Contains(err.Cause.Error(), testCase.err.Error()) {
-				t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Cause.Error())
-			}
-		}
-
-		// Test passes as expected, but the output values are verified for correctness here.
-		if err == nil && testCase.shouldPass {
-			if testCase.result.Bucket != result.Bucket {
-				t.Fatalf("Test %d: Expected Bucket name to be '%s', but found '%s' instead", i+1, testCase.result.Bucket, result.Bucket)
-			}
-			if testCase.result.Name != result.Name {
-				t.Errorf("Test %d: Expected Object name to be %s, but instead found it to be %s", i+1, testCase.result.Name, result.Name)
-			}
-			if testCase.result.ContentType != result.ContentType {
-				t.Errorf("Test %d: Expected Content Type of the object to be %v, but instead found it to be %v", i+1, testCase.result.ContentType, result.ContentType)
-			}
-			if testCase.result.IsDir != result.IsDir {
-				t.Errorf("Test %d: Expected IsDir flag of the object to be %v, but instead found it to be %v", i+1, testCase.result.IsDir, result.IsDir)
-			}
-		}
-	}
-}

func BenchmarkGetObject(b *testing.B) {
-	// Make a temporary directory to use as the fs.
+	// Make a temporary directory to use as the obj.
	directory, e := ioutil.TempDir("", "minio-benchmark-getobject")
	if e != nil {
		b.Fatal(e)
	}
	defer os.RemoveAll(directory)

-	// Create the fs.
-	fs, err := newFS(directory)
-	if err != nil {
-		b.Fatal(err)
+	// Create the obj.
+	fs, e := newFS(directory)
+	if e != nil {
+		b.Fatal(e)
	}

+	obj := newObjectLayer(fs)
+	var err *probe.Error
+
	// Make a bucket and put in a few objects.
-	err = fs.MakeBucket("bucket")
+	err = obj.MakeBucket("bucket")
	if err != nil {
		b.Fatal(err)
	}
@@ -231,7 +152,7 @@ func BenchmarkGetObject(b *testing.B) {
	metadata := make(map[string]string)
	for i := 0; i < 10; i++ {
		metadata["md5Sum"] = hex.EncodeToString(hasher.Sum(nil))
-		_, err = fs.PutObject("bucket", "object"+strconv.Itoa(i), int64(len(text)), bytes.NewBufferString(text), metadata)
+		_, err = obj.PutObject("bucket", "object"+strconv.Itoa(i), int64(len(text)), bytes.NewBufferString(text), metadata)
		if err != nil {
			b.Fatal(err)
		}
@@ -240,7 +161,7 @@ func BenchmarkGetObject(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		var buffer = new(bytes.Buffer)
-		r, err := fs.GetObject("bucket", "object"+strconv.Itoa(i%10), 0)
+		r, err := obj.GetObject("bucket", "object"+strconv.Itoa(i%10), 0)
		if err != nil {
			b.Error(err)
		}
@@ -28,12 +28,11 @@ type BucketInfo struct {
type ObjectInfo struct {
	Bucket       string
	Name         string
-	ModifiedTime time.Time
+	ModTime      time.Time
	ContentType  string
	MD5Sum       string
	Size         int64
	IsDir        bool
	Err          error
}

// ListPartsInfo - various types of object resources.
@@ -134,6 +134,34 @@ func (e BadDigest) Error() string {
	return "Bad digest expected " + e.ExpectedMD5 + " is not valid with what we calculated " + e.CalculatedMD5
}

+// UnsupportedDelimiter - unsupported delimiter.
+type UnsupportedDelimiter struct {
+	Delimiter string
+}
+
+func (e UnsupportedDelimiter) Error() string {
+	return fmt.Sprintf("delimiter '%s' is not supported. Only '/' is supported", e.Delimiter)
+}
+
+// InvalidUploadIDKeyCombination - invalid upload id and key marker
+// combination.
+type InvalidUploadIDKeyCombination struct {
+	UploadIDMarker, KeyMarker string
+}
+
+func (e InvalidUploadIDKeyCombination) Error() string {
+	return fmt.Sprintf("Invalid combination of uploadID marker '%s' and marker '%s'", e.UploadIDMarker, e.KeyMarker)
+}
+
+// InvalidMarkerPrefixCombination - invalid marker and prefix combination.
+type InvalidMarkerPrefixCombination struct {
+	Marker, Prefix string
+}
+
+func (e InvalidMarkerPrefixCombination) Error() string {
+	return fmt.Sprintf("Invalid combination of marker '%s' and prefix '%s'", e.Marker, e.Prefix)
+}
+
// InternalError - generic internal error
type InternalError struct{}

@@ -262,7 +290,16 @@ func (e InvalidRange) Error() string {

/// Multipart related errors

-// InvalidUploadID invalid upload id
+// MalformedUploadID malformed upload id.
+type MalformedUploadID struct {
+	UploadID string
+}
+
+func (e MalformedUploadID) Error() string {
+	return "Malformed upload id " + e.UploadID
+}
+
+// InvalidUploadID invalid upload id.
type InvalidUploadID struct {
	UploadID string
}
@@ -74,7 +74,7 @@ func errAllowableObjectNotFound(bucket string, r *http.Request) APIErrorCode {
// ----------
// This implementation of the GET operation retrieves object. To use GET,
// you must have READ access to the object.
-func (api objectStorageAPI) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
+func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
	var object, bucket string
	vars := mux.Vars(r)
	bucket = vars["bucket"]
@@ -97,7 +97,7 @@ func (api objectStorageAPI) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
			return
		}
	}

	// Fetch object stat info.
	objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object)
	if err != nil {
		switch err.ToGoError().(type) {
@@ -117,7 +117,7 @@ func (api objectStorageAPI) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
	}

	// Verify 'If-Modified-Since' and 'If-Unmodified-Since'.
-	lastModified := objInfo.ModifiedTime
+	lastModified := objInfo.ModTime
	if checkLastModified(w, r, lastModified) {
		return
	}
@@ -137,8 +137,15 @@ func (api objectStorageAPI) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
	startOffset := hrange.start
	readCloser, err := api.ObjectAPI.GetObject(bucket, object, startOffset)
	if err != nil {
+		switch err.ToGoError().(type) {
+		case BucketNotFound:
+			writeErrorResponse(w, r, ErrNoSuchBucket, r.URL.Path)
+		case ObjectNotFound:
+			writeErrorResponse(w, r, errAllowableObjectNotFound(bucket, r), r.URL.Path)
+		default:
			errorIf(err.Trace(), "GetObject failed.", nil)
			writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
+		}
		return
	}
	defer readCloser.Close() // Close after this handler returns.
@@ -261,7 +268,7 @@ func checkETag(w http.ResponseWriter, r *http.Request) bool {
// HeadObjectHandler - HEAD Object
// -----------
// The HEAD operation retrieves metadata from an object without returning the object itself.
-func (api objectStorageAPI) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
+func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
	var object, bucket string
	vars := mux.Vars(r)
	bucket = vars["bucket"]
@@ -304,7 +311,7 @@ func (api objectStorageAPI) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
	}

	// Verify 'If-Modified-Since' and 'If-Unmodified-Since'.
-	lastModified := objInfo.ModifiedTime
+	lastModified := objInfo.ModTime
	if checkLastModified(w, r, lastModified) {
		return
	}
@@ -325,7 +332,7 @@ func (api objectStorageAPI) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
// ----------
// This implementation of the PUT operation adds an object to a bucket
// while reading the object from another source.
-func (api objectStorageAPI) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
+func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object := vars["object"]
@@ -399,7 +406,7 @@ func (api objectStorageAPI) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {

	// Verify x-amz-copy-source-if-modified-since and
	// x-amz-copy-source-if-unmodified-since.
-	lastModified := objInfo.ModifiedTime
+	lastModified := objInfo.ModTime
	if checkCopySourceLastModified(w, r, lastModified) {
		return
	}
@@ -450,9 +457,8 @@ func (api objectStorageAPI) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
	metadata["md5Sum"] = hex.EncodeToString(md5Bytes)

	// Create the object.
-	objInfo, err = api.ObjectAPI.PutObject(bucket, object, size, readCloser, metadata)
+	md5Sum, err := api.ObjectAPI.PutObject(bucket, object, size, readCloser, metadata)
	if err != nil {
-		errorIf(err.Trace(), "PutObject failed.", nil)
		switch err.ToGoError().(type) {
		case RootPathFull:
			writeErrorResponse(w, r, ErrRootPathFull, r.URL.Path)
@@ -467,11 +473,20 @@ func (api objectStorageAPI) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
		case ObjectExistsAsPrefix:
			writeErrorResponse(w, r, ErrObjectExistsAsPrefix, r.URL.Path)
		default:
+			errorIf(err.Trace(), "PutObject failed.", nil)
			writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
		}
		return
	}
-	response := generateCopyObjectResponse(objInfo.MD5Sum, objInfo.ModifiedTime)
+
+	objInfo, err = api.ObjectAPI.GetObjectInfo(bucket, object)
+	if err != nil {
+		errorIf(err.Trace(), "GetObjectInfo failed.", nil)
+		writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
+		return
+	}
+
+	response := generateCopyObjectResponse(md5Sum, objInfo.ModTime)
	encodedSuccessResponse := encodeResponse(response)
	// write headers
	setCommonHeaders(w)
@@ -576,7 +591,7 @@ func checkCopySourceETag(w http.ResponseWriter, r *http.Request) bool {
// PutObjectHandler - PUT Object
// ----------
// This implementation of the PUT operation adds an object to a bucket.
-func (api objectStorageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
+func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
	// If the matching failed, it means that the X-Amz-Copy-Source was
	// wrong, fail right here.
	if _, ok := r.Header["X-Amz-Copy-Source"]; ok {
@@ -606,7 +621,7 @@ func (api objectStorageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

-	var objInfo ObjectInfo
+	var md5Sum string
	switch getRequestAuthType(r) {
	default:
		// For all unknown auth types return error.
@@ -619,7 +634,7 @@ func (api objectStorageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
			return
		}
		// Create anonymous object.
-		objInfo, err = api.ObjectAPI.PutObject(bucket, object, size, r.Body, nil)
+		md5Sum, err = api.ObjectAPI.PutObject(bucket, object, size, r.Body, nil)
	case authTypePresigned, authTypeSigned:
		// Initialize a pipe for data pipe line.
		reader, writer := io.Pipe()
@@ -658,7 +673,7 @@ func (api objectStorageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
		// Make sure we hex encode here.
		metadata["md5"] = hex.EncodeToString(md5Bytes)
		// Create object.
-		objInfo, err = api.ObjectAPI.PutObject(bucket, object, size, reader, metadata)
+		md5Sum, err = api.ObjectAPI.PutObject(bucket, object, size, reader, metadata)
	}
	if err != nil {
		errorIf(err.Trace(), "PutObject failed.", nil)
@@ -686,16 +701,16 @@ func (api objectStorageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
		}
		return
	}
-	if objInfo.MD5Sum != "" {
-		w.Header().Set("ETag", "\""+objInfo.MD5Sum+"\"")
+	if md5Sum != "" {
+		w.Header().Set("ETag", "\""+md5Sum+"\"")
	}
	writeSuccessResponse(w, nil)
}

-/// Multipart objectStorageAPI
+/// Multipart objectAPIHandlers

// NewMultipartUploadHandler - New multipart upload
-func (api objectStorageAPI) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
|
||||
func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
|
||||
var object, bucket string
|
||||
vars := mux.Vars(r)
|
||||
bucket = vars["bucket"]
|
||||
@ -748,7 +763,7 @@ func (api objectStorageAPI) NewMultipartUploadHandler(w http.ResponseWriter, r *
|
||||
}
|
||||
|
||||
// PutObjectPartHandler - Upload part
|
||||
func (api objectStorageAPI) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
|
||||
func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
object := vars["object"]
|
||||
@ -861,7 +876,7 @@ func (api objectStorageAPI) PutObjectPartHandler(w http.ResponseWriter, r *http.
|
||||
}
|
||||
|
||||
// AbortMultipartUploadHandler - Abort multipart upload
|
||||
func (api objectStorageAPI) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
|
||||
func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
object := vars["object"]
|
||||
@ -908,7 +923,7 @@ func (api objectStorageAPI) AbortMultipartUploadHandler(w http.ResponseWriter, r
|
||||
}
|
||||
|
||||
// ListObjectPartsHandler - List object parts
|
||||
func (api objectStorageAPI) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) {
|
||||
func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
object := vars["object"]
|
||||
@ -958,6 +973,8 @@ func (api objectStorageAPI) ListObjectPartsHandler(w http.ResponseWriter, r *htt
|
||||
writeErrorResponse(w, r, ErrNoSuchKey, r.URL.Path)
|
||||
case InvalidUploadID:
|
||||
writeErrorResponse(w, r, ErrNoSuchUpload, r.URL.Path)
|
||||
case InvalidPart:
|
||||
writeErrorResponse(w, r, ErrInvalidPart, r.URL.Path)
|
||||
default:
|
||||
writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
|
||||
}
|
||||
@ -972,7 +989,7 @@ func (api objectStorageAPI) ListObjectPartsHandler(w http.ResponseWriter, r *htt
|
||||
}
|
||||
|
||||
// CompleteMultipartUploadHandler - Complete multipart upload
|
||||
func (api objectStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
|
||||
func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
object := vars["object"]
|
||||
@ -980,7 +997,7 @@ func (api objectStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter
|
||||
// Get upload id.
|
||||
uploadID, _, _, _ := getObjectResources(r.URL.Query())
|
||||
|
||||
var objInfo ObjectInfo
|
||||
var md5Sum string
|
||||
var err *probe.Error
|
||||
switch getRequestAuthType(r) {
|
||||
default:
|
||||
@ -1007,6 +1024,7 @@ func (api objectStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter
|
||||
}
|
||||
complMultipartUpload := &completeMultipartUpload{}
|
||||
if e = xml.Unmarshal(completeMultipartBytes, complMultipartUpload); e != nil {
|
||||
errorIf(probe.NewError(e), "XML Unmarshal failed", nil)
|
||||
writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path)
|
||||
return
|
||||
}
|
||||
@ -1015,10 +1033,14 @@ func (api objectStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter
|
||||
return
|
||||
}
|
||||
// Complete parts.
|
||||
completeParts := complMultipartUpload.Parts
|
||||
|
||||
var completeParts []completePart
|
||||
for _, part := range complMultipartUpload.Parts {
|
||||
part.ETag = strings.TrimPrefix(part.ETag, "\"")
|
||||
part.ETag = strings.TrimSuffix(part.ETag, "\"")
|
||||
completeParts = append(completeParts, part)
|
||||
}
|
||||
// Complete multipart upload.
|
||||
objInfo, err = api.ObjectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts)
|
||||
md5Sum, err = api.ObjectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts)
|
||||
if err != nil {
|
||||
errorIf(err.Trace(), "CompleteMultipartUpload failed.", nil)
|
||||
switch err.ToGoError().(type) {
|
||||
@ -1046,7 +1068,7 @@ func (api objectStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter
|
||||
// Get object location.
|
||||
location := getLocation(r)
|
||||
// Generate complete multipart response.
|
||||
response := generateCompleteMultpartUploadResponse(bucket, object, location, objInfo.MD5Sum)
|
||||
response := generateCompleteMultpartUploadResponse(bucket, object, location, md5Sum)
|
||||
encodedSuccessResponse := encodeResponse(response)
|
||||
// Write headers.
|
||||
setCommonHeaders(w)
|
||||
@ -1054,10 +1076,10 @@ func (api objectStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter
|
||||
writeSuccessResponse(w, encodedSuccessResponse)
|
||||
}
|
||||
|
||||
/// Delete objectStorageAPI
|
||||
/// Delete objectAPIHandlers
|
||||
|
||||
// DeleteObjectHandler - delete an object
|
||||
func (api objectStorageAPI) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||
func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
object := vars["object"]
|
||||
|
@ -1 +0,0 @@
|
||||
package main
|
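Note (editor's sketch, not part of the commit): the quote trimming added to CompleteMultipartUploadHandler above exists because S3 clients echo part ETags back in the CompleteMultipartUpload XML exactly as they received them, wrapped in double quotes. A minimal illustration, with a made-up ETag value:

    etag := "\"3605d84b1c43b1a664aa7c0d5082d271\"" // as sent by a client
    etag = strings.TrimPrefix(etag, "\"")
    etag = strings.TrimSuffix(etag, "\"")
    fmt.Println(etag) // prints: 3605d84b1c43b1a664aa7c0d5082d271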
object-utils.go (new file, 95 lines)
@@ -0,0 +1,95 @@
/*
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
    "regexp"
    "strings"
    "unicode/utf8"
)

const (
    slashPathSeparator = "/"
)

// validBucket regexp.
var validBucket = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)

// IsValidBucketName verifies a bucket name in accordance with Amazon's
// requirements. It must be 3-63 characters long, can contain dashes
// and periods, but must begin and end with a lowercase letter or a number.
// See: http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
func IsValidBucketName(bucket string) bool {
    if len(bucket) < 3 || len(bucket) > 63 {
        return false
    }
    if bucket[0] == '.' || bucket[len(bucket)-1] == '.' {
        return false
    }
    return validBucket.MatchString(bucket)
}

// IsValidObjectName verifies an object name in accordance with Amazon's
// requirements. It cannot exceed 1024 characters and must be a valid UTF-8
// string.
//
// See:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
//
// You should avoid the following characters in a key name because of
// significant special handling for consistency across all
// applications.
//
// Rejects strings with the following characters.
//
// - Backslash ("\")
// - Left curly brace ("{")
// - Caret ("^")
// - Right curly brace ("}")
// - Grave accent / back tick ("`")
// - Right square bracket ("]")
// - Left square bracket ("[")
// - Tilde ("~")
// - 'Greater Than' symbol (">")
// - 'Less Than' symbol ("<")
// - Vertical bar / pipe ("|")
func IsValidObjectName(object string) bool {
    if len(object) > 1024 || len(object) == 0 {
        return false
    }
    if !utf8.ValidString(object) {
        return false
    }
    // Reject unsupported characters in object name.
    return !strings.ContainsAny(object, "`^*{}[]|\\\"'")
}

// IsValidObjectPrefix verifies whether the prefix is a valid object name.
// It is valid to have an empty prefix.
func IsValidObjectPrefix(object string) bool {
    // Prefix can be empty or "/".
    if object == "" || object == "/" {
        return true
    }
    // Verify if prefix is a valid object name.
    return IsValidObjectName(object)
}

func pathJoin(path1 string, path2 string) string {
    return strings.TrimSuffix(path1, slashPathSeparator) + slashPathSeparator + path2
}
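Note (editor's sketch, not part of the commit): how the new helpers behave for a few representative inputs, assuming they are called from the same package:

    IsValidBucketName("my-bucket") // true
    IsValidBucketName("ab")        // false: shorter than 3 characters
    IsValidBucketName(".bucket")   // false: cannot begin with a period
    IsValidObjectName("a/b.txt")   // true: "/" is allowed in object names
    IsValidObjectName("bad|name")  // false: "|" is rejected
    IsValidObjectPrefix("")        // true: an empty prefix is valid
    pathJoin("photos/", "2016")    // "photos/2016": no doubled separator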
@@ -28,7 +28,7 @@ import (
 )

 // APITestSuite - collection of API tests
-func APITestSuite(c *check.C, create func() ObjectAPI) {
+func APITestSuite(c *check.C, create func() *objectAPI) {
    testMakeBucket(c, create)
    testMultipleObjectCreation(c, create)
    testPaging(c, create)
@@ -46,17 +46,17 @@ func APITestSuite(c *check.C, create func() ObjectAPI) {
    testMultipartObjectAbort(c, create)
}

-func testMakeBucket(c *check.C, create func() ObjectAPI) {
-   fs := create()
-   err := fs.MakeBucket("bucket")
+func testMakeBucket(c *check.C, create func() *objectAPI) {
+   obj := create()
+   err := obj.MakeBucket("bucket")
    c.Assert(err, check.IsNil)
}

-func testMultipartObjectCreation(c *check.C, create func() ObjectAPI) {
-   fs := create()
-   err := fs.MakeBucket("bucket")
+func testMultipartObjectCreation(c *check.C, create func() *objectAPI) {
+   obj := create()
+   err := obj.MakeBucket("bucket")
    c.Assert(err, check.IsNil)
-   uploadID, err := fs.NewMultipartUpload("bucket", "key")
+   uploadID, err := obj.NewMultipartUpload("bucket", "key")
    c.Assert(err, check.IsNil)

    completedParts := completeMultipartUpload{}
@@ -72,21 +72,21 @@ func testMultipartObjectCreation(c *check.C, create func() ObjectAPI) {
        expectedMD5Sumhex := hex.EncodeToString(hasher.Sum(nil))

        var calculatedMD5sum string
-       calculatedMD5sum, err = fs.PutObjectPart("bucket", "key", uploadID, i, int64(len(randomString)), bytes.NewBufferString(randomString), expectedMD5Sumhex)
+       calculatedMD5sum, err = obj.PutObjectPart("bucket", "key", uploadID, i, int64(len(randomString)), bytes.NewBufferString(randomString), expectedMD5Sumhex)
        c.Assert(err, check.IsNil)
        c.Assert(calculatedMD5sum, check.Equals, expectedMD5Sumhex)
        completedParts.Parts = append(completedParts.Parts, completePart{PartNumber: i, ETag: calculatedMD5sum})
    }
-   objInfo, err := fs.CompleteMultipartUpload("bucket", "key", uploadID, completedParts.Parts)
+   md5Sum, err := obj.CompleteMultipartUpload("bucket", "key", uploadID, completedParts.Parts)
    c.Assert(err, check.IsNil)
-   c.Assert(objInfo.MD5Sum, check.Equals, "3605d84b1c43b1a664aa7c0d5082d271-10")
+   c.Assert(md5Sum, check.Equals, "3605d84b1c43b1a664aa7c0d5082d271-10")
}

-func testMultipartObjectAbort(c *check.C, create func() ObjectAPI) {
-   fs := create()
-   err := fs.MakeBucket("bucket")
+func testMultipartObjectAbort(c *check.C, create func() *objectAPI) {
+   obj := create()
+   err := obj.MakeBucket("bucket")
    c.Assert(err, check.IsNil)
-   uploadID, err := fs.NewMultipartUpload("bucket", "key")
+   uploadID, err := obj.NewMultipartUpload("bucket", "key")
    c.Assert(err, check.IsNil)

    parts := make(map[int]string)
@@ -104,19 +104,19 @@ func testMultipartObjectAbort(c *check.C, create func() ObjectAPI) {

        metadata["md5"] = expectedMD5Sumhex
        var calculatedMD5sum string
-       calculatedMD5sum, err = fs.PutObjectPart("bucket", "key", uploadID, i, int64(len(randomString)), bytes.NewBufferString(randomString), expectedMD5Sumhex)
+       calculatedMD5sum, err = obj.PutObjectPart("bucket", "key", uploadID, i, int64(len(randomString)), bytes.NewBufferString(randomString), expectedMD5Sumhex)
        c.Assert(err, check.IsNil)
        c.Assert(calculatedMD5sum, check.Equals, expectedMD5Sumhex)
        parts[i] = expectedMD5Sumhex
    }
-   err = fs.AbortMultipartUpload("bucket", "key", uploadID)
+   err = obj.AbortMultipartUpload("bucket", "key", uploadID)
    c.Assert(err, check.IsNil)
}

-func testMultipleObjectCreation(c *check.C, create func() ObjectAPI) {
+func testMultipleObjectCreation(c *check.C, create func() *objectAPI) {
    objects := make(map[string][]byte)
-   fs := create()
-   err := fs.MakeBucket("bucket")
+   obj := create()
+   err := obj.MakeBucket("bucket")
    c.Assert(err, check.IsNil)
    for i := 0; i < 10; i++ {
        randomPerm := rand.Perm(10)
@@ -133,40 +133,40 @@ func testMultipleObjectCreation(c *check.C, create func() ObjectAPI) {
        objects[key] = []byte(randomString)
        metadata := make(map[string]string)
        metadata["md5Sum"] = expectedMD5Sumhex
-       objInfo, err := fs.PutObject("bucket", key, int64(len(randomString)), bytes.NewBufferString(randomString), metadata)
+       md5Sum, err := obj.PutObject("bucket", key, int64(len(randomString)), bytes.NewBufferString(randomString), metadata)
        c.Assert(err, check.IsNil)
-       c.Assert(objInfo.MD5Sum, check.Equals, expectedMD5Sumhex)
+       c.Assert(md5Sum, check.Equals, expectedMD5Sumhex)
    }

    for key, value := range objects {
        var byteBuffer bytes.Buffer
-       r, err := fs.GetObject("bucket", key, 0)
+       r, err := obj.GetObject("bucket", key, 0)
        c.Assert(err, check.IsNil)
        _, e := io.Copy(&byteBuffer, r)
        c.Assert(e, check.IsNil)
        c.Assert(byteBuffer.Bytes(), check.DeepEquals, value)
        c.Assert(r.Close(), check.IsNil)

-       objInfo, err := fs.GetObjectInfo("bucket", key)
+       objInfo, err := obj.GetObjectInfo("bucket", key)
        c.Assert(err, check.IsNil)
        c.Assert(objInfo.Size, check.Equals, int64(len(value)))
        r.Close()
    }
}

-func testPaging(c *check.C, create func() ObjectAPI) {
-   fs := create()
-   fs.MakeBucket("bucket")
-   result, err := fs.ListObjects("bucket", "", "", "", 0)
+func testPaging(c *check.C, create func() *objectAPI) {
+   obj := create()
+   obj.MakeBucket("bucket")
+   result, err := obj.ListObjects("bucket", "", "", "", 0)
    c.Assert(err, check.IsNil)
    c.Assert(len(result.Objects), check.Equals, 0)
    c.Assert(result.IsTruncated, check.Equals, false)
    // check before paging occurs
    for i := 0; i < 5; i++ {
        key := "obj" + strconv.Itoa(i)
-       _, err = fs.PutObject("bucket", key, int64(len(key)), bytes.NewBufferString(key), nil)
+       _, err = obj.PutObject("bucket", key, int64(len(key)), bytes.NewBufferString(key), nil)
        c.Assert(err, check.IsNil)
-       result, err = fs.ListObjects("bucket", "", "", "", 5)
+       result, err = obj.ListObjects("bucket", "", "", "", 5)
        c.Assert(err, check.IsNil)
        c.Assert(len(result.Objects), check.Equals, i+1)
        c.Assert(result.IsTruncated, check.Equals, false)
@@ -174,27 +174,27 @@ func testPaging(c *check.C, create func() ObjectAPI) {
    // check after paging occurs pages work
    for i := 6; i <= 10; i++ {
        key := "obj" + strconv.Itoa(i)
-       _, err = fs.PutObject("bucket", key, int64(len(key)), bytes.NewBufferString(key), nil)
+       _, err = obj.PutObject("bucket", key, int64(len(key)), bytes.NewBufferString(key), nil)
        c.Assert(err, check.IsNil)
-       result, err = fs.ListObjects("bucket", "obj", "", "", 5)
+       result, err = obj.ListObjects("bucket", "obj", "", "", 5)
        c.Assert(err, check.IsNil)
        c.Assert(len(result.Objects), check.Equals, 5)
        c.Assert(result.IsTruncated, check.Equals, true)
    }
    // check paging with prefix at end returns less objects
    {
-       _, err = fs.PutObject("bucket", "newPrefix", int64(len("prefix1")), bytes.NewBufferString("prefix1"), nil)
+       _, err = obj.PutObject("bucket", "newPrefix", int64(len("prefix1")), bytes.NewBufferString("prefix1"), nil)
        c.Assert(err, check.IsNil)
-       _, err = fs.PutObject("bucket", "newPrefix2", int64(len("prefix2")), bytes.NewBufferString("prefix2"), nil)
+       _, err = obj.PutObject("bucket", "newPrefix2", int64(len("prefix2")), bytes.NewBufferString("prefix2"), nil)
        c.Assert(err, check.IsNil)
-       result, err = fs.ListObjects("bucket", "new", "", "", 5)
+       result, err = obj.ListObjects("bucket", "new", "", "", 5)
        c.Assert(err, check.IsNil)
        c.Assert(len(result.Objects), check.Equals, 2)
    }

    // check ordering of pages
    {
-       result, err = fs.ListObjects("bucket", "", "", "", 1000)
+       result, err = obj.ListObjects("bucket", "", "", "", 1000)
        c.Assert(err, check.IsNil)
        c.Assert(result.Objects[0].Name, check.Equals, "newPrefix")
        c.Assert(result.Objects[1].Name, check.Equals, "newPrefix2")
@@ -205,11 +205,11 @@ func testPaging(c *check.C, create func() ObjectAPI) {

    // check delimited results with delimiter and prefix
    {
-       _, err = fs.PutObject("bucket", "this/is/delimited", int64(len("prefix1")), bytes.NewBufferString("prefix1"), nil)
+       _, err = obj.PutObject("bucket", "this/is/delimited", int64(len("prefix1")), bytes.NewBufferString("prefix1"), nil)
        c.Assert(err, check.IsNil)
-       _, err = fs.PutObject("bucket", "this/is/also/a/delimited/file", int64(len("prefix2")), bytes.NewBufferString("prefix2"), nil)
+       _, err = obj.PutObject("bucket", "this/is/also/a/delimited/file", int64(len("prefix2")), bytes.NewBufferString("prefix2"), nil)
        c.Assert(err, check.IsNil)
-       result, err = fs.ListObjects("bucket", "this/is/", "", "/", 10)
+       result, err = obj.ListObjects("bucket", "this/is/", "", "/", 10)
        c.Assert(err, check.IsNil)
        c.Assert(len(result.Objects), check.Equals, 1)
        c.Assert(result.Prefixes[0], check.Equals, "this/is/also/")
@@ -217,7 +217,7 @@ func testPaging(c *check.C, create func() ObjectAPI) {

    // check delimited results with delimiter without prefix
    {
-       result, err = fs.ListObjects("bucket", "", "", "/", 1000)
+       result, err = obj.ListObjects("bucket", "", "", "/", 1000)
        c.Assert(err, check.IsNil)
        c.Assert(result.Objects[0].Name, check.Equals, "newPrefix")
        c.Assert(result.Objects[1].Name, check.Equals, "newPrefix2")
@@ -229,7 +229,7 @@ func testPaging(c *check.C, create func() ObjectAPI) {

    // check results with Marker
    {
-       result, err = fs.ListObjects("bucket", "", "newPrefix", "", 3)
+       result, err = obj.ListObjects("bucket", "", "newPrefix", "", 3)
        c.Assert(err, check.IsNil)
        c.Assert(result.Objects[0].Name, check.Equals, "newPrefix2")
        c.Assert(result.Objects[1].Name, check.Equals, "obj0")
@@ -237,7 +237,7 @@ func testPaging(c *check.C, create func() ObjectAPI) {
    }
    // check ordering of results with prefix
    {
-       result, err = fs.ListObjects("bucket", "obj", "", "", 1000)
+       result, err = obj.ListObjects("bucket", "obj", "", "", 1000)
        c.Assert(err, check.IsNil)
        c.Assert(result.Objects[0].Name, check.Equals, "obj0")
        c.Assert(result.Objects[1].Name, check.Equals, "obj1")
@@ -247,27 +247,27 @@ func testPaging(c *check.C, create func() ObjectAPI) {
    }
    // check ordering of results with prefix and no paging
    {
-       result, err = fs.ListObjects("bucket", "new", "", "", 5)
+       result, err = obj.ListObjects("bucket", "new", "", "", 5)
        c.Assert(err, check.IsNil)
        c.Assert(result.Objects[0].Name, check.Equals, "newPrefix")
        c.Assert(result.Objects[1].Name, check.Equals, "newPrefix2")
    }
}

-func testObjectOverwriteWorks(c *check.C, create func() ObjectAPI) {
-   fs := create()
-   err := fs.MakeBucket("bucket")
+func testObjectOverwriteWorks(c *check.C, create func() *objectAPI) {
+   obj := create()
+   err := obj.MakeBucket("bucket")
    c.Assert(err, check.IsNil)

-   _, err = fs.PutObject("bucket", "object", int64(len("one")), bytes.NewBufferString("one"), nil)
+   _, err = obj.PutObject("bucket", "object", int64(len("one")), bytes.NewBufferString("one"), nil)
    c.Assert(err, check.IsNil)
    // c.Assert(md5Sum1hex, check.Equals, objInfo.MD5Sum)

-   _, err = fs.PutObject("bucket", "object", int64(len("three")), bytes.NewBufferString("three"), nil)
+   _, err = obj.PutObject("bucket", "object", int64(len("three")), bytes.NewBufferString("three"), nil)
    c.Assert(err, check.IsNil)

    var bytesBuffer bytes.Buffer
-   r, err := fs.GetObject("bucket", "object", 0)
+   r, err := obj.GetObject("bucket", "object", 0)
    c.Assert(err, check.IsNil)
    _, e := io.Copy(&bytesBuffer, r)
    c.Assert(e, check.IsNil)
@@ -275,30 +275,30 @@ func testObjectOverwriteWorks(c *check.C, create func() ObjectAPI) {
    c.Assert(r.Close(), check.IsNil)
}

-func testNonExistantBucketOperations(c *check.C, create func() ObjectAPI) {
-   fs := create()
-   _, err := fs.PutObject("bucket", "object", int64(len("one")), bytes.NewBufferString("one"), nil)
+func testNonExistantBucketOperations(c *check.C, create func() *objectAPI) {
+   obj := create()
+   _, err := obj.PutObject("bucket", "object", int64(len("one")), bytes.NewBufferString("one"), nil)
    c.Assert(err, check.Not(check.IsNil))
}

-func testBucketRecreateFails(c *check.C, create func() ObjectAPI) {
-   fs := create()
-   err := fs.MakeBucket("string")
+func testBucketRecreateFails(c *check.C, create func() *objectAPI) {
+   obj := create()
+   err := obj.MakeBucket("string")
    c.Assert(err, check.IsNil)
-   err = fs.MakeBucket("string")
+   err = obj.MakeBucket("string")
    c.Assert(err, check.Not(check.IsNil))
}

-func testPutObjectInSubdir(c *check.C, create func() ObjectAPI) {
-   fs := create()
-   err := fs.MakeBucket("bucket")
+func testPutObjectInSubdir(c *check.C, create func() *objectAPI) {
+   obj := create()
+   err := obj.MakeBucket("bucket")
    c.Assert(err, check.IsNil)

-   _, err = fs.PutObject("bucket", "dir1/dir2/object", int64(len("hello world")), bytes.NewBufferString("hello world"), nil)
+   _, err = obj.PutObject("bucket", "dir1/dir2/object", int64(len("hello world")), bytes.NewBufferString("hello world"), nil)
    c.Assert(err, check.IsNil)

    var bytesBuffer bytes.Buffer
-   r, err := fs.GetObject("bucket", "dir1/dir2/object", 0)
+   r, err := obj.GetObject("bucket", "dir1/dir2/object", 0)
    c.Assert(err, check.IsNil)
    n, e := io.Copy(&bytesBuffer, r)
    c.Assert(e, check.IsNil)
@@ -307,49 +307,49 @@ func testPutObjectInSubdir(c *check.C, create func() ObjectAPI) {
    c.Assert(r.Close(), check.IsNil)
}

-func testListBuckets(c *check.C, create func() ObjectAPI) {
-   fs := create()
+func testListBuckets(c *check.C, create func() *objectAPI) {
+   obj := create()

    // test empty list
-   buckets, err := fs.ListBuckets()
+   buckets, err := obj.ListBuckets()
    c.Assert(err, check.IsNil)
    c.Assert(len(buckets), check.Equals, 0)

    // add one and test exists
-   err = fs.MakeBucket("bucket1")
+   err = obj.MakeBucket("bucket1")
    c.Assert(err, check.IsNil)

-   buckets, err = fs.ListBuckets()
+   buckets, err = obj.ListBuckets()
    c.Assert(len(buckets), check.Equals, 1)
    c.Assert(err, check.IsNil)

    // add two and test exists
-   err = fs.MakeBucket("bucket2")
+   err = obj.MakeBucket("bucket2")
    c.Assert(err, check.IsNil)

-   buckets, err = fs.ListBuckets()
+   buckets, err = obj.ListBuckets()
    c.Assert(len(buckets), check.Equals, 2)
    c.Assert(err, check.IsNil)

    // add three and test exists + prefix
-   err = fs.MakeBucket("bucket22")
+   err = obj.MakeBucket("bucket22")

-   buckets, err = fs.ListBuckets()
+   buckets, err = obj.ListBuckets()
    c.Assert(len(buckets), check.Equals, 3)
    c.Assert(err, check.IsNil)
}

-func testListBucketsOrder(c *check.C, create func() ObjectAPI) {
+func testListBucketsOrder(c *check.C, create func() *objectAPI) {
    // if implementation contains a map, order of map keys will vary.
    // this ensures they return in the same order each time
    for i := 0; i < 10; i++ {
-       fs := create()
+       obj := create()
        // add one and test exists
-       err := fs.MakeBucket("bucket1")
+       err := obj.MakeBucket("bucket1")
        c.Assert(err, check.IsNil)
-       err = fs.MakeBucket("bucket2")
+       err = obj.MakeBucket("bucket2")
        c.Assert(err, check.IsNil)
-       buckets, err := fs.ListBuckets()
+       buckets, err := obj.ListBuckets()
        c.Assert(err, check.IsNil)
        c.Assert(len(buckets), check.Equals, 2)
        c.Assert(buckets[0].Name, check.Equals, "bucket1")
@@ -357,20 +357,20 @@ func testListBucketsOrder(c *check.C, create func() ObjectAPI) {
    }
}

-func testListObjectsTestsForNonExistantBucket(c *check.C, create func() ObjectAPI) {
-   fs := create()
-   result, err := fs.ListObjects("bucket", "", "", "", 1000)
+func testListObjectsTestsForNonExistantBucket(c *check.C, create func() *objectAPI) {
+   obj := create()
+   result, err := obj.ListObjects("bucket", "", "", "", 1000)
    c.Assert(err, check.Not(check.IsNil))
    c.Assert(result.IsTruncated, check.Equals, false)
    c.Assert(len(result.Objects), check.Equals, 0)
}

-func testNonExistantObjectInBucket(c *check.C, create func() ObjectAPI) {
-   fs := create()
-   err := fs.MakeBucket("bucket")
+func testNonExistantObjectInBucket(c *check.C, create func() *objectAPI) {
+   obj := create()
+   err := obj.MakeBucket("bucket")
    c.Assert(err, check.IsNil)

-   _, err = fs.GetObject("bucket", "dir1", 0)
+   _, err = obj.GetObject("bucket", "dir1", 0)
    c.Assert(err, check.Not(check.IsNil))
    switch err := err.ToGoError().(type) {
    case ObjectNotFound:
@@ -380,44 +380,44 @@ func testNonExistantObjectInBucket(c *check.C, create func() ObjectAPI) {
    }
}

-func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() ObjectAPI) {
-   fs := create()
-   err := fs.MakeBucket("bucket")
+func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() *objectAPI) {
+   obj := create()
+   err := obj.MakeBucket("bucket")
    c.Assert(err, check.IsNil)

-   _, err = fs.PutObject("bucket", "dir1/dir2/object", int64(len("hello world")), bytes.NewBufferString("hello world"), nil)
+   _, err = obj.PutObject("bucket", "dir1/dir2/object", int64(len("hello world")), bytes.NewBufferString("hello world"), nil)
    c.Assert(err, check.IsNil)

-   _, err = fs.GetObject("bucket", "dir1", 0)
+   _, err = obj.GetObject("bucket", "dir1", 0)
    switch err := err.ToGoError().(type) {
-   case ObjectExistsAsPrefix:
+   case ObjectNotFound:
        c.Assert(err.Bucket, check.Equals, "bucket")
-       c.Assert(err.Prefix, check.Equals, "dir1")
+       c.Assert(err.Object, check.Equals, "dir1")
    default:
        // force a failure with a line number
-       c.Assert(err.Error(), check.Equals, "Object exists on : bucket as prefix dir1")
+       c.Assert(err, check.Equals, "ObjectNotFound")
    }

-   _, err = fs.GetObject("bucket", "dir1/", 0)
+   _, err = obj.GetObject("bucket", "dir1/", 0)
    switch err := err.ToGoError().(type) {
-   case ObjectExistsAsPrefix:
+   case ObjectNotFound:
        c.Assert(err.Bucket, check.Equals, "bucket")
-       c.Assert(err.Prefix, check.Equals, "dir1/")
+       c.Assert(err.Object, check.Equals, "dir1/")
    default:
        // force a failure with a line number
-       c.Assert(err.Error(), check.Equals, "Object exists on : bucket as prefix dir1")
+       c.Assert(err, check.Equals, "ObjectNotFound")
    }
}

-func testDefaultContentType(c *check.C, create func() ObjectAPI) {
-   fs := create()
-   err := fs.MakeBucket("bucket")
+func testDefaultContentType(c *check.C, create func() *objectAPI) {
+   obj := create()
+   err := obj.MakeBucket("bucket")
    c.Assert(err, check.IsNil)

    // Test empty
-   _, err = fs.PutObject("bucket", "one", int64(len("one")), bytes.NewBufferString("one"), nil)
+   _, err = obj.PutObject("bucket", "one", int64(len("one")), bytes.NewBufferString("one"), nil)
    c.Assert(err, check.IsNil)
-   objInfo, err := fs.GetObjectInfo("bucket", "one")
+   objInfo, err := obj.GetObjectInfo("bucket", "one")
    c.Assert(err, check.IsNil)
    c.Assert(objInfo.ContentType, check.Equals, "application/octet-stream")
}
routers.go (31 changed lines)
@@ -18,19 +18,39 @@ package main

 import (
    "net/http"
+   "os"

    router "github.com/gorilla/mux"
+   "github.com/minio/minio/pkg/probe"
 )

 // configureServer handler returns final handler for the http server.
-func configureServerHandler(objectAPI ObjectAPI) http.Handler {
+func configureServerHandler(srvCmdConfig serverCmdConfig) http.Handler {
+   var storageHandlers StorageAPI
+   if len(srvCmdConfig.exportPaths) == 1 {
+       // Verify if export path is a local file system path.
+       st, e := os.Stat(srvCmdConfig.exportPaths[0])
+       if e == nil && st.Mode().IsDir() {
+           // Initialize storage API.
+           storageHandlers, e = newFS(srvCmdConfig.exportPaths[0])
+           fatalIf(probe.NewError(e), "Initializing fs failed.", nil)
+       } else {
+           // Initialize storage API.
+           storageHandlers, e = newNetworkFS(srvCmdConfig.exportPaths[0])
+           fatalIf(probe.NewError(e), "Initializing network fs failed.", nil)
+       }
+   } // else if - XL part.
+
+   // Initialize object layer.
+   objectAPI := newObjectLayer(storageHandlers)
+
    // Initialize API.
-   api := objectStorageAPI{
+   apiHandlers := objectAPIHandlers{
        ObjectAPI: objectAPI,
    }

    // Initialize Web.
-   web := &webAPI{
+   webHandlers := &webAPIHandlers{
        ObjectAPI: objectAPI,
    }

@@ -38,8 +58,9 @@ func configureServerHandler(srvCmdConfig serverCmdConfig) http.Handler {
    mux := router.NewRouter()

    // Register all routers.
-   registerWebRouter(mux, web)
-   registerAPIRouter(mux, api)
+   registerStorageRPCRouter(mux, storageHandlers)
+   registerWebRouter(mux, webHandlers)
+   registerAPIRouter(mux, apiHandlers)
    // Add new routers here.

    // List of some generic handlers which are applied for all
@@ -69,12 +69,17 @@ EXAMPLES:
 `,
 }

+type serverCmdConfig struct {
+   serverAddr  string
+   exportPaths []string
+}
+
 // configureServer configure a new server instance
-func configureServer(serverAddr string, objectAPI ObjectAPI) *http.Server {
+func configureServer(srvCmdConfig serverCmdConfig) *http.Server {
    // Minio server config
    apiServer := &http.Server{
-       Addr:    serverAddr,
-       Handler: configureServerHandler(objectAPI),
+       Addr:           srvCmdConfig.serverAddr,
+       Handler:        configureServerHandler(srvCmdConfig),
        MaxHeaderBytes: 1 << 20,
    }

@@ -148,7 +153,7 @@ func initServerConfig(c *cli.Context) {

 // Check server arguments.
 func checkServerSyntax(c *cli.Context) {
-   if c.Args().First() == "help" {
+   if !c.Args().Present() && c.Args().First() == "help" {
        cli.ShowCommandHelpAndExit(c, "server", 1)
    }
    if len(c.Args()) > 2 {
@@ -255,25 +260,17 @@ func serverMain(c *cli.Context) {
        }
    }

-   // Check configured ports.
+   // Check if requested port is available.
    checkPortAvailability(getPort(net.JoinHostPort(host, port)))

-   var objectAPI ObjectAPI
-   var err *probe.Error
-
-   // Set backend FS type.
-   fsPath := strings.TrimSpace(c.Args().Get(0))
-   if fsPath != "" {
-       // Last argument is always a file system path, verify if it exists and is accessible.
-       _, e := os.Stat(fsPath)
-       fatalIf(probe.NewError(e), "Unable to validate the path", nil)
-       // Initialize filesystem storage layer.
-       objectAPI, err = newFS(fsPath)
-       fatalIf(err.Trace(fsPath), "Initializing filesystem failed.", nil)
-   }
+   // Save all command line args as export paths.
+   exportPaths := c.Args()

    // Configure server.
-   apiServer := configureServer(serverAddress, objectAPI)
+   apiServer := configureServer(serverCmdConfig{
+       serverAddr:  serverAddress,
+       exportPaths: exportPaths,
+   })

    // Credential.
    cred := serverConfig.GetCredential()
@@ -304,6 +301,6 @@ func serverMain(c *cli.Context) {
    }

    // Start server.
-   err = minhttp.ListenAndServe(apiServer)
+   err := minhttp.ListenAndServe(apiServer)
    errorIf(err.Trace(), "Failed to start the minio server.", nil)
}
@@ -96,10 +96,10 @@ func (s *MyAPISuite) SetUpSuite(c *C) {
    // Save config.
    c.Assert(serverConfig.Save(), IsNil)

-   fs, err := newFS(fsroot)
-   c.Assert(err, IsNil)
-
-   apiServer := configureServer(addr, fs)
+   apiServer := configureServer(serverCmdConfig{
+       serverAddr:  addr,
+       exportPaths: []string{fsroot},
+   })
    testAPIFSCacheServer = httptest.NewServer(apiServer.Handler)
}

@@ -1172,6 +1172,7 @@ func (s *MyAPISuite) TestValidateObjectMultipartUploadID(c *C) {
    c.Assert(err, IsNil)

    response, err = client.Do(request)
    c.Assert(err, IsNil)
+   c.Assert(response.StatusCode, Equals, http.StatusOK)

    decoder := xml.NewDecoder(response.Body)
@@ -27,7 +27,7 @@ type StorageAPI interface {
    DeleteVol(volume string) (err error)

    // File operations.
-   ListFiles(volume, prefix, marker string, recursive bool, count int) (files []FileInfo, isEOF bool, err error)
+   ListFiles(volume, prefix, marker string, recursive bool, count int) (files []FileInfo, eof bool, err error)
    ReadFile(volume string, path string, offset int64) (readCloser io.ReadCloser, err error)
    CreateFile(volume string, path string) (writeCloser io.WriteCloser, err error)
    StatFile(volume string, path string) (file FileInfo, err error)
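Note (editor's sketch, not part of the commit): a caller pages through ListFiles until the renamed eof result turns true. The volume name, prefix and page size below are made up, and storage stands for any StorageAPI value:

    var marker string
    for {
        files, eof, err := storage.ListFiles("bucket", "photos/", marker, true, 1000)
        if err != nil {
            return err
        }
        for _, file := range files {
            fmt.Println(file.Name, file.Size)
        }
        if eof || len(files) == 0 {
            break // no more pages
        }
        // Resume the next page after the last name returned.
        marker = files[len(files)-1].Name
    }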
@@ -1,35 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
    "io"
    "os"
)

// isDirEmpty - returns whether given directory is empty or not.
func isDirEmpty(dirname string) (status bool, err error) {
    f, err := os.Open(dirname)
    if err == nil {
        defer f.Close()
        if _, err = f.Readdirnames(1); err == io.EOF {
            status = true
            err = nil
        }
    }
    return status, err
}
@@ -1,19 +1,3 @@
-/*
- * Minio Cloud Storage, (C) 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
 package main

 import (
@@ -21,17 +5,17 @@ import (
    "time"
 )

-// VolInfo - volume info
-type VolInfo struct {
-   Name    string
-   Created time.Time
-}
-
 // FileInfo - file stat information.
 type FileInfo struct {
    Volume  string
    Name    string
    ModTime time.Time
    Size    int64
    Type    os.FileMode
 }
+
+// VolInfo - volume info
+type VolInfo struct {
+   Name    string
+   Created time.Time
+   Mode    os.FileMode
+}
storage-errors.go (new file, 45 lines)
@@ -0,0 +1,45 @@
/*
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import "errors"

// errDiskPathFull - cannot create volume or files when disk is full.
var errDiskPathFull = errors.New("Disk path full.")

// errFileNotFound - cannot find the file.
var errFileNotFound = errors.New("File not found.")

// errVolumeExists - cannot create same volume again.
var errVolumeExists = errors.New("Volume already exists.")

// errIsNotRegular - not a regular file type.
var errIsNotRegular = errors.New("Not a regular file type.")

// errVolumeNotFound - cannot find the volume.
var errVolumeNotFound = errors.New("Volume not found.")

// errVolumeNotEmpty - volume not empty.
var errVolumeNotEmpty = errors.New("Volume is not empty.")

// errVolumeAccessDenied - cannot access volume, insufficient
// permissions.
var errVolumeAccessDenied = errors.New("Volume access denied.")

// errFileAccessDenied - cannot access file, insufficient
// permissions.
var errFileAccessDenied = errors.New("File access denied.")
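Note (editor's sketch, not part of the commit): these sentinels are plain error values, so callers compare them with ==, exactly as the storage rpc handlers further down do. The volume name here is made up:

    if err := storage.DeleteVol("backup"); err == errVolumeNotEmpty {
        // Refuse to delete a volume that still contains files.
    }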
storage-local.go (deleted file, 273 lines)
@@ -1,273 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
    "errors"
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "strings"
    "syscall"

    "github.com/minio/minio/pkg/disk"
    "github.com/minio/minio/pkg/safe"
)

// ErrDiskPathFull - cannot create volume or files when disk is full.
var ErrDiskPathFull = errors.New("Disk path full.")

// ErrVolumeExists - cannot create same volume again.
var ErrVolumeExists = errors.New("Volume already exists.")

// ErrIsNotRegular - is not a regular file type.
var ErrIsNotRegular = errors.New("Not a regular file type.")

// localStorage implements StorageAPI on top of provided diskPath.
type localStorage struct {
    diskPath    string
    fsInfo      disk.Info
    minFreeDisk int64
}

// Initialize a new local storage.
func newLocalStorage(diskPath string) (StorageAPI, error) {
    if diskPath == "" {
        return nil, errInvalidArgument
    }
    st, e := os.Stat(diskPath)
    if e != nil {
        return nil, e
    }
    if !st.IsDir() {
        return nil, syscall.ENOTDIR
    }

    info, e := disk.GetInfo(diskPath)
    if e != nil {
        return nil, e
    }
    disk := localStorage{
        diskPath:    diskPath,
        fsInfo:      info,
        minFreeDisk: 5, // Minimum 5% disk should be free.
    }
    return disk, nil
}

// Make a volume entry.
func (s localStorage) MakeVol(volume string) error {
    if e := checkDiskFree(s.diskPath, s.minFreeDisk); e != nil {
        return e
    }
    volumeDir := getVolumeDir(s.diskPath, volume)
    if _, e := os.Stat(volumeDir); e == nil {
        return ErrVolumeExists
    }

    // Make a volume entry.
    if e := os.Mkdir(volumeDir, 0700); e != nil {
        return e
    }
    return nil
}

// removeDuplicateVols - remove duplicate volumes.
func removeDuplicateVols(vols []VolInfo) []VolInfo {
    length := len(vols) - 1
    for i := 0; i < length; i++ {
        for j := i + 1; j <= length; j++ {
            if vols[i].Name == vols[j].Name {
                // Pick the latest volume from a duplicate entry.
                if vols[i].Created.Sub(vols[j].Created) > 0 {
                    vols[i] = vols[length]
                } else {
                    vols[j] = vols[length]
                }
                vols = vols[0:length]
                length--
                j--
            }
        }
    }
    return vols
}

// ListVols - list volumes.
func (s localStorage) ListVols() ([]VolInfo, error) {
    files, e := ioutil.ReadDir(s.diskPath)
    if e != nil {
        return nil, e
    }
    var volsInfo []VolInfo
    for _, file := range files {
        if !file.IsDir() {
            // If not directory, ignore all file types.
            continue
        }
        volInfo := VolInfo{
            Name:    file.Name(),
            Created: file.ModTime(),
        }
        volsInfo = append(volsInfo, volInfo)
    }
    // Remove duplicated volume entries.
    volsInfo = removeDuplicateVols(volsInfo)
    return volsInfo, nil
}

// getVolumeDir - will convert incoming volume names to
// corresponding valid volume names on the backend in a platform
// compatible way for all operating systems.
func getVolumeDir(diskPath, volume string) string {
    volumes, e := ioutil.ReadDir(diskPath)
    if e != nil {
        return volume
    }
    for _, vol := range volumes {
        // Verify if lowercase version of the volume
        // is equal to the incoming volume, then use the proper name.
        if strings.ToLower(vol.Name()) == volume {
            return filepath.Join(diskPath, vol.Name())
        }
    }
    return filepath.Join(diskPath, volume)
}

// StatVol - get volume info.
func (s localStorage) StatVol(volume string) (VolInfo, error) {
    volumeDir := getVolumeDir(s.diskPath, volume)
    // Stat a volume entry.
    st, e := os.Stat(volumeDir)
    if e != nil {
        return VolInfo{}, e
    }
    volInfo := VolInfo{}
    volInfo.Name = st.Name()
    volInfo.Created = st.ModTime()
    return volInfo, nil
}

// DeleteVol - delete a volume.
func (s localStorage) DeleteVol(volume string) error {
    return os.Remove(getVolumeDir(s.diskPath, volume))
}

/// File operations.

// ListFiles - list files at prefix and marker.
func (s localStorage) ListFiles(volume, prefix, marker string, recursive bool, count int) (files []FileInfo, isEOF bool, err error) {
    // TODO
    return files, true, nil
}

// ReadFile - read a file at a given offset.
func (s localStorage) ReadFile(volume string, path string, offset int64) (io.ReadCloser, error) {
    filePath := filepath.Join(getVolumeDir(s.diskPath, volume), path)
    file, e := os.Open(filePath)
    if e != nil {
        return nil, e
    }
    st, e := file.Stat()
    if e != nil {
        return nil, e
    }
    // Verify it is not a regular file, since a subsequent Seek is undefined.
    if !st.Mode().IsRegular() {
        return nil, ErrIsNotRegular
    }
    _, e = file.Seek(offset, os.SEEK_SET)
    if e != nil {
        return nil, e
    }
    return file, nil
}

// CreateFile - create a file at path.
func (s localStorage) CreateFile(volume, path string) (writeCloser io.WriteCloser, err error) {
    if e := checkDiskFree(s.diskPath, s.minFreeDisk); e != nil {
        return nil, e
    }
    filePath := filepath.Join(getVolumeDir(s.diskPath, volume), path)
    // Creates a safe file.
    return safe.CreateFileWithPrefix(filePath, "$tmpfile")
}

// StatFile - get file info.
func (s localStorage) StatFile(volume, path string) (file FileInfo, err error) {
    filePath := filepath.Join(getVolumeDir(s.diskPath, volume), path)
    st, e := os.Stat(filePath)
    if e != nil {
        return FileInfo{}, e
    }
    file = FileInfo{
        Volume:  volume,
        Name:    st.Name(),
        ModTime: st.ModTime(),
        Size:    st.Size(),
        Type:    st.Mode(),
    }
    return file, nil
}

// deleteFile - delete file path if it is empty.
func deleteFile(basePath, deletePath, volume, path string) error {
    if basePath == deletePath {
        return nil
    }
    // Verify if the path exists.
    pathSt, e := os.Stat(deletePath)
    if e != nil {
        return e
    }
    if pathSt.IsDir() {
        // Verify if directory is empty.
        empty, e := isDirEmpty(deletePath)
        if e != nil {
            return e
        }
        if !empty {
            return nil
        }
    }
    // Attempt to remove path.
    if e := os.Remove(deletePath); e != nil {
        return e
    }
    // Recursively go down the next path and delete again.
    if e := deleteFile(basePath, filepath.Dir(deletePath), volume, path); e != nil {
        return e
    }
    return nil
}

// DeleteFile - delete a file at path.
func (s localStorage) DeleteFile(volume, path string) error {
    volumeDir := getVolumeDir(s.diskPath, volume)

    // Following code is needed so that we retain "/" suffix if any
    // in path argument. Do not use filepath.Join() since it would
    // strip off any suffixes.
    filePath := s.diskPath + string(os.PathSeparator) + volume + string(os.PathSeparator) + path

    // Convert to platform friendly paths.
    filePath = filepath.FromSlash(filePath)

    // Delete file and delete parent directory as well if it is empty.
    return deleteFile(volumeDir, filePath, volume, path)
}
storage-rpc-datatypes.go (new file, 55 lines)
@@ -0,0 +1,55 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

// GenericReply generic rpc reply.
type GenericReply struct{}

// GenericArgs generic rpc args.
type GenericArgs struct{}

// ListVolsReply list vols rpc reply.
type ListVolsReply struct {
    Vols []VolInfo
}

// ListFilesArgs list file args.
type ListFilesArgs struct {
    Vol       string
    Prefix    string
    Marker    string
    Recursive bool
    Count     int
}

// ListFilesReply list file reply.
type ListFilesReply struct {
    Files []FileInfo
    EOF   bool
}

// StatFileArgs stat file args.
type StatFileArgs struct {
    Vol  string
    Path string
}

// DeleteFileArgs delete file args.
type DeleteFileArgs struct {
    Vol  string
    Path string
}
storage-rpc-server.go (new file, 149 lines)
@@ -0,0 +1,149 @@
package main

import (
	"io"
	"net/http"
	"net/rpc"
	"strconv"

	router "github.com/gorilla/mux"
)

// Storage server implements rpc primitives to facilitate exporting a
// disk over a network.
type storageServer struct {
	storage StorageAPI
}

/// Volume operations handlers

// MakeVolHandler - make vol handler is rpc wrapper for MakeVol operation.
func (s *storageServer) MakeVolHandler(arg *string, reply *GenericReply) error {
	return s.storage.MakeVol(*arg)
}

// ListVolsHandler - list vols handler is rpc wrapper for ListVols operation.
func (s *storageServer) ListVolsHandler(arg *string, reply *ListVolsReply) error {
	vols, err := s.storage.ListVols()
	if err != nil {
		return err
	}
	reply.Vols = vols
	return nil
}

// StatVolHandler - stat vol handler is a rpc wrapper for StatVol operation.
func (s *storageServer) StatVolHandler(arg *string, reply *VolInfo) error {
	volInfo, err := s.storage.StatVol(*arg)
	if err != nil {
		return err
	}
	*reply = volInfo
	return nil
}

// DeleteVolHandler - delete vol handler is a rpc wrapper for
// DeleteVol operation.
func (s *storageServer) DeleteVolHandler(arg *string, reply *GenericReply) error {
	return s.storage.DeleteVol(*arg)
}

/// File operations

// ListFilesHandler - list files handler.
func (s *storageServer) ListFilesHandler(arg *ListFilesArgs, reply *ListFilesReply) error {
	files, eof, err := s.storage.ListFiles(arg.Vol, arg.Prefix, arg.Marker, arg.Recursive, arg.Count)
	if err != nil {
		return err
	}

	// Fill reply structure.
	reply.Files = files
	reply.EOF = eof

	// Return success.
	return nil
}

// StatFileHandler - stat file handler is rpc wrapper to stat file.
func (s *storageServer) StatFileHandler(arg *StatFileArgs, reply *FileInfo) error {
	fileInfo, err := s.storage.StatFile(arg.Vol, arg.Path)
	if err != nil {
		return err
	}
	*reply = fileInfo
	return nil
}

// DeleteFileHandler - delete file handler is rpc wrapper to delete file.
func (s *storageServer) DeleteFileHandler(arg *DeleteFileArgs, reply *GenericReply) error {
	return s.storage.DeleteFile(arg.Vol, arg.Path)
}

// registerStorageRPCRouter - register storage rpc router.
func registerStorageRPCRouter(mux *router.Router, storageAPI StorageAPI) {
	stServer := &storageServer{
		storage: storageAPI,
	}
	storageRPCServer := rpc.NewServer()
	storageRPCServer.RegisterName("Storage", stServer)
	storageRouter := mux.NewRoute().PathPrefix(reservedBucket).Subrouter()

	// Add minio storage routes.
	storageRouter.Path("/rpc/storage").Handler(storageRPCServer)

	// StreamUpload - stream upload handler.
	storageRouter.Methods("POST").Path("/rpc/storage/upload/{volume}/{path:.+}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		vars := router.Vars(r)
		volume := vars["volume"]
		path := vars["path"]
		writeCloser, err := stServer.storage.CreateFile(volume, path)
		if err != nil {
			httpErr := http.StatusInternalServerError
			if err == errVolumeNotFound {
				httpErr = http.StatusNotFound
			} else if err == errIsNotRegular {
				httpErr = http.StatusConflict
			}
			http.Error(w, err.Error(), httpErr)
			return
		}
		reader := r.Body
		if _, err = io.Copy(writeCloser, reader); err != nil {
			safeCloseAndRemove(writeCloser)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		writeCloser.Close()
		reader.Close()
	})

	// StreamDownloadHandler - stream download handler.
	storageRouter.Methods("GET").Path("/rpc/storage/download/{volume}/{path:.+}").Queries("offset", "{offset:.*}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		vars := router.Vars(r)
		volume := vars["volume"]
		path := vars["path"]
		offset, err := strconv.ParseInt(r.URL.Query().Get("offset"), 10, 64)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		readCloser, err := stServer.storage.ReadFile(volume, path, offset)
		if err != nil {
			httpErr := http.StatusBadRequest
			if err == errVolumeNotFound || err == errFileNotFound {
				httpErr = http.StatusNotFound
			}
			http.Error(w, err.Error(), httpErr)
			return
		}

		// Copy reader to writer.
		io.Copy(w, readCloser)

		// Flush out any remaining buffers to client.
		w.(http.Flusher).Flush()

		// Close the reader.
		readCloser.Close()
	})
}
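The two streaming routes are plain HTTP rather than net/rpc, so any HTTP client can drive them. A sketch, assuming the same package (imports bytes, io/ioutil, log, net/http) and placeholder host, volume, and file names:

// Example (sketch): exercising the streaming endpoints with plain HTTP.
func exampleStreams() {
	base := "http://localhost:9000/minio/rpc/storage"

	// Upload: the handler pipes the request body into CreateFile.
	body := bytes.NewReader([]byte("hello"))
	resp, err := http.Post(base+"/upload/testvol/hello.txt", "application/octet-stream", body)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()

	// Download: offset is a required query parameter and must parse
	// as a base-10 int64, otherwise the handler answers 400.
	resp, err = http.Get(base + "/download/testvol/hello.txt?offset=0")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	data, _ := ioutil.ReadAll(resp.Body)
	log.Printf("downloaded %d bytes", len(data))
}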
@ -71,7 +71,7 @@ type ServerInfoRep struct {
}

// ServerInfo - get server info.
func (web *webAPI) ServerInfo(r *http.Request, args *WebGenericArgs, reply *ServerInfoRep) error {
func (web *webAPIHandlers) ServerInfo(r *http.Request, args *WebGenericArgs, reply *ServerInfoRep) error {
	if !isJWTReqAuthenticated(r) {
		return &json2.Error{Message: "Unauthorized request"}
	}
@ -106,16 +106,17 @@ type DiskInfoRep struct {
}

// DiskInfo - get disk statistics.
func (web *webAPI) DiskInfo(r *http.Request, args *WebGenericArgs, reply *DiskInfoRep) error {
	if !isJWTReqAuthenticated(r) {
		return &json2.Error{Message: "Unauthorized request"}
	}
	info, e := disk.GetInfo(web.ObjectAPI.(*Filesystem).GetRootPath())
	if e != nil {
		return &json2.Error{Message: e.Error()}
	}
	reply.DiskInfo = info
	reply.UIVersion = miniobrowser.UIVersion
func (web *webAPIHandlers) DiskInfo(r *http.Request, args *WebGenericArgs, reply *DiskInfoRep) error {
	// FIXME: bring StatFS into the StorageAPI interface and uncomment the lines below.
	// if !isJWTReqAuthenticated(r) {
	// 	return &json2.Error{Message: "Unauthorized request"}
	// }
	// info, e := disk.GetInfo(web.ObjectAPI.(*Filesystem).GetRootPath())
	// if e != nil {
	// 	return &json2.Error{Message: e.Error()}
	// }
	// reply.DiskInfo = info
	// reply.UIVersion = miniobrowser.UIVersion
	return nil
}
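The FIXME marks the layering problem this commit exposes: DiskInfo used to reach through ObjectAPI into the concrete *Filesystem, which no longer works once storage is abstracted behind StorageAPI. A hypothetical sketch of what the fix might look like; the StatFS method, the storage field on objectAPI, and the reuse of disk.Info are all assumptions, not part of this commit:

// Sketch only, not part of this commit: DiskInfo once StatFS lands
// on StorageAPI.
func (web *webAPIHandlers) DiskInfo(r *http.Request, args *WebGenericArgs, reply *DiskInfoRep) error {
	if !isJWTReqAuthenticated(r) {
		return &json2.Error{Message: "Unauthorized request"}
	}
	info, err := web.ObjectAPI.storage.StatFS() // hypothetical method and field
	if err != nil {
		return &json2.Error{Message: err.Error()}
	}
	reply.DiskInfo = info
	reply.UIVersion = miniobrowser.UIVersion
	return nil
}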
@ -125,7 +126,7 @@ type MakeBucketArgs struct {
}

// MakeBucket - make a bucket.
func (web *webAPI) MakeBucket(r *http.Request, args *MakeBucketArgs, reply *WebGenericRep) error {
func (web *webAPIHandlers) MakeBucket(r *http.Request, args *MakeBucketArgs, reply *WebGenericRep) error {
	if !isJWTReqAuthenticated(r) {
		return &json2.Error{Message: "Unauthorized request"}
	}

@ -152,7 +153,7 @@ type WebBucketInfo struct {
}

// ListBuckets - list buckets api.
func (web *webAPI) ListBuckets(r *http.Request, args *WebGenericArgs, reply *ListBucketsRep) error {
func (web *webAPIHandlers) ListBuckets(r *http.Request, args *WebGenericArgs, reply *ListBucketsRep) error {
	if !isJWTReqAuthenticated(r) {
		return &json2.Error{Message: "Unauthorized request"}
	}

@ -198,7 +199,7 @@ type WebObjectInfo struct {
}

// ListObjects - list objects api.
func (web *webAPI) ListObjects(r *http.Request, args *ListObjectsArgs, reply *ListObjectsRep) error {
func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, reply *ListObjectsRep) error {
	marker := ""
	if !isJWTReqAuthenticated(r) {
		return &json2.Error{Message: "Unauthorized request"}
@ -212,7 +213,7 @@ func (web *webAPI) ListObjects(r *http.Request, args *ListObjectsArgs, reply *ListObjectsRep) error {
	for _, obj := range lo.Objects {
		reply.Objects = append(reply.Objects, WebObjectInfo{
			Key:          obj.Name,
			LastModified: obj.ModifiedTime,
			LastModified: obj.ModTime,
			Size:         obj.Size,
		})
	}
@ -237,7 +238,7 @@ type RemoveObjectArgs struct {
}

// RemoveObject - removes an object.
func (web *webAPI) RemoveObject(r *http.Request, args *RemoveObjectArgs, reply *WebGenericRep) error {
func (web *webAPIHandlers) RemoveObject(r *http.Request, args *RemoveObjectArgs, reply *WebGenericRep) error {
	if !isJWTReqAuthenticated(r) {
		return &json2.Error{Message: "Unauthorized request"}
	}

@ -262,7 +263,7 @@ type LoginRep struct {
}

// Login - user login handler.
func (web *webAPI) Login(r *http.Request, args *LoginArgs, reply *LoginRep) error {
func (web *webAPIHandlers) Login(r *http.Request, args *LoginArgs, reply *LoginRep) error {
	jwt := initJWT()
	if jwt.Authenticate(args.Username, args.Password) {
		token, err := jwt.GenerateToken(args.Username)

@ -283,7 +284,7 @@ type GenerateAuthReply struct {
	UIVersion string `json:"uiVersion"`
}

func (web webAPI) GenerateAuth(r *http.Request, args *WebGenericArgs, reply *GenerateAuthReply) error {
func (web webAPIHandlers) GenerateAuth(r *http.Request, args *WebGenericArgs, reply *GenerateAuthReply) error {
	if !isJWTReqAuthenticated(r) {
		return &json2.Error{Message: "Unauthorized request"}
	}

@ -307,7 +308,7 @@ type SetAuthReply struct {
}

// SetAuth - Set accessKey and secretKey credentials.
func (web *webAPI) SetAuth(r *http.Request, args *SetAuthArgs, reply *SetAuthReply) error {
func (web *webAPIHandlers) SetAuth(r *http.Request, args *SetAuthArgs, reply *SetAuthReply) error {
	if !isJWTReqAuthenticated(r) {
		return &json2.Error{Message: "Unauthorized request"}
	}

@ -344,7 +345,7 @@ type GetAuthReply struct {
}

// GetAuth - return accessKey and secretKey credentials.
func (web *webAPI) GetAuth(r *http.Request, args *WebGenericArgs, reply *GetAuthReply) error {
func (web *webAPIHandlers) GetAuth(r *http.Request, args *WebGenericArgs, reply *GetAuthReply) error {
	if !isJWTReqAuthenticated(r) {
		return &json2.Error{Message: "Unauthorized request"}
	}
@ -356,7 +357,7 @@ func (web *webAPI) GetAuth(r *http.Request, args *WebGenericArgs, reply *GetAuthReply) error {
}

// Upload - file upload handler.
func (web *webAPI) Upload(w http.ResponseWriter, r *http.Request) {
func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
	if !isJWTReqAuthenticated(r) {
		writeWebErrorResponse(w, errInvalidToken)
		return

@ -370,7 +371,7 @@ func (web *webAPI) Upload(w http.ResponseWriter, r *http.Request) {
}

// Download - file download handler.
func (web *webAPI) Download(w http.ResponseWriter, r *http.Request) {
func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object := vars["object"]
@ -29,8 +29,8 @@ import (
)

// webAPI container for Web API.
type webAPI struct {
	ObjectAPI ObjectAPI
type webAPIHandlers struct {
	ObjectAPI *objectAPI
}

// indexHandler - Handler to serve index.html

@ -58,7 +58,7 @@ func assetFS() *assetfs.AssetFS {
const specialAssets = "loader.css|logo.svg|firefox.png|safari.png|chrome.png|favicon.ico"

// registerWebRouter - registers web router for serving minio browser.
func registerWebRouter(mux *router.Router, web *webAPI) {
func registerWebRouter(mux *router.Router, web *webAPIHandlers) {
	// Initialize a new json2 codec.
	codec := json2.NewCodec()
@ -74,7 +74,7 @@ func registerWebRouter(mux *router.Router, web *webAPI) {
	// RPC handler at URI - /minio/webrpc
	webBrowserRouter.Methods("POST").Path("/webrpc").Handler(webRPC)
	webBrowserRouter.Methods("PUT").Path("/upload/{bucket}/{object:.+}").HandlerFunc(web.Upload)
	webBrowserRouter.Methods("GET").Path("/download/{bucket}/{object:.+}").Queries("token", "").HandlerFunc(web.Download)
	webBrowserRouter.Methods("GET").Path("/download/{bucket}/{object:.+}").Queries("token", "{token:.*}").HandlerFunc(web.Download)

	// Add compression for assets.
	compressedAssets := handlers.CompressHandler(http.StripPrefix(reservedBucket, http.FileServer(assetFS())))
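Taken together, the split lets one process export raw storage over RPC and serve the browser off the same mux. A minimal wiring sketch, assuming a StorageAPI and an *objectAPI constructed elsewhere in the package, with a placeholder listen address:

// Example (sketch): mounting both routers on one gorilla/mux router.
func serveExample(storage StorageAPI, objAPI *objectAPI) {
	mux := router.NewRouter()
	registerStorageRPCRouter(mux, storage)
	registerWebRouter(mux, &webAPIHandlers{ObjectAPI: objAPI})
	log.Fatal(http.ListenAndServe(":9000", mux))
}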