Merge pull request #1290 from balamurugana/devel
Refactor multipart upload
commit 06e3171076
@@ -359,7 +359,12 @@ func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMult
    listMultipartUploadsResponse.MaxUploads = multipartsInfo.MaxUploads
    listMultipartUploadsResponse.NextUploadIDMarker = multipartsInfo.NextUploadIDMarker
    listMultipartUploadsResponse.UploadIDMarker = multipartsInfo.UploadIDMarker

    listMultipartUploadsResponse.CommonPrefixes = make([]CommonPrefix, len(multipartsInfo.CommonPrefixes))
    for index, commonPrefix := range multipartsInfo.CommonPrefixes {
        listMultipartUploadsResponse.CommonPrefixes[index] = CommonPrefix{
            Prefix: commonPrefix,
        }
    }
    listMultipartUploadsResponse.Uploads = make([]Upload, len(multipartsInfo.Uploads))
    for index, upload := range multipartsInfo.Uploads {
        newUpload := Upload{}
@@ -179,8 +179,22 @@ func (api objectStorageAPI) ListMultipartUploadsHandler(w http.ResponseWriter, r
        writeErrorResponse(w, r, ErrInvalidMaxUploads, r.URL.Path)
        return
    }
    if maxUploads == 0 {
        maxUploads = maxObjectList
    }

    if keyMarker != "" {
        // Unescape keyMarker string
        keyMarkerUnescaped, e := url.QueryUnescape(keyMarker)
        if e != nil {
            // Return 'NoSuchKey' to indicate invalid marker key.
            writeErrorResponse(w, r, ErrNoSuchKey, r.URL.Path)
            return
        }
        keyMarker = keyMarkerUnescaped
        // Marker not common with prefix is not implemented.
        if !strings.HasPrefix(keyMarker, prefix) {
            writeErrorResponse(w, r, ErrNotImplemented, r.URL.Path)
            return
        }
    }

    listMultipartsInfo, err := api.ObjectAPI.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
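The marker handling added here unescapes the URL-encoded key marker and rejects any marker that does not fall under the requested prefix. A minimal standalone sketch of that validation follows; validateKeyMarker is an illustrative helper, and where it returns plain errors the handler above answers with ErrNoSuchKey and ErrNotImplemented instead:

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    // validateKeyMarker mirrors the handler's checks: unescape the marker,
    // then require it to share the listing prefix.
    func validateKeyMarker(keyMarker, prefix string) (string, error) {
        unescaped, err := url.QueryUnescape(keyMarker)
        if err != nil {
            return "", fmt.Errorf("invalid key marker: %v", err)
        }
        if !strings.HasPrefix(unescaped, prefix) {
            return "", fmt.Errorf("marker %q outside prefix %q is not implemented", unescaped, prefix)
        }
        return unescaped, nil
    }

    func main() {
        marker, err := validateKeyMarker("2012%2Fphotos%2Fparis.jpg", "2012/")
        fmt.Println(marker, err) // 2012/photos/paris.jpg <nil>
    }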
@@ -1,56 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
    "github.com/minio/minio/pkg/probe"
    "github.com/minio/minio/pkg/quick"
)

var multipartsMetadataPath string

// SetFSMultipartsMetadataPath - set custom multiparts session metadata path.
func setFSMultipartsMetadataPath(metadataPath string) {
    multipartsMetadataPath = metadataPath
}

// saveMultipartsSession - save multiparts.
func saveMultipartsSession(mparts multiparts) *probe.Error {
    qc, err := quick.New(mparts)
    if err != nil {
        return err.Trace()
    }
    if err := qc.Save(multipartsMetadataPath); err != nil {
        return err.Trace()
    }
    return nil
}

// loadMultipartsSession load multipart session file.
func loadMultipartsSession() (*multiparts, *probe.Error) {
    mparts := &multiparts{}
    mparts.Version = "1"
    mparts.ActiveSession = make(map[string]*multipartSession)
    qc, err := quick.New(mparts)
    if err != nil {
        return nil, err.Trace()
    }
    if err := qc.Load(multipartsMetadataPath); err != nil {
        return nil, err.Trace()
    }
    return qc.Data().(*multiparts), nil
}
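The file deleted above persisted active multipart sessions to a JSON file through pkg/quick. For reference, a minimal sketch of the removed flow, assuming it sits in the old package so the deleted helpers and the multiparts / multipartSession types (then still defined in fs.go) are in scope; the function name and path are illustrative only:

    // Sketch only: exampleSessionPersistence is not part of the repository.
    func exampleSessionPersistence() *probe.Error {
        setFSMultipartsMetadataPath("/tmp/$multiparts-session.json") // illustrative path

        mparts := multiparts{
            Version:       "1",
            ActiveSession: make(map[string]*multipartSession),
        }
        if err := saveMultipartsSession(mparts); err != nil {
            return err.Trace()
        }

        loaded, err := loadMultipartsSession()
        if err != nil {
            return err.Trace()
        }
        _ = loaded.ActiveSession
        return nil
    }

The refactor drops this on-disk session file entirely; upload metadata is instead discovered by scanning the multipart directory layout introduced in fs-multipart-dir.go below.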
@@ -31,12 +31,15 @@ const (
 )

 // isDirExist - returns whether given directory is exist or not.
-func isDirExist(dirname string) (status bool, err error) {
-    fi, err := os.Lstat(dirname)
-    if err == nil {
-        status = fi.IsDir()
-    }
-    return
+func isDirExist(dirname string) (bool, error) {
+    fi, e := os.Lstat(dirname)
+    if e != nil {
+        if os.IsNotExist(e) {
+            return false, nil
+        }
+        return false, e
+    }
+    return fi.IsDir(), nil
 }

 func (fs *Filesystem) saveTreeWalk(params listObjectParams, walker *treeWalker) {
@@ -124,9 +127,8 @@ func (fs Filesystem) ListObjects(bucket, prefix, marker, delimiter string, maxKe
     // Verify if prefix exists.
     prefixDir := filepath.Dir(filepath.FromSlash(prefix))
     rootDir := filepath.Join(bucketDir, prefixDir)
-    _, e := isDirExist(rootDir)
-    if e != nil {
-        if os.IsNotExist(e) {
+    if status, e := isDirExist(rootDir); !status {
+        if e == nil {
             // Prefix does not exist, not an error just respond empty
             // list response.
             return result, nil
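The refactored isDirExist reports a missing directory as (false, nil) instead of bubbling os.IsNotExist up to every caller, which is what lets ListObjects treat an absent prefix as an empty listing. A self-contained sketch of the same behaviour (the package main wrapper is illustrative):

    package main

    import (
        "fmt"
        "os"
    )

    // isDirExist mirrors the refactored helper: a missing path is not an
    // error, it simply reports false.
    func isDirExist(dirname string) (bool, error) {
        fi, e := os.Lstat(dirname)
        if e != nil {
            if os.IsNotExist(e) {
                return false, nil
            }
            return false, e
        }
        return fi.IsDir(), nil
    }

    func main() {
        status, err := isDirExist("/no/such/prefix")
        fmt.Println(status, err) // false <nil>
    }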
395  fs-multipart-dir.go  Normal file
@@ -0,0 +1,395 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
    "errors"
    "io"
    "os"
    "path/filepath"
    "sort"
    "strings"
    "time"
)

// DirEntry - directory entry
type DirEntry struct {
    Name    string
    Size    int64
    Mode    os.FileMode
    ModTime time.Time
}

// IsDir - returns true if DirEntry is a directory
func (entry DirEntry) IsDir() bool {
    return entry.Mode.IsDir()
}

// IsSymlink - returns true if DirEntry is a symbolic link
func (entry DirEntry) IsSymlink() bool {
    return entry.Mode&os.ModeSymlink == os.ModeSymlink
}

// IsRegular - returns true if DirEntry is a regular file
func (entry DirEntry) IsRegular() bool {
    return entry.Mode.IsRegular()
}

// sort interface for DirEntry slice
type byEntryName []DirEntry

func (f byEntryName) Len() int           { return len(f) }
func (f byEntryName) Swap(i, j int)      { f[i], f[j] = f[j], f[i] }
func (f byEntryName) Less(i, j int) bool { return f[i].Name < f[j].Name }

func filteredReaddir(dirname string, filter func(DirEntry) bool, appendPath bool) ([]DirEntry, error) {
    result := []DirEntry{}

    d, err := os.Open(dirname)
    if err != nil {
        return result, err
    }

    defer d.Close()

    for {
        fis, err := d.Readdir(1000)
        if err != nil {
            if err == io.EOF {
                break
            }

            return result, err
        }

        for _, fi := range fis {
            name := fi.Name()
            if appendPath {
                name = filepath.Join(dirname, name)
            }

            if fi.IsDir() {
                name += string(os.PathSeparator)
            }

            entry := DirEntry{Name: name, Size: fi.Size(), Mode: fi.Mode(), ModTime: fi.ModTime()}

            if filter == nil || filter(entry) {
                result = append(result, entry)
            }
        }
    }

    sort.Sort(byEntryName(result))

    return result, nil
}

func filteredReaddirnames(dirname string, filter func(string) bool) ([]string, error) {
    result := []string{}
    d, err := os.Open(dirname)
    if err != nil {
        return result, err
    }

    defer d.Close()

    for {
        names, err := d.Readdirnames(1000)
        if err != nil {
            if err == io.EOF {
                break
            }

            return result, err
        }

        for _, name := range names {
            if filter == nil || filter(name) {
                result = append(result, name)
            }
        }
    }

    sort.Strings(result)

    return result, nil
}

func scanMultipartDir(bucketDir, prefixPath, markerPath, uploadIDMarker string, recursive bool) multipartObjectInfoChannel {
    objectInfoCh := make(chan multipartObjectInfo, listObjectsLimit)
    timeoutCh := make(chan struct{}, 1)

    // TODO: check if bucketDir is absolute path
    scanDir := bucketDir
    dirDepth := bucketDir

    if prefixPath != "" {
        if !filepath.IsAbs(prefixPath) {
            tmpPrefixPath := filepath.Join(bucketDir, prefixPath)
            if strings.HasSuffix(prefixPath, string(os.PathSeparator)) {
                tmpPrefixPath += string(os.PathSeparator)
            }
            prefixPath = tmpPrefixPath
        }

        // TODO: check if prefixPath starts with bucketDir

        // Case #1: if prefixPath is /mnt/mys3/mybucket/2012/photos/paris, then
        // dirDepth is /mnt/mys3/mybucket/2012/photos
        // Case #2: if prefixPath is /mnt/mys3/mybucket/2012/photos/, then
        // dirDepth is /mnt/mys3/mybucket/2012/photos
        dirDepth = filepath.Dir(prefixPath)
        scanDir = dirDepth
    } else {
        prefixPath = bucketDir
    }

    if markerPath != "" {
        if !filepath.IsAbs(markerPath) {
            tmpMarkerPath := filepath.Join(bucketDir, markerPath)
            if strings.HasSuffix(markerPath, string(os.PathSeparator)) {
                tmpMarkerPath += string(os.PathSeparator)
            }

            markerPath = tmpMarkerPath
        }

        // TODO: check markerPath must be a file
        if uploadIDMarker != "" {
            markerPath = filepath.Join(markerPath, uploadIDMarker+uploadIDSuffix)
        }

        // TODO: check if markerPath starts with bucketDir
        // TODO: check if markerPath starts with prefixPath

        // Case #1: if markerPath is /mnt/mys3/mybucket/2012/photos/gophercon.png, then
        // scanDir is /mnt/mys3/mybucket/2012/photos
        // Case #2: if markerPath is /mnt/mys3/mybucket/2012/photos/gophercon.png/1fbd117a-268a-4ed0-85c9-8cc3888cbf20.uploadid, then
        // scanDir is /mnt/mys3/mybucket/2012/photos/gophercon.png
        // Case #3: if markerPath is /mnt/mys3/mybucket/2012/photos/, then
        // scanDir is /mnt/mys3/mybucket/2012/photos

        scanDir = filepath.Dir(markerPath)
    } else {
        markerPath = bucketDir
    }

    // Have bucketDir ends with os.PathSeparator
    if !strings.HasSuffix(bucketDir, string(os.PathSeparator)) {
        bucketDir += string(os.PathSeparator)
    }

    // Remove os.PathSeparator if scanDir ends with
    if strings.HasSuffix(scanDir, string(os.PathSeparator)) {
        scanDir = filepath.Dir(scanDir)
    }

    // goroutine - retrieves directory entries, makes ObjectInfo and sends into the channel.
    go func() {
        defer close(objectInfoCh)
        defer close(timeoutCh)

        // send function - returns true if ObjectInfo is sent
        // within (time.Second * 15) else false on timeout.
        send := func(oi multipartObjectInfo) bool {
            timer := time.After(time.Second * 15)
            select {
            case objectInfoCh <- oi:
                return true
            case <-timer:
                timeoutCh <- struct{}{}
                return false
            }
        }

        for {
            entries, err := filteredReaddir(scanDir,
                func(entry DirEntry) bool {
                    if entry.IsDir() || (entry.IsRegular() && strings.HasSuffix(entry.Name, uploadIDSuffix)) {
                        return strings.HasPrefix(entry.Name, prefixPath) && entry.Name > markerPath
                    }

                    return false
                },
                true)
            if err != nil {
                send(multipartObjectInfo{Err: err})
                return
            }

            var entry DirEntry
            for len(entries) > 0 {
                entry, entries = entries[0], entries[1:]

                if entry.IsRegular() {
                    // Handle uploadid file
                    name := strings.Replace(filepath.Dir(entry.Name), bucketDir, "", 1)
                    if name == "" {
                        // This should not happen ie uploadid file should not be in bucket directory
                        send(multipartObjectInfo{Err: errors.New("corrupted meta data")})
                        return
                    }

                    uploadID := strings.Split(filepath.Base(entry.Name), uploadIDSuffix)[0]

                    objInfo := multipartObjectInfo{
                        Name:         name,
                        UploadID:     uploadID,
                        ModifiedTime: entry.ModTime,
                    }

                    if !send(objInfo) {
                        return
                    }

                    continue
                }

                subentries, err := filteredReaddir(entry.Name,
                    func(entry DirEntry) bool {
                        return entry.IsDir() || (entry.IsRegular() && strings.HasSuffix(entry.Name, uploadIDSuffix))
                    },
                    true)
                if err != nil {
                    send(multipartObjectInfo{Err: err})
                    return
                }

                subDirFound := false
                uploadIDEntries := []DirEntry{}
                // If subentries has a directory, then current entry needs to be sent
                for _, subentry := range subentries {
                    if subentry.IsDir() {
                        subDirFound = true

                        if recursive {
                            break
                        }
                    }

                    if !recursive && subentry.IsRegular() {
                        uploadIDEntries = append(uploadIDEntries, subentry)
                    }
                }

                if subDirFound || len(subentries) == 0 {
                    objInfo := multipartObjectInfo{
                        Name:         strings.Replace(entry.Name, bucketDir, "", 1),
                        ModifiedTime: entry.ModTime,
                        IsDir:        true,
                    }

                    if !send(objInfo) {
                        return
                    }
                }

                if recursive {
                    entries = append(subentries, entries...)
                } else {
                    entries = append(uploadIDEntries, entries...)
                }
            }

            if !recursive {
                break
            }

            markerPath = scanDir + string(os.PathSeparator)

            if scanDir = filepath.Dir(scanDir); scanDir < dirDepth {
                break
            }
        }
    }()

    return multipartObjectInfoChannel{ch: objectInfoCh, timeoutCh: timeoutCh}
}

// multipartObjectInfo - Multipart object info
type multipartObjectInfo struct {
    Name         string
    UploadID     string
    ModifiedTime time.Time
    IsDir        bool
    Err          error
}

// multipartObjectInfoChannel - multipart object info channel
type multipartObjectInfoChannel struct {
    ch        <-chan multipartObjectInfo
    objInfo   *multipartObjectInfo
    closed    bool
    timeoutCh <-chan struct{}
    timedOut  bool
}

func (oic *multipartObjectInfoChannel) Read() (multipartObjectInfo, bool) {
    if oic.closed {
        return multipartObjectInfo{}, false
    }

    if oic.objInfo == nil {
        // First read.
        if oi, ok := <-oic.ch; ok {
            oic.objInfo = &oi
        } else {
            oic.closed = true
            return multipartObjectInfo{}, false
        }
    }

    retObjInfo := *oic.objInfo
    status := true
    oic.objInfo = nil

    // Read once more to know whether it was last read.
    if oi, ok := <-oic.ch; ok {
        oic.objInfo = &oi
    } else {
        oic.closed = true
    }

    return retObjInfo, status
}

// IsClosed - return whether channel is closed or not.
func (oic multipartObjectInfoChannel) IsClosed() bool {
    if oic.objInfo != nil {
        return false
    }
    return oic.closed
}

// IsTimedOut - return whether channel is closed due to timeout.
func (oic multipartObjectInfoChannel) IsTimedOut() bool {
    if oic.timedOut {
        return true
    }

    select {
    case _, ok := <-oic.timeoutCh:
        if ok {
            oic.timedOut = true
            return true
        }
        return false
    default:
        return false
    }
}
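scanMultipartDir returns immediately and streams results through multipartObjectInfoChannel; the producer goroutine gives up after 15 seconds if nobody reads, which is what IsTimedOut reports. A minimal consumer sketch under those semantics, assuming it lives in the same package as the new file (the function name and limit handling are illustrative):

    // collectMultipartUploads drains up to maxUploads entries from a scan.
    func collectMultipartUploads(bucketDir, prefix string, maxUploads int) (uploads []multipartObjectInfo, timedOut bool, err error) {
        ch := scanMultipartDir(bucketDir, prefix, "", "", true)
        for len(uploads) < maxUploads {
            oi, ok := ch.Read()
            if !ok {
                break // channel closed, listing exhausted
            }
            if oi.Err != nil {
                return nil, false, oi.Err
            }
            if oi.IsDir {
                continue // directory entries stand in for common prefixes here
            }
            uploads = append(uploads, oi)
        }
        // If the producer goroutine waited more than 15s without a reader,
        // it gave up; callers must start a fresh scan to resume.
        return uploads, ch.IsTimedOut(), nil
    }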
1064  fs-multipart.go
File diff suppressed because it is too large

64  fs.go
@@ -17,10 +17,7 @@
 package main

 import (
-    "os"
-    "path/filepath"
     "sync"
-    "time"

     "github.com/minio/minio/pkg/probe"
 )
@@ -33,58 +30,32 @@ type listObjectParams struct {
     prefix    string
 }

+// listMultipartObjectParams - list multipart object params used for list multipart object map
+type listMultipartObjectParams struct {
+    bucket         string
+    delimiter      string
+    keyMarker      string
+    prefix         string
+    uploadIDMarker string
+}
+
 // Filesystem - local variables
 type Filesystem struct {
-    path               string
-    minFreeDisk        int64
-    rwLock             *sync.RWMutex
-    multiparts         *multiparts
-    listObjectMap      map[listObjectParams][]*treeWalker
-    listObjectMapMutex *sync.Mutex
-}
-
-// MultipartSession holds active session information
-type multipartSession struct {
-    TotalParts int
-    ObjectName string
-    UploadID   string
-    Initiated  time.Time
-    Parts      []partInfo
-}
-
-// multiparts collection of many parts
-type multiparts struct {
-    Version       string                       `json:"version"`
-    ActiveSession map[string]*multipartSession `json:"activeSessions"`
+    path                        string
+    minFreeDisk                 int64
+    rwLock                      *sync.RWMutex
+    listObjectMap               map[listObjectParams][]*treeWalker
+    listObjectMapMutex          *sync.Mutex
+    listMultipartObjectMap      map[listMultipartObjectParams][]multipartObjectInfoChannel
+    listMultipartObjectMapMutex *sync.Mutex
 }

 // newFS instantiate a new filesystem.
 func newFS(rootPath string) (ObjectAPI, *probe.Error) {
-    setFSMultipartsMetadataPath(filepath.Join(rootPath, "$multiparts-session.json"))
-
-    var err *probe.Error
-    // load multiparts session from disk
-    var mparts *multiparts
-    mparts, err = loadMultipartsSession()
-    if err != nil {
-        if os.IsNotExist(err.ToGoError()) {
-            mparts = &multiparts{
-                Version:       "1",
-                ActiveSession: make(map[string]*multipartSession),
-            }
-            if err = saveMultipartsSession(*mparts); err != nil {
-                return nil, err.Trace()
-            }
-        } else {
-            return nil, err.Trace()
-        }
-    }
-
     fs := &Filesystem{
         rwLock: &sync.RWMutex{},
     }
     fs.path = rootPath
-    fs.multiparts = mparts

     /// Defaults

@@ -94,6 +65,9 @@ func newFS(rootPath string) (ObjectAPI, *probe.Error) {
     fs.listObjectMap = make(map[listObjectParams][]*treeWalker)
     fs.listObjectMapMutex = &sync.Mutex{}

+    fs.listMultipartObjectMap = make(map[listMultipartObjectParams][]multipartObjectInfoChannel)
+    fs.listMultipartObjectMapMutex = &sync.Mutex{}
+
     // Return here.
     return fs, nil
 }
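The new listMultipartObjectMap mirrors the existing listObjectMap/treeWalker pattern: it caches in-flight scanMultipartDir channels keyed by the listing parameters, presumably so a follow-up ListMultipartUploads call with the same marker can resume an existing scan. The helpers that actually do this live in fs-multipart.go, whose diff is suppressed above; the sketch below only illustrates the save/lookup pattern implied by the struct fields, with hypothetical method names:

    // Illustrative only: method names are hypothetical, not taken from fs-multipart.go.
    func (fs *Filesystem) saveMultipartCh(params listMultipartObjectParams, ch multipartObjectInfoChannel) {
        fs.listMultipartObjectMapMutex.Lock()
        defer fs.listMultipartObjectMapMutex.Unlock()
        fs.listMultipartObjectMap[params] = append(fs.listMultipartObjectMap[params], ch)
    }

    func (fs *Filesystem) lookupMultipartCh(params listMultipartObjectParams) (multipartObjectInfoChannel, bool) {
        fs.listMultipartObjectMapMutex.Lock()
        defer fs.listMultipartObjectMapMutex.Unlock()
        chs, ok := fs.listMultipartObjectMap[params]
        if !ok || len(chs) == 0 {
            return multipartObjectInfoChannel{}, false
        }
        ch := chs[0]
        if len(chs) == 1 {
            delete(fs.listMultipartObjectMap, params)
        } else {
            fs.listMultipartObjectMap[params] = chs[1:]
        }
        return ch, true
    }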
@@ -79,7 +79,7 @@ func testMultipartObjectCreation(c *check.C, create func() ObjectAPI) {
     }
     objInfo, err := fs.CompleteMultipartUpload("bucket", "key", uploadID, completedParts.Parts)
     c.Assert(err, check.IsNil)
-    c.Assert(objInfo.MD5Sum, check.Equals, "9b7d6f13ba00e24d0b02de92e814891b-10")
+    c.Assert(objInfo.MD5Sum, check.Equals, "3605d84b1c43b1a664aa7c0d5082d271-10")
 }

 func testMultipartObjectAbort(c *check.C, create func() ObjectAPI) {
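The expected MD5Sum changes with the refactor but keeps the "-10" suffix, which matches the S3-style multipart ETag scheme: the MD5 of the concatenated binary MD5 digests of each part, followed by the part count. A self-contained sketch of that computation (the general scheme, not the repository's helper):

    package main

    import (
        "crypto/md5"
        "encoding/hex"
        "fmt"
    )

    // multipartETag computes an S3-style multipart ETag: md5 over the
    // concatenated binary MD5 digests of each part, plus "-<part count>".
    func multipartETag(parts [][]byte) string {
        var concat []byte
        for _, part := range parts {
            sum := md5.Sum(part)
            concat = append(concat, sum[:]...)
        }
        final := md5.Sum(concat)
        return fmt.Sprintf("%s-%d", hex.EncodeToString(final[:]), len(parts))
    }

    func main() {
        parts := [][]byte{[]byte("part-one"), []byte("part-two")}
        fmt.Println(multipartETag(parts)) // <32 hex chars>-2
    }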
202  vendor/github.com/skyrings/skyring-common/LICENSE  generated vendored  Normal file
@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright {yyyy} {name of copyright owner}

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
133  vendor/github.com/skyrings/skyring-common/tools/uuid/uuid.go  generated vendored  Normal file
@@ -0,0 +1,133 @@
// Copyright 2015 Red Hat, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//  http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package uuid

import (
    "crypto/rand"
    "encoding/hex"
    "errors"
    "fmt"
    "io"
    "strings"
)

// UUID is 128bits = 16bytes
type UUID [16]byte

func (uuid UUID) String() string {
    return fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:])
}

func (uuid UUID) IsZero() bool {
    var zeroUuid UUID
    return Equal(zeroUuid, uuid)
}

func (uuid UUID) MarshalJSON() ([]byte, error) {
    return []byte(`"` + uuid.String() + `"`), nil
}

func (uuid *UUID) UnmarshalJSON(b []byte) error {
    if u, err := Parse(string(b)); err != nil {
        return err
    } else {
        copy(uuid[:], u[:])
        return nil
    }
}

func New() (*UUID, error) {
    uuid := new(UUID)

    n, err := io.ReadFull(rand.Reader, uuid[:])
    if err != nil {
        return nil, err
    } else if n != len(uuid) {
        return nil, errors.New(fmt.Sprintf("insufficient random data (expected: %d, read: %d)", len(uuid), n))
    } else {
        // variant bits; for more info
        // see https://www.ietf.org/rfc/rfc4122.txt section 4.1.1
        uuid[8] = uuid[8]&0x3f | 0x80
        // version 4 (pseudo-random); for more info
        // see https://www.ietf.org/rfc/rfc4122.txt section 4.1.3
        uuid[6] = uuid[6]&0x0f | 0x40
    }

    return uuid, nil
}

func Equal(uuid1 UUID, uuid2 UUID) bool {
    for i, v := range uuid1 {
        if v != uuid2[i] {
            return false
        }
    }

    return true
}

func Parse(s string) (*UUID, error) {
    // the string format should be either in
    // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx (or)
    // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    // If the uuid is marshaled by us we add " " around the uuid.
    // while parsing this, we have to remove the " " around the
    // uuid. So we check if uuid has " " around it, if yes we remove
    // it.

    if strings.HasPrefix(s, "\"") && strings.HasSuffix(s, "\"") {
        s = s[1 : len(s)-1]
    }

    uuid := new(UUID)
    if len(s) == 36 {
        if ba, err := hex.DecodeString(s[0:8]); err == nil {
            copy(uuid[:4], ba)
        } else {
            return nil, err
        }
        if ba, err := hex.DecodeString(s[9:13]); err == nil {
            copy(uuid[4:], ba)
        } else {
            return nil, err
        }
        if ba, err := hex.DecodeString(s[14:18]); err == nil {
            copy(uuid[6:], ba)
        } else {
            return nil, err
        }
        if ba, err := hex.DecodeString(s[19:23]); err == nil {
            copy(uuid[8:], ba)
        } else {
            return nil, err
        }
        if ba, err := hex.DecodeString(s[24:]); err == nil {
            copy(uuid[10:], ba)
        } else {
            return nil, err
        }
    } else if len(s) == 32 {
        if ba, err := hex.DecodeString(s); err == nil {
            copy(uuid[:], ba)
        } else {
            return nil, err
        }
    } else {
        return nil, errors.New("unknown UUID string " + s)
    }

    return uuid, nil
}
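A quick usage sketch of the newly vendored package, presumably how the refactored multipart code derives its .uploadid identifiers (the import path is the vendored one added above; the main wrapper is illustrative):

    package main

    import (
        "fmt"

        "github.com/skyrings/skyring-common/tools/uuid"
    )

    func main() {
        // New() returns a random (version 4) UUID, e.g. usable as an upload ID.
        id, err := uuid.New()
        if err != nil {
            panic(err)
        }
        fmt.Println(id.String()) // xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx

        // Parse accepts both the dashed and the 32-hex-character forms.
        parsed, err := uuid.Parse(id.String())
        if err != nil {
            panic(err)
        }
        fmt.Println(uuid.Equal(*id, *parsed)) // true
    }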
9  vendor/vendor.json  vendored
@@ -107,6 +107,12 @@
             "revision": "eb527c8097e0f19a3ff7b253a3fe70545070f420",
             "revisionTime": "2015-08-29T22:34:20-07:00"
         },
+        {
+            "checksumSHA1": "u0hXGADM3JDza8YjgiyNJpAJk8g=",
+            "path": "github.com/skyrings/skyring-common/tools/uuid",
+            "revision": "762fd2bfc12e766d90478d638255981ab1966a3d",
+            "revisionTime": "2016-03-24T19:44:43+05:30"
+        },
         {
             "path": "golang.org/x/crypto/bcrypt",
             "revision": "7b85b097bf7527677d54d3220065e966a0e3b613",
@@ -122,5 +128,6 @@
             "revision": "11d3bc7aa68e238947792f30573146a3231fc0f1",
             "revisionTime": "2015-07-29T10:04:31+02:00"
         }
-    ]
+    ],
+    "rootPath": "github.com/minio/minio"
 }