// fs-multipart.go - multipart upload support for the filesystem backend.

/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fs
import (
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/minio/minio-xl/pkg/atomic"
"github.com/minio/minio-xl/pkg/crypto/sha256"
"github.com/minio/minio-xl/pkg/crypto/sha512"
"github.com/minio/minio-xl/pkg/probe"
"github.com/minio/minio/pkg/contentdb"
// contentdb maps file extensions to content types.
"github.com/minio/minio/pkg/disk"
)
// Multipart session operations for the filesystem backend follow.
// isValidUploadID - is upload id.
2015-10-17 22:17:33 -04:00
func (fs Filesystem) isValidUploadID(object, uploadID string) bool {
s, ok := fs.multiparts.ActiveSession[object]
if !ok {
return false
}
if uploadID == s.UploadID {
return true
}
return false
}
// ListMultipartUploads - list incomplete multipart sessions for a given BucketMultipartResourcesMetadata
2015-10-17 22:17:33 -04:00
func (fs Filesystem) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error) {
2016-02-04 23:40:58 -05:00
fs.rwLock.RLock()
defer fs.rwLock.RUnlock()
// Input validation.
if !IsValidBucketName(bucket) {
return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
bucket = fs.denormalizeBucket(bucket)
bucketPath := filepath.Join(fs.path, bucket)
if _, e := os.Stat(bucketPath); e != nil {
2016-02-04 23:40:58 -05:00
// Check bucket exists.
if os.IsNotExist(e) {
return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
return BucketMultipartResourcesMetadata{}, probe.NewError(e)
}
var uploads []*UploadMetadata
for object, session := range fs.multiparts.ActiveSession {
if strings.HasPrefix(object, resources.Prefix) {
if len(uploads) > resources.MaxUploads {
sort.Sort(byUploadMetadataKey(uploads))
resources.Upload = uploads
resources.NextKeyMarker = object
resources.NextUploadIDMarker = session.UploadID
resources.IsTruncated = true
return resources, nil
}
2016-02-04 23:40:58 -05:00
// UploadIDMarker is ignored if KeyMarker is empty.
switch {
case resources.KeyMarker != "" && resources.UploadIDMarker == "":
if object > resources.KeyMarker {
upload := new(UploadMetadata)
upload.Object = object
upload.UploadID = session.UploadID
upload.Initiated = session.Initiated
uploads = append(uploads, upload)
}
case resources.KeyMarker != "" && resources.UploadIDMarker != "":
if session.UploadID > resources.UploadIDMarker {
if object >= resources.KeyMarker {
upload := new(UploadMetadata)
upload.Object = object
upload.UploadID = session.UploadID
upload.Initiated = session.Initiated
uploads = append(uploads, upload)
}
}
default:
upload := new(UploadMetadata)
upload.Object = object
upload.UploadID = session.UploadID
upload.Initiated = session.Initiated
uploads = append(uploads, upload)
}
}
}
sort.Sort(byUploadMetadataKey(uploads))
resources.Upload = uploads
return resources, nil
}
2016-02-04 23:40:58 -05:00
// concatenate parts.
2015-10-17 22:17:33 -04:00
func (fs Filesystem) concatParts(parts *CompleteMultipartUpload, objectPath string, mw io.Writer) *probe.Error {
for _, part := range parts.Part {
partFile, e := os.OpenFile(objectPath+fmt.Sprintf("$%d-$multiparts", part.PartNumber), os.O_RDONLY, 0600)
defer partFile.Close()
if e != nil {
return probe.NewError(e)
}
recvMD5 := part.ETag
2016-02-04 23:40:58 -05:00
// Complete multipart request header md5sum per part is hex
// encoded trim it and decode if possible.
if _, e = hex.DecodeString(strings.Trim(recvMD5, "\"")); e != nil {
return probe.NewError(InvalidDigest{Md5: recvMD5})
}
2016-02-04 23:40:58 -05:00
if _, e = io.Copy(mw, partFile); e != nil {
return probe.NewError(e)
}
}
return nil
}
// NewMultipartUpload - initiate a new multipart session
2015-10-17 22:17:33 -04:00
func (fs Filesystem) NewMultipartUpload(bucket, object string) (string, *probe.Error) {
2016-02-04 23:40:58 -05:00
fs.rwLock.Lock()
defer fs.rwLock.Unlock()
2015-10-17 22:17:33 -04:00
di, e := disk.GetInfo(fs.path)
if e != nil {
return "", probe.NewError(e)
2015-10-17 22:17:33 -04:00
}
2016-02-04 23:40:58 -05:00
// Remove 5% from total space for cumulative disk space used for
// journalling, inodes etc.
availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
if int64(availableDiskSpace) <= fs.minFreeDisk {
2015-10-17 22:17:33 -04:00
return "", probe.NewError(RootPathFull{Path: fs.path})
}
2016-02-04 23:40:58 -05:00
// Input validation.
if !IsValidBucketName(bucket) {
return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
}
if !IsValidObjectName(object) {
return "", probe.NewError(ObjectNameInvalid{Object: object})
}
bucket = fs.denormalizeBucket(bucket)
bucketPath := filepath.Join(fs.path, bucket)
if _, e = os.Stat(bucketPath); e != nil {
2016-02-04 23:40:58 -05:00
// Check bucket exists.
if os.IsNotExist(e) {
return "", probe.NewError(BucketNotFound{Bucket: bucket})
}
return "", probe.NewError(e)
}
objectPath := filepath.Join(bucketPath, object)
objectDir := filepath.Dir(objectPath)
if _, e = os.Stat(objectDir); e != nil {
if !os.IsNotExist(e) {
return "", probe.NewError(e)
}
e = os.MkdirAll(objectDir, 0700)
if e != nil {
return "", probe.NewError(e)
}
}
2016-02-04 23:40:58 -05:00
// Generate new upload id.
id := []byte(strconv.FormatInt(rand.Int63(), 10) + bucket + object + time.Now().String())
uploadIDSum := sha512.Sum512(id)
uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47]
multiPartfile, e := os.OpenFile(objectPath+"$multiparts", os.O_WRONLY|os.O_CREATE, 0600)
if e != nil {
return "", probe.NewError(e)
}
defer multiPartfile.Close()
2016-02-04 23:40:58 -05:00
// Initialize multipart session.
mpartSession := &MultipartSession{}
mpartSession.TotalParts = 0
mpartSession.UploadID = uploadID
mpartSession.Initiated = time.Now().UTC()
var parts []*PartMetadata
mpartSession.Parts = parts
fs.multiparts.ActiveSession[object] = mpartSession
encoder := json.NewEncoder(multiPartfile)
if e = encoder.Encode(mpartSession); e != nil {
return "", probe.NewError(e)
}
if err := saveMultipartsSession(fs.multiparts); err != nil {
return "", err.Trace()
}
return uploadID, nil
}
2016-02-04 23:40:58 -05:00
// partNumber is a sortable interface for Part slice.
type partNumber []*PartMetadata
func (a partNumber) Len() int { return len(a) }
func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
// CreateObjectPart - create a part in a multipart session
2015-10-17 22:17:33 -04:00
func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum string, partID int, size int64, data io.Reader, signature *Signature) (string, *probe.Error) {
2016-02-04 23:40:58 -05:00
fs.rwLock.Lock()
defer fs.rwLock.Unlock()
di, err := disk.GetInfo(fs.path)
2015-10-17 22:17:33 -04:00
if err != nil {
return "", probe.NewError(err)
}
2016-02-04 23:40:58 -05:00
// Remove 5% from total space for cumulative disk space used for
// journalling, inodes etc.
availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
if int64(availableDiskSpace) <= fs.minFreeDisk {
2015-10-17 22:17:33 -04:00
return "", probe.NewError(RootPathFull{Path: fs.path})
}
2016-02-04 23:40:58 -05:00
// Part id cannot be negative.
if partID <= 0 {
return "", probe.NewError(errors.New("invalid part id, cannot be zero or less than zero"))
}
2016-02-04 23:40:58 -05:00
// Check bucket name valid.
if !IsValidBucketName(bucket) {
return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
}
2016-02-04 23:40:58 -05:00
// Verify object path legal.
if !IsValidObjectName(object) {
return "", probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
}
2016-02-04 23:40:58 -05:00
// Verify upload is valid for the incoming object.
if !fs.isValidUploadID(object, uploadID) {
return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
}
if strings.TrimSpace(expectedMD5Sum) != "" {
2015-10-17 22:17:33 -04:00
var expectedMD5SumBytes []byte
expectedMD5SumBytes, err = base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
if err != nil {
2016-02-04 23:40:58 -05:00
// Pro-actively close the connection
return "", probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
}
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
}
bucket = fs.denormalizeBucket(bucket)
bucketPath := filepath.Join(fs.path, bucket)
2015-10-17 22:17:33 -04:00
if _, err = os.Stat(bucketPath); err != nil {
2016-02-04 23:40:58 -05:00
// Check bucket exists.
if os.IsNotExist(err) {
return "", probe.NewError(BucketNotFound{Bucket: bucket})
}
return "", probe.NewError(err)
}
objectPath := filepath.Join(bucketPath, object)
partPath := objectPath + fmt.Sprintf("$%d-$multiparts", partID)
partFile, e := atomic.FileCreateWithPrefix(partPath, "$multiparts")
if e != nil {
return "", probe.NewError(e)
}
h := md5.New()
sh := sha256.New()
mw := io.MultiWriter(partFile, h, sh)
if _, e = io.CopyN(mw, data, size); e != nil {
partFile.CloseAndPurge()
return "", probe.NewError(e)
}
md5sum := hex.EncodeToString(h.Sum(nil))
2016-02-04 23:40:58 -05:00
// Verify if the written object is equal to what is expected, only
// if it is requested as such.
if strings.TrimSpace(expectedMD5Sum) != "" {
if !isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5sum) {
partFile.CloseAndPurge()
return "", probe.NewError(BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Object: object})
}
}
if signature != nil {
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
if err != nil {
partFile.CloseAndPurge()
return "", err.Trace()
}
if !ok {
partFile.CloseAndPurge()
return "", probe.NewError(SignatureDoesNotMatch{})
}
}
partFile.Close()
fi, e := os.Stat(partPath)
if e != nil {
return "", probe.NewError(e)
}
partMetadata := PartMetadata{}
partMetadata.ETag = md5sum
partMetadata.PartNumber = partID
partMetadata.Size = fi.Size()
partMetadata.LastModified = fi.ModTime()
multiPartfile, e := os.OpenFile(objectPath+"$multiparts", os.O_RDWR|os.O_APPEND, 0600)
if e != nil {
return "", probe.NewError(e)
}
defer multiPartfile.Close()
var deserializedMultipartSession MultipartSession
decoder := json.NewDecoder(multiPartfile)
if e = decoder.Decode(&deserializedMultipartSession); e != nil {
return "", probe.NewError(e)
}
deserializedMultipartSession.Parts = append(deserializedMultipartSession.Parts, &partMetadata)
deserializedMultipartSession.TotalParts++
fs.multiparts.ActiveSession[object] = &deserializedMultipartSession
sort.Sort(partNumber(deserializedMultipartSession.Parts))
encoder := json.NewEncoder(multiPartfile)
if e = encoder.Encode(&deserializedMultipartSession); e != nil {
return "", probe.NewError(e)
}
return partMetadata.ETag, nil
}
// CompleteMultipartUpload - complete a multipart upload and persist the data
2015-10-17 22:17:33 -04:00
func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
2016-02-04 23:40:58 -05:00
fs.rwLock.Lock()
defer fs.rwLock.Unlock()
2016-02-04 23:40:58 -05:00
// Check bucket name is valid.
if !IsValidBucketName(bucket) {
return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
2016-02-04 23:40:58 -05:00
// Verify object path is legal.
if !IsValidObjectName(object) {
return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
}
2016-02-04 23:40:58 -05:00
// Verify if valid upload for incoming object.
if !fs.isValidUploadID(object, uploadID) {
return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
}
bucket = fs.denormalizeBucket(bucket)
bucketPath := filepath.Join(fs.path, bucket)
if _, e := os.Stat(bucketPath); e != nil {
2016-02-04 23:40:58 -05:00
// Check bucket exists.
if os.IsNotExist(e) {
return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
return ObjectMetadata{}, probe.NewError(InternalError{})
}
objectPath := filepath.Join(bucketPath, object)
file, e := atomic.FileCreateWithPrefix(objectPath, "")
if e != nil {
return ObjectMetadata{}, probe.NewError(e)
}
h := md5.New()
mw := io.MultiWriter(file, h)
partBytes, e := ioutil.ReadAll(data)
if e != nil {
file.CloseAndPurge()
return ObjectMetadata{}, probe.NewError(e)
}
if signature != nil {
sh := sha256.New()
sh.Write(partBytes)
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
if err != nil {
file.CloseAndPurge()
return ObjectMetadata{}, err.Trace()
}
if !ok {
file.CloseAndPurge()
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
}
}
parts := &CompleteMultipartUpload{}
if e := xml.Unmarshal(partBytes, parts); e != nil {
file.CloseAndPurge()
return ObjectMetadata{}, probe.NewError(MalformedXML{})
}
if !sort.IsSorted(completedParts(parts.Part)) {
file.CloseAndPurge()
return ObjectMetadata{}, probe.NewError(InvalidPartOrder{})
}
if err := fs.concatParts(parts, objectPath, mw); err != nil {
file.CloseAndPurge()
return ObjectMetadata{}, err.Trace()
}
delete(fs.multiparts.ActiveSession, object)
for _, part := range parts.Part {
if e = os.Remove(objectPath + fmt.Sprintf("$%d-$multiparts", part.PartNumber)); e != nil {
file.CloseAndPurge()
return ObjectMetadata{}, probe.NewError(e)
}
}
if e := os.Remove(objectPath + "$multiparts"); e != nil {
file.CloseAndPurge()
return ObjectMetadata{}, probe.NewError(e)
}
if e := saveMultipartsSession(fs.multiparts); e != nil {
file.CloseAndPurge()
return ObjectMetadata{}, e.Trace()
}
file.Close()
st, e := os.Stat(objectPath)
if e != nil {
return ObjectMetadata{}, probe.NewError(e)
}
contentType := "application/octet-stream"
if objectExt := filepath.Ext(objectPath); objectExt != "" {
contentType = contentdb.MustLookup(strings.ToLower(strings.TrimPrefix(objectExt, ".")))
}
newObject := ObjectMetadata{
Bucket: bucket,
Object: object,
Created: st.ModTime(),
Size: st.Size(),
ContentType: contentType,
Md5: hex.EncodeToString(h.Sum(nil)),
}
return newObject, nil
}
// ListObjectParts - list parts from incomplete multipart session for a given ObjectResourcesMetadata
2015-10-17 22:17:33 -04:00
func (fs Filesystem) ListObjectParts(bucket, object string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error) {
2016-02-04 23:40:58 -05:00
fs.rwLock.Lock()
defer fs.rwLock.Unlock()
2016-02-04 23:40:58 -05:00
// Check bucket name is valid.
if !IsValidBucketName(bucket) {
return ObjectResourcesMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
2016-02-04 23:40:58 -05:00
// Verify object path legal.
if !IsValidObjectName(object) {
return ObjectResourcesMetadata{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
}
2016-02-04 23:40:58 -05:00
// Verify if upload id is valid for incoming object.
if !fs.isValidUploadID(object, resources.UploadID) {
return ObjectResourcesMetadata{}, probe.NewError(InvalidUploadID{UploadID: resources.UploadID})
}
objectResourcesMetadata := resources
objectResourcesMetadata.Bucket = bucket
objectResourcesMetadata.Object = object
var startPartNumber int
switch {
case objectResourcesMetadata.PartNumberMarker == 0:
startPartNumber = 1
default:
startPartNumber = objectResourcesMetadata.PartNumberMarker
}
bucket = fs.denormalizeBucket(bucket)
bucketPath := filepath.Join(fs.path, bucket)
if _, e := os.Stat(bucketPath); e != nil {
2016-02-04 23:40:58 -05:00
// Check bucket exists.
if os.IsNotExist(e) {
return ObjectResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
}
return ObjectResourcesMetadata{}, probe.NewError(e)
}
objectPath := filepath.Join(bucketPath, object)
multiPartfile, e := os.OpenFile(objectPath+"$multiparts", os.O_RDONLY, 0600)
if e != nil {
return ObjectResourcesMetadata{}, probe.NewError(e)
}
defer multiPartfile.Close()
var deserializedMultipartSession MultipartSession
decoder := json.NewDecoder(multiPartfile)
if e = decoder.Decode(&deserializedMultipartSession); e != nil {
return ObjectResourcesMetadata{}, probe.NewError(e)
}
var parts []*PartMetadata
for i := startPartNumber; i <= deserializedMultipartSession.TotalParts; i++ {
if len(parts) > objectResourcesMetadata.MaxParts {
sort.Sort(partNumber(parts))
objectResourcesMetadata.IsTruncated = true
objectResourcesMetadata.Part = parts
objectResourcesMetadata.NextPartNumberMarker = i
return objectResourcesMetadata, nil
}
parts = append(parts, deserializedMultipartSession.Parts[i-1])
}
sort.Sort(partNumber(parts))
objectResourcesMetadata.Part = parts
return objectResourcesMetadata, nil
}
// AbortMultipartUpload - abort an incomplete multipart session
2015-10-17 22:17:33 -04:00
func (fs Filesystem) AbortMultipartUpload(bucket, object, uploadID string) *probe.Error {
2016-02-04 23:40:58 -05:00
fs.rwLock.Lock()
defer fs.rwLock.Unlock()
2016-02-04 23:40:58 -05:00
// Check bucket name valid.
if !IsValidBucketName(bucket) {
return probe.NewError(BucketNameInvalid{Bucket: bucket})
}
2016-02-04 23:40:58 -05:00
// Verify object path legal.
if !IsValidObjectName(object) {
return probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
}
if !fs.isValidUploadID(object, uploadID) {
return probe.NewError(InvalidUploadID{UploadID: uploadID})
}
bucket = fs.denormalizeBucket(bucket)
bucketPath := filepath.Join(fs.path, bucket)
if _, e := os.Stat(bucketPath); e != nil {
2016-02-04 23:40:58 -05:00
// Check bucket exists.
if os.IsNotExist(e) {
return probe.NewError(BucketNotFound{Bucket: bucket})
}
return probe.NewError(e)
}
objectPath := filepath.Join(bucketPath, object)
for _, part := range fs.multiparts.ActiveSession[object].Parts {
e := os.RemoveAll(objectPath + fmt.Sprintf("$%d-$multiparts", part.PartNumber))
if e != nil {
return probe.NewError(e)
}
}
delete(fs.multiparts.ActiveSession, object)
if e := os.RemoveAll(objectPath + "$multiparts"); e != nil {
return probe.NewError(e)
}
return nil
}