/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"encoding/json"
	"path"
	"sort"
	"strings"
	"sync"
	"time"
)

// An uploadInfo represents a single upload entry, as per the S3-compatible multipart spec.
type uploadInfo struct {
	UploadID  string    `json:"uploadId"`  // UploadID for the active multipart upload.
	Deleted   bool      `json:"deleted"`   // Currently unused, reserved for future use.
	Initiated time.Time `json:"initiated"` // Indicates when the uploadID was initiated.
}

// An uploadsV1 represents the `uploads.json` metadata header.
type uploadsV1 struct {
	Version string       `json:"version"`   // Version of the current `uploads.json`.
	Format  string       `json:"format"`    // Format of the current `uploads.json`.
	Uploads []uploadInfo `json:"uploadIds"` // Captures all the upload ids for a given object.
}
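
// An illustrative example (all values made up) of what this header
// serializes to on disk, given the struct tags above:
//
//	{
//	  "version": "1",
//	  "format": "xl",
//	  "uploadIds": [
//	    {
//	      "uploadId": "3cfc2501-d9a1-4a99-89b2-894b38b2d379",
//	      "deleted": false,
//	      "initiated": "2016-06-01T23:43:31Z"
//	    }
//	  ]
//	}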

// byInitiatedTime is a collection satisfying sort.Interface.
type byInitiatedTime []uploadInfo

func (t byInitiatedTime) Len() int      { return len(t) }
func (t byInitiatedTime) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t byInitiatedTime) Less(i, j int) bool {
	return t[i].Initiated.Before(t[j].Initiated)
}

// AddUploadID - adds a new upload id in order of its initiated time.
func (u *uploadsV1) AddUploadID(uploadID string, initiated time.Time) {
	u.Uploads = append(u.Uploads, uploadInfo{
		UploadID:  uploadID,
		Initiated: initiated,
	})
	sort.Sort(byInitiatedTime(u.Uploads))
}

// Index - returns the index of the matching upload id, or -1 if not found.
func (u uploadsV1) Index(uploadID string) int {
	for i, u := range u.Uploads {
		if u.UploadID == uploadID {
			return i
		}
	}
	return -1
}

// readUploadsJSON - read the saved `uploads.json` for the given object.
func readUploadsJSON(bucket, object string, disk StorageAPI) (uploadIDs uploadsV1, err error) {
	uploadJSONPath := path.Join(mpartMetaPrefix, bucket, object, uploadsJSONFile)
	// Read all of 'uploads.json'.
	buffer, rErr := readAll(disk, minioMetaBucket, uploadJSONPath)
	if rErr != nil {
		return uploadsV1{}, rErr
	}
	rErr = json.Unmarshal(buffer, &uploadIDs)
	if rErr != nil {
		return uploadsV1{}, rErr
	}
	return uploadIDs, nil
}

// updateUploadsJSON - update `uploads.json` with the given uploadsJSON on all disks.
func updateUploadsJSON(bucket, object string, uploadsJSON uploadsV1, storageDisks ...StorageAPI) error {
	uploadsPath := path.Join(mpartMetaPrefix, bucket, object, uploadsJSONFile)
	uniqueID := getUUID()
	tmpUploadsPath := path.Join(tmpMetaPrefix, uniqueID)
	var errs = make([]error, len(storageDisks))
	var wg = &sync.WaitGroup{}

	// Update `uploads.json` for all the disks.
	for index, disk := range storageDisks {
		if disk == nil {
			errs[index] = errDiskNotFound
			continue
		}
		wg.Add(1)
		// Update `uploads.json` in a routine.
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			uploadsBytes, wErr := json.Marshal(uploadsJSON)
			if wErr != nil {
				errs[index] = wErr
				return
			}
			n, wErr := disk.AppendFile(minioMetaBucket, tmpUploadsPath, uploadsBytes)
			if wErr != nil {
				errs[index] = wErr
				return
			}
			if n != int64(len(uploadsBytes)) {
				errs[index] = errUnexpected
				return
			}
			if wErr = disk.RenameFile(minioMetaBucket, tmpUploadsPath, minioMetaBucket, uploadsPath); wErr != nil {
				errs[index] = wErr
				return
			}
		}(index, disk)
	}

	// Wait for all the routines to finish updating `uploads.json`.
	wg.Wait()

	// For a single disk, return its error directly.
	if len(storageDisks) == 1 {
		return errs[0]
	} // else count all the errors for quorum validation.
	var errCount = 0
	// Count the failed disks.
	for _, err := range errs {
		if err != nil {
			errCount++
		}
	}
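
	// Worked example, assuming 4 disks: write quorum requires N/2+1 = 3
	// successful writes, so it is lost once errCount reaches
	// len(storageDisks)-len(storageDisks)/2 = 2; read quorum requires only
	// N/2 = 2 live copies, i.e. errCount <= 2. Hence 0-1 errors succeed
	// outright, exactly 2 errors are still treated as success on read
	// quorum, and 3-4 errors trigger the undo below.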
	// Validate that we hold write quorum, i.e. the update succeeded on at
	// least N/2+1 disks.
	if errCount >= len(storageDisks)-len(storageDisks)/2 {
		// Write quorum is lost; if we still hold read quorum, report success.
		if errCount <= len(storageDisks)-len(storageDisks)/2 {
			return nil
		}
		// Rename the leftover `uploads.json` back to the tmp location.
		for index, disk := range storageDisks {
			if disk == nil {
				continue
			}
			// Undo rename `uploads.json` in parallel.
			wg.Add(1)
			go func(index int, disk StorageAPI) {
				defer wg.Done()
				if errs[index] != nil {
					return
				}
				_ = disk.RenameFile(minioMetaBucket, uploadsPath, minioMetaBucket, tmpUploadsPath)
			}(index, disk)
		}
		wg.Wait()
		return errXLWriteQuorum
	}
	return nil
}

// newUploadsV1 - initialize new uploads v1.
func newUploadsV1(format string) uploadsV1 {
	uploadIDs := uploadsV1{}
	uploadIDs.Version = "1"
	uploadIDs.Format = format
	return uploadIDs
}

// writeUploadJSON - create `uploads.json` or update it with a new uploadID.
func writeUploadJSON(bucket, object, uploadID string, initiated time.Time, storageDisks ...StorageAPI) (err error) {
	uploadsPath := path.Join(mpartMetaPrefix, bucket, object, uploadsJSONFile)
	uniqueID := getUUID()
	tmpUploadsPath := path.Join(tmpMetaPrefix, uniqueID)

	var errs = make([]error, len(storageDisks))
	var wg = &sync.WaitGroup{}

	var uploadsJSON uploadsV1
	for _, disk := range storageDisks {
		if disk == nil {
			continue
		}
		uploadsJSON, err = readUploadsJSON(bucket, object, disk)
		break
	}
	if err != nil {
		// Bail out on any error other than file not found.
		if err != errFileNotFound {
			return err
		}
		if len(storageDisks) == 1 {
			// Set uploads format to `fs` for a single disk.
			uploadsJSON = newUploadsV1("fs")
		} else {
			// Set uploads format to `xl` otherwise.
			uploadsJSON = newUploadsV1("xl")
		}
	}

	// Add a new upload id.
	uploadsJSON.AddUploadID(uploadID, initiated)

	// Update `uploads.json` on all disks.
	for index, disk := range storageDisks {
		if disk == nil {
			errs[index] = errDiskNotFound
			continue
		}
		wg.Add(1)
		// Update `uploads.json` in a routine.
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			uploadsJSONBytes, wErr := json.Marshal(&uploadsJSON)
			if wErr != nil {
				errs[index] = wErr
				return
			}
			// Write `uploads.json` to disk.
			n, wErr := disk.AppendFile(minioMetaBucket, tmpUploadsPath, uploadsJSONBytes)
			if wErr != nil {
				errs[index] = wErr
				return
			}
			if n != int64(len(uploadsJSONBytes)) {
				errs[index] = errUnexpected
				return
			}
			wErr = disk.RenameFile(minioMetaBucket, tmpUploadsPath, minioMetaBucket, uploadsPath)
			if wErr != nil {
				if dErr := disk.DeleteFile(minioMetaBucket, tmpUploadsPath); dErr != nil {
					errs[index] = dErr
					return
				}
				errs[index] = wErr
				return
			}
			errs[index] = nil
		}(index, disk)
	}

	// Wait for all the writes to finish.
	wg.Wait()

	// For a single disk, return its error directly.
	if len(storageDisks) == 1 {
		return errs[0]
	} // else count all the errors for quorum validation.
	var errCount = 0
	// Count the failed disks.
	for _, err := range errs {
		if err != nil {
			errCount++
		}
	}
	// Validate that we hold write quorum, i.e. the update succeeded on at
	// least N/2+1 disks (see the worked example in updateUploadsJSON).
	if errCount >= len(storageDisks)-len(storageDisks)/2 {
		// Write quorum is lost; if we still hold read quorum, report success.
		if errCount <= len(storageDisks)-len(storageDisks)/2 {
			return nil
		}
		// Rename the leftover `uploads.json` back to the tmp location.
		for index, disk := range storageDisks {
			if disk == nil {
				continue
			}
			// Undo rename `uploads.json` in parallel.
			wg.Add(1)
			go func(index int, disk StorageAPI) {
				defer wg.Done()
				if errs[index] != nil {
					return
				}
				_ = disk.RenameFile(minioMetaBucket, uploadsPath, minioMetaBucket, tmpUploadsPath)
			}(index, disk)
		}
		wg.Wait()
		return errXLWriteQuorum
	}
	return nil
}

// Wrapper which removes all the uploaded parts.
func cleanupUploadedParts(bucket, object, uploadID string, storageDisks ...StorageAPI) error {
	var errs = make([]error, len(storageDisks))
	var wg = &sync.WaitGroup{}

	// Construct uploadIDPath.
	uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID)

	// Cleanup uploadID for all disks.
	for index, disk := range storageDisks {
		if disk == nil {
			errs[index] = errDiskNotFound
			continue
		}
		wg.Add(1)
		// Cleanup each uploadID in a routine.
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			err := cleanupDir(disk, minioMetaBucket, uploadIDPath)
			if err != nil {
				errs[index] = err
				return
			}
			errs[index] = nil
		}(index, disk)
	}

	// Wait for all the cleanups to finish.
	wg.Wait()

	// Return first error.
	for _, err := range errs {
		if err != nil {
			return err
		}
	}
	return nil
}

// listMultipartUploadIDs - list all the upload ids from a marker up to 'count'.
func listMultipartUploadIDs(bucketName, objectName, uploadIDMarker string, count int, disk StorageAPI) ([]uploadMetadata, bool, error) {
	var uploads []uploadMetadata
	// Read `uploads.json`.
	uploadsJSON, err := readUploadsJSON(bucketName, objectName, disk)
	if err != nil {
		return nil, false, err
	}
	index := 0
	if uploadIDMarker != "" {
		for ; index < len(uploadsJSON.Uploads); index++ {
			if uploadsJSON.Uploads[index].UploadID == uploadIDMarker {
				// Skip the uploadID as it would already be listed in the previous listing.
				index++
				break
			}
		}
	}
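
	// Illustrative example: with uploads [a, b, c], uploadIDMarker "a" and
	// count 2, listing resumes after "a" and returns [b, c] with end=true;
	// with count 1 it would return just [b] and end=false.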
	for index < len(uploadsJSON.Uploads) {
		uploads = append(uploads, uploadMetadata{
			Object:    objectName,
			UploadID:  uploadsJSON.Uploads[index].UploadID,
			Initiated: uploadsJSON.Uploads[index].Initiated,
		})
		count--
		index++
		if count == 0 {
			break
		}
	}
	end := (index == len(uploadsJSON.Uploads))
	return uploads, end, nil
}

// Returns whether the prefix is a multipart upload.
func (xl xlObjects) isMultipartUpload(bucket, prefix string) bool {
	for _, disk := range xl.getLoadBalancedQuorumDisks() {
		if disk == nil {
			continue
		}
		_, err := disk.StatFile(bucket, pathJoin(prefix, uploadsJSONFile))
		if err != nil {
			if err == errDiskNotFound {
				continue
			}
			return false
		}
		break
	}
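	// NOTE: if every disk was skipped above (nil or errDiskNotFound), we
	// fall through and optimistically report true.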
	return true
}

// listUploadsInfo - list all uploads info.
func (xl xlObjects) listUploadsInfo(prefixPath string) (uploadsInfo []uploadInfo, err error) {
	for _, disk := range xl.getLoadBalancedQuorumDisks() {
		if disk == nil {
			continue
		}
		splitPrefixes := strings.SplitN(prefixPath, "/", 3)
		var uploadsJSON uploadsV1
		uploadsJSON, err = readUploadsJSON(splitPrefixes[1], splitPrefixes[2], disk)
		if err != nil {
			if err == errDiskNotFound {
				continue
			}
			if err == errFileNotFound {
				return []uploadInfo{}, nil
			}
			return nil, err
		}
		uploadsInfo = uploadsJSON.Uploads
		break
	}
	return uploadsInfo, nil
}

// isUploadIDExists - verify if a given uploadID exists and is valid.
func (xl xlObjects) isUploadIDExists(bucket, object, uploadID string) bool {
	uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID)
	return xl.isObject(minioMetaBucket, uploadIDPath)
}

// Removes the part given by partName, belonging to a multipart upload, from minioMetaBucket.
func (xl xlObjects) removeObjectPart(bucket, object, uploadID, partName string) {
	curpartPath := path.Join(mpartMetaPrefix, bucket, object, uploadID, partName)
	wg := sync.WaitGroup{}
	for i, disk := range xl.storageDisks {
		if disk == nil {
			continue
		}
		wg.Add(1)
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			// Ignoring failure to remove parts that weren't present in CompleteMultipartUpload
			// requests. xl.json is the authoritative source of truth on which parts constitute
			// the object. The presence of parts that don't belong in the object doesn't affect correctness.
			_ = disk.DeleteFile(minioMetaBucket, curpartPath)
		}(i, disk)
	}
	wg.Wait()
}