objAPI: Implement CopyObject API. (#3487)
This is written to simplify our handler code and to provide a way to update only the metadata, rather than the data, when the source and destination in a CopyObject request are the same. Fixes #3316
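The metadata-only fast path hinges on detecting that the copy source and destination refer to the same object. A minimal, stand-alone sketch of that comparison using only the standard library (the diff's `pathJoin` helper is assumed here to behave like `path.Join`; `isMetadataOnlyCopy` is an illustrative name, not a minio function):

package main

import (
	"fmt"
	"path"
	"strings"
)

// isMetadataOnlyCopy reports whether a CopyObject request targets the same
// object it reads from, in which case only the stored metadata needs to be
// rewritten. Bucket and object names are joined before the case-insensitive
// comparison, mirroring the check in the diff below.
func isMetadataOnlyCopy(srcBucket, srcObject, dstBucket, dstObject string) bool {
	return strings.EqualFold(path.Join(srcBucket, srcObject), path.Join(dstBucket, dstObject))
}

func main() {
	fmt.Println(isMetadataOnlyCopy("photos", "cat.png", "photos", "cat.png")) // true: metadata-only update
	fmt.Println(isMetadataOnlyCopy("photos", "cat.png", "backup", "cat.png")) // false: full data copy
}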
@@ -42,13 +42,105 @@ var objectOpIgnoredErrs = []error{
 
 /// Object Operations
 
+// CopyObject - copy the source object to the destination object.
+// If the source and destination objects are the same, only the
+// metadata is updated.
+func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string, metadata map[string]string) (ObjectInfo, error) {
+	// Read metadata associated with the object from all disks.
+	metaArr, errs := readAllXLMetadata(xl.storageDisks, srcBucket, srcObject)
+	// Do we have read quorum?
+	if !isDiskQuorum(errs, xl.readQuorum) {
+		return ObjectInfo{}, traceError(InsufficientReadQuorum{}, errs...)
+	}
+
+	if reducedErr := reduceReadQuorumErrs(errs, objectOpIgnoredErrs, xl.readQuorum); reducedErr != nil {
+		return ObjectInfo{}, toObjectErr(reducedErr, srcBucket, srcObject)
+	}
+
+	// List all online disks.
+	onlineDisks, modTime := listOnlineDisks(xl.storageDisks, metaArr, errs)
+
+	// Pick latest valid metadata.
+	xlMeta, err := pickValidXLMeta(metaArr, modTime)
+	if err != nil {
+		return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject)
+	}
+
+	// Reorder online disks based on erasure distribution order.
+	onlineDisks = getOrderedDisks(xlMeta.Erasure.Distribution, onlineDisks)
+
+	// Length of the file to read.
+	length := xlMeta.Stat.Size
+
+	// Check if this request is only a metadata update.
+	cpMetadataOnly := strings.EqualFold(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
+	if cpMetadataOnly {
+		xlMeta.Meta = metadata
+		partsMetadata := make([]xlMetaV1, len(xl.storageDisks))
+		// Update `xl.json` content on each disk.
+		for index := range partsMetadata {
+			partsMetadata[index] = xlMeta
+		}
+
+		tempObj := mustGetUUID()
+
+		// Write unique `xl.json` for each disk.
+		if err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, xl.writeQuorum); err != nil {
+			return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject)
+		}
+		// Rename atomically `xl.json` from tmp location to destination for each disk.
+		if err = renameXLMetadata(onlineDisks, minioMetaTmpBucket, tempObj, srcBucket, srcObject, xl.writeQuorum); err != nil {
+			return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject)
+		}
+
+		objInfo := ObjectInfo{
+			IsDir:           false,
+			Bucket:          srcBucket,
+			Name:            srcObject,
+			Size:            xlMeta.Stat.Size,
+			ModTime:         xlMeta.Stat.ModTime,
+			MD5Sum:          xlMeta.Meta["md5Sum"],
+			ContentType:     xlMeta.Meta["content-type"],
+			ContentEncoding: xlMeta.Meta["content-encoding"],
+		}
+		// md5Sum has already been extracted into objInfo.MD5Sum. Remove it
+		// from the metadata map so that it does not appear as part of the
+		// response headers, e.g. X-Minio-* or X-Amz-*.
+		delete(xlMeta.Meta, "md5Sum")
+		objInfo.UserDefined = xlMeta.Meta
+		return objInfo, nil
+	}
+
+	// Initialize pipe.
+	pipeReader, pipeWriter := io.Pipe()
+
+	go func() {
+		startOffset := int64(0) // Read the whole file.
+		if gerr := xl.GetObject(srcBucket, srcObject, startOffset, length, pipeWriter); gerr != nil {
+			errorIf(gerr, "Unable to read the object `%s/%s`.", srcBucket, srcObject)
+			pipeWriter.CloseWithError(toObjectErr(gerr, srcBucket, srcObject))
+			return
+		}
+		pipeWriter.Close() // Close writer explicitly, signalling that all data has been written.
+	}()
+
+	objInfo, err := xl.PutObject(dstBucket, dstObject, length, pipeReader, metadata, "")
+	if err != nil {
+		return ObjectInfo{}, toObjectErr(err, dstBucket, dstObject)
+	}
+
+	// Explicitly close the reader.
+	pipeReader.Close()
+
+	return objInfo, nil
+}
+
 // GetObject - reads an object erasured coded across multiple
 // disks. Supports additional parameters like offset and length
-// which is synonymous with HTTP Range requests.
+// which are synonymous with HTTP Range requests.
 //
-// startOffset indicates the location at which the client requested
-// object to be read at. length indicates the total length of the
-// object requested by client.
+// startOffset indicates the starting read location of the object.
+// length indicates the total length of the object.
 func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) error {
 	if err := checkGetObjArgs(bucket, object); err != nil {
 		return err
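When the source and destination differ, CopyObject above streams the data through an in-memory pipe: a goroutine calls GetObject to write into pipeWriter while PutObject consumes pipeReader. The following self-contained sketch shows the same io.Pipe hand-off with plain standard-library readers and writers; the producer/consumer stand-ins are illustrative, not minio APIs.

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	pipeReader, pipeWriter := io.Pipe()

	// Producer: stands in for GetObject writing the source object into the pipe.
	go func() {
		src := strings.NewReader("object payload")
		if _, err := io.Copy(pipeWriter, src); err != nil {
			// Propagate the failure to the reading side.
			pipeWriter.CloseWithError(err)
			return
		}
		pipeWriter.Close() // Signal that all data has been written.
	}()

	// Consumer: stands in for PutObject reading the destination object from the pipe.
	var dst strings.Builder
	if _, err := io.Copy(&dst, pipeReader); err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	pipeReader.Close()
	fmt.Println("copied:", dst.String())
}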
@@ -255,13 +347,13 @@ func (xl xlObjects) getObjectInfo(bucket, object string) (objInfo ObjectInfo, er
 	return objInfo, nil
 }
 
-func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isPart bool, errs []error) {
+func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, errs []error) {
 	var wg = &sync.WaitGroup{}
 	// Undo rename object on disks where RenameFile succeeded.
 
 	// If srcEntry/dstEntry are objects then add a trailing slash to copy
 	// over all the parts inside the object directory
-	if !isPart {
+	if isDir {
 		srcEntry = retainSlash(srcEntry)
 		dstEntry = retainSlash(dstEntry)
 	}
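In the XL layout an object is stored as a directory of parts, so renaming an object means renaming a directory, while renaming a single part means renaming a file; the new isDir flag (replacing the inverted isPart flag) makes that explicit. The sketch below illustrates what retainSlash is assumed to do — guarantee a trailing slash so the storage layer treats the entry as a directory; the real helper may differ.

package main

import (
	"fmt"
	"strings"
)

// retainSlash appends a trailing "/" if one is not already present.
// This mirrors the assumed behaviour of the minio helper of the same name.
func retainSlash(entry string) string {
	if strings.HasSuffix(entry, "/") {
		return entry
	}
	return entry + "/"
}

func main() {
	// Object rename: operate on the whole directory of parts.
	fmt.Println(retainSlash("bucket/object")) // "bucket/object/"
	// Already slash-terminated entries are left untouched.
	fmt.Println(retainSlash("bucket/object/")) // "bucket/object/"
}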
@@ -284,14 +376,14 @@ func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry str
 
 // rename - common function that renamePart and renameObject use to rename
 // the respective underlying storage layer representations.
-func rename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isPart bool, quorum int) error {
+func rename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, quorum int) error {
 	// Initialize sync waitgroup.
 	var wg = &sync.WaitGroup{}
 
 	// Initialize list of errors.
 	var errs = make([]error, len(disks))
 
-	if !isPart {
+	if isDir {
 		dstEntry = retainSlash(dstEntry)
 		srcEntry = retainSlash(srcEntry)
 	}
@@ -319,7 +411,7 @@ func rename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string,
 	// otherwise return failure. Cleanup successful renames.
 	if !isDiskQuorum(errs, quorum) {
 		// Undo all the partial rename operations.
-		undoRename(disks, srcBucket, srcEntry, dstBucket, dstEntry, isPart, errs)
+		undoRename(disks, srcBucket, srcEntry, dstBucket, dstEntry, isDir, errs)
 		return traceError(errXLWriteQuorum)
 	}
 	return reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, quorum)
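Both the read path in CopyObject (isDiskQuorum with xl.readQuorum) and the rename path above (isDiskQuorum with the write quorum) reduce a slice of per-disk errors to a single go/no-go decision: the operation stands only if at least quorum disks reported success, otherwise the partial renames are rolled back. A minimal sketch of such a quorum check follows; it is illustrative only, and the real isDiskQuorum may additionally ignore certain benign errors.

package main

import (
	"errors"
	"fmt"
)

// hasQuorum reports whether at least `quorum` of the per-disk operations
// succeeded, i.e. returned a nil error.
func hasQuorum(errs []error, quorum int) bool {
	success := 0
	for _, err := range errs {
		if err == nil {
			success++
		}
	}
	return success >= quorum
}

func main() {
	errDiskDown := errors.New("disk not reachable")
	// 4 disks with a write quorum of 3: two failures means quorum is lost
	// and the partially renamed entries would be undone.
	errs := []error{nil, errDiskDown, nil, errDiskDown}
	fmt.Println(hasQuorum(errs, 3)) // false
	fmt.Println(hasQuorum(errs, 2)) // true
}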
@@ -330,8 +422,8 @@ func rename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string,
 // not have a readQuorum partially renamed files are renamed back to
 // its proper location.
 func renamePart(disks []StorageAPI, srcBucket, srcPart, dstBucket, dstPart string, quorum int) error {
-	isPart := true
-	return rename(disks, srcBucket, srcPart, dstBucket, dstPart, isPart, quorum)
+	isDir := false
+	return rename(disks, srcBucket, srcPart, dstBucket, dstPart, isDir, quorum)
 }
 
 // renameObject - renames all source objects to destination object
@@ -339,8 +431,8 @@ func renamePart(disks []StorageAPI, srcBucket, srcPart, dstBucket, dstPart strin
 // not have a readQuorum partially renamed files are renamed back to
 // its proper location.
 func renameObject(disks []StorageAPI, srcBucket, srcObject, dstBucket, dstObject string, quorum int) error {
-	isPart := false
-	return rename(disks, srcBucket, srcObject, dstBucket, dstObject, isPart, quorum)
+	isDir := true
+	return rename(disks, srcBucket, srcObject, dstBucket, dstObject, isDir, quorum)
 }
 
 // PutObject - creates an object upon reading from the input stream