Use CompleteMultipartUpload in RestoreTransitionedObject

Signed-off-by: Krishnan Parthasarathi <kp@minio.io>
Harshavardhana 2021-04-23 10:52:26 -07:00
parent 3831027c54
commit cbfdf97abf
5 changed files with 22 additions and 3701 deletions

CREDITS — 3593 changed lines (diff suppressed because it is too large)

@@ -238,6 +238,9 @@ func expireTransitionedObject(ctx context.Context, objectAPI ObjectLayer, bucket
return err
}
// Send audit for the lifecycle delete operation
auditLogLifecycle(ctx, bucket, object)
eventName := event.ObjectRemovedDelete
if lcOpts.DeleteMarker {
eventName = event.ObjectRemovedDeleteMarkerCreated
@@ -275,30 +278,9 @@ func genTransitionObjName() (string, error) {
if err != nil {
return "", err
}
<<<<<<< HEAD
// Send audit for the lifecycle delete operation
auditLogLifecycle(ctx, bucket, object)
eventName := event.ObjectRemovedDelete
if lcOpts.DeleteMarker {
eventName = event.ObjectRemovedDeleteMarkerCreated
}
// Notify object deleted event.
sendEvent(eventArgs{
EventName: eventName,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [ILM-EXPIRY]",
})
// should never reach here
return nil
=======
us := u.String()
obj := fmt.Sprintf("%s/%s/%s", us[0:2], us[2:4], us)
return obj, nil
>>>>>>> ef4fac9f3... Support for remote tier management (#12090)
}
// transition object to target specified by the transition ARN. When an object is transitioned to another
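For reference, the branch kept above names transitioned objects by a random UUID fanned out under two 2-character prefixes (us[0:2]/us[2:4]/us). A minimal, hedged sketch of that naming scheme follows; it assumes github.com/google/uuid, while the package used upstream may differ:

package main

import (
	"fmt"

	"github.com/google/uuid" // assumption: any UUID generator with a String() method works here
)

// transitionObjName mirrors the shape of the kept genTransitionObjName body:
// the remote-tier object name is a random UUID, prefixed by its first two
// byte pairs so names spread across "directories" on the warm backend.
func transitionObjName() (string, error) {
	u, err := uuid.NewRandom()
	if err != nil {
		return "", err
	}
	us := u.String()
	// e.g. "3f/2a/3f2a9c1e-...": two 2-character prefixes, then the full UUID.
	return fmt.Sprintf("%s/%s/%s", us[0:2], us[2:4], us), nil
}

func main() {
	name, err := transitionObjName()
	if err != nil {
		panic(err)
	}
	fmt.Println(name)
}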


@@ -1395,7 +1395,7 @@ func (er erasureObjects) RestoreTransitionedObject(ctx context.Context, bucket,
}
// update restore status header in the metadata
func (er erasureObjects) updateRestoreMetadata(ctx context.Context, bucket, object string, objInfo ObjectInfo, opts ObjectOptions, noLock bool, rerr error) error {
func (er erasureObjects) updateRestoreMetadata(ctx context.Context, bucket, object string, objInfo ObjectInfo, opts ObjectOptions, rerr error) error {
oi := objInfo.Clone()
oi.metadataOnly = true // Perform only metadata updates.
@@ -1408,7 +1408,6 @@ func (er erasureObjects) updateRestoreMetadata(ctx context.Context, bucket, obje
VersionID: oi.VersionID,
}, ObjectOptions{
VersionID: oi.VersionID,
NoLock: noLock, // true if lock already taken
}); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update transition restore metadata for %s/%s(%s): %s", bucket, object, oi.VersionID, err))
return err
@@ -1422,15 +1421,15 @@ func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket s
defer func() {
ObjectPathUpdated(pathJoin(bucket, object))
}()
setRestoreHeaderFn := func(oi ObjectInfo, noLock bool, rerr error) error {
er.updateRestoreMetadata(ctx, bucket, object, oi, opts, noLock, rerr)
setRestoreHeaderFn := func(oi ObjectInfo, rerr error) error {
er.updateRestoreMetadata(ctx, bucket, object, oi, opts, rerr)
return rerr
}
var oi ObjectInfo
// get the file info on disk for transitioned object
actualfi, _, _, err := er.getObjectFileInfo(ctx, bucket, object, opts, false)
if err != nil {
return setRestoreHeaderFn(oi, false, toObjectErr(err, bucket, object))
return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
}
oi = actualfi.ToObjectInfo(bucket, object)
@@ -1438,40 +1437,40 @@ func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket s
var rs *HTTPRangeSpec
gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, http.Header{}, oi, opts)
if err != nil {
return setRestoreHeaderFn(oi, false, toObjectErr(err, bucket, object))
return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
}
defer gr.Close()
hashReader, err := hash.NewReader(gr, gr.ObjInfo.Size, "", "", gr.ObjInfo.Size)
if err != nil {
return setRestoreHeaderFn(oi, false, toObjectErr(err, bucket, object))
return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
}
pReader := NewPutObjReader(hashReader)
ropts := putRestoreOpts(bucket, object, opts.Transition.RestoreRequest, oi)
ropts.UserDefined[xhttp.AmzRestore] = completedRestoreObj(opts.Transition.RestoreExpiry).String()
_, err = er.PutObject(ctx, bucket, object, pReader, ropts)
return setRestoreHeaderFn(oi, false, toObjectErr(err, bucket, object))
return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
}
uploadID, err := er.NewMultipartUpload(ctx, bucket, object, opts)
if err != nil {
return setRestoreHeaderFn(oi, false, err)
return setRestoreHeaderFn(oi, err)
}
var uploadedParts []CompletePart
var rs *HTTPRangeSpec
// get reader from the warm backend - note that even in the case of encrypted objects, this stream is still encrypted.
gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, http.Header{}, oi, opts)
if err != nil {
return setRestoreHeaderFn(oi, false, err)
return setRestoreHeaderFn(oi, err)
}
defer gr.Close()
// rehydrate the parts back on disk as per the original xl.meta prior to transition
for _, partInfo := range oi.Parts {
hr, err := hash.NewReader(gr, partInfo.Size, "", "", partInfo.Size)
if err != nil {
return setRestoreHeaderFn(oi, false, err)
return setRestoreHeaderFn(oi, err)
}
pInfo, err := er.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, NewPutObjReader(hr), ObjectOptions{})
if err != nil {
return setRestoreHeaderFn(oi, false, err)
return setRestoreHeaderFn(oi, err)
}
uploadedParts = append(uploadedParts, CompletePart{
PartNumber: pInfo.PartNumber,
@@ -1487,20 +1486,20 @@ func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket s
// get Quorum for this object
_, writeQuorum, err := objectQuorumFromMeta(ctx, partsMetadata, errs, er.defaultParityCount)
if err != nil {
return setRestoreHeaderFn(oi, false, toObjectErr(err, bucket, object))
return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
}
reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
if reducedErr == errErasureWriteQuorum {
return setRestoreHeaderFn(oi, false, toObjectErr(reducedErr, bucket, object))
return setRestoreHeaderFn(oi, toObjectErr(reducedErr, bucket, object))
}
onlineDisks, modTime, dataDir := listOnlineDisks(storageDisks, partsMetadata, errs)
_, modTime, dataDir := listOnlineDisks(storageDisks, partsMetadata, errs)
// Pick one from the first valid metadata.
fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, dataDir, writeQuorum)
if err != nil {
return setRestoreHeaderFn(oi, false, err)
return setRestoreHeaderFn(oi, err)
}
//validate parts created via multipart to transitioned object's parts info in xl.meta
partsMatch := true
@@ -1516,27 +1515,12 @@ func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket s
}
if !partsMatch {
return setRestoreHeaderFn(oi, false, InvalidObjectState{Bucket: bucket, Object: object})
return setRestoreHeaderFn(oi, InvalidObjectState{Bucket: bucket, Object: object})
}
var currentFI = actualfi
currentFI.DataDir = fi.DataDir
// Hold namespace to complete the transaction
lk := er.NewNSLock(bucket, object)
ctx, err = lk.GetLock(ctx, globalOperationTimeout)
_, err = er.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
if err != nil {
return setRestoreHeaderFn(oi, false, err)
return setRestoreHeaderFn(oi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath))
}
defer lk.Unlock()
// Attempt to rename temp upload object to actual upload path object
_, err = rename(ctx, onlineDisks, minioMetaMultipartBucket, path.Join(uploadIDPath, fi.DataDir), bucket, path.Join(object, actualfi.DataDir), true, writeQuorum, nil)
if err != nil {
return setRestoreHeaderFn(oi, true, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath))
}
// Cleanup multipart upload dir.
if err = er.deleteObject(ctx, minioMetaMultipartBucket, uploadIDPath, writeQuorum); err != nil {
return setRestoreHeaderFn(oi, true, toObjectErr(err, bucket, object, uploadID))
}
return setRestoreHeaderFn(oi, true, nil)
return setRestoreHeaderFn(oi, nil)
}
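Taken together, the hunks above replace the hand-rolled completion step (NSLock, rename of the multipart upload directory, manual cleanup) with a call to CompleteMultipartUpload, which performs its own locking and commit. Below is a condensed, hedged sketch of the resulting restore path; it reuses the MinIO-internal names visible in the diff (erasureObjects methods, CompletePart, hash.NewReader, NewPutObjReader), but restoreViaMultipart itself is illustrative and not part of the codebase:

// restoreViaMultipart sketches the flow this commit converges on: stream the
// transitioned object back from the warm tier and rebuild it through the
// regular multipart API, letting CompleteMultipartUpload do the final commit.
func restoreViaMultipart(ctx context.Context, er erasureObjects, bucket, object string,
	oi ObjectInfo, gr io.Reader, opts ObjectOptions) error {

	uploadID, err := er.NewMultipartUpload(ctx, bucket, object, opts)
	if err != nil {
		return err
	}

	var parts []CompletePart
	// Re-create each part with the size and number recorded in xl.meta
	// before the object was transitioned.
	for _, p := range oi.Parts {
		hr, err := hash.NewReader(gr, p.Size, "", "", p.Size)
		if err != nil {
			return err
		}
		pi, err := er.PutObjectPart(ctx, bucket, object, uploadID, p.Number, NewPutObjReader(hr), ObjectOptions{})
		if err != nil {
			return err
		}
		parts = append(parts, CompletePart{PartNumber: pi.PartNumber, ETag: pi.ETag})
	}

	// CompleteMultipartUpload takes the namespace lock and moves the upload
	// into place itself, so the explicit NSLock/rename/deleteObject block
	// removed above is no longer needed here.
	_, err = er.CompleteMultipartUpload(ctx, bucket, object, uploadID, parts, opts)
	return err
}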


@@ -1,25 +0,0 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// +build fips
package madmin
// useAES always returns true since AES is the only
// option out of AES-GCM and ChaCha20-Poly1305 that
// is approved by the NIST.
func useAES() bool { return true }


@@ -1,27 +0,0 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// +build !fips
package madmin
import "github.com/secure-io/sio-go/sioutil"
// useAES returns true if the executing CPU provides
// AES-GCM hardware instructions and an optimized
// assembler implementation is available.
func useAES() bool { return sioutil.NativeAES() }
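The two deleted files answer useAES at build time: the fips tag forces AES, otherwise the decision follows sioutil.NativeAES(). A caller would then pick its AEAD accordingly. The following is a hedged, self-contained sketch of that selection pattern using the standard library and golang.org/x/crypto; it is not code taken from madmin:

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"fmt"

	"golang.org/x/crypto/chacha20poly1305"
)

// newAEAD picks AES-GCM when useAES reports hardware AES support (or a FIPS
// build forces it), and ChaCha20-Poly1305 otherwise.
func newAEAD(useAES bool, key []byte) (cipher.AEAD, error) {
	if useAES {
		block, err := aes.NewCipher(key) // key must be 16, 24, or 32 bytes
		if err != nil {
			return nil, err
		}
		return cipher.NewGCM(block)
	}
	return chacha20poly1305.New(key) // key must be 32 bytes
}

func main() {
	key := make([]byte, 32) // demo key only; use a securely generated key in practice
	aead, err := newAEAD(true, key)
	if err != nil {
		panic(err)
	}
	fmt.Println("nonce size:", aead.NonceSize())
}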