Support for remote tier management (#12090)
With this change, MinIO's ILM supports transitioning objects to a remote tier.
This change includes support for Azure Blob Storage, AWS S3 compatible object
storage incl. MinIO and Google Cloud Storage as remote tier storage backends.

Some new additions include:

- Admin APIs remote tier configuration management

- Simple journal to track remote objects to be 'collected'
  This is used by object API handlers which 'mutate' object versions by
  overwriting/replacing content (Put/CopyObject) or removing the version
  itself (e.g DeleteObjectVersion).

- Rework of previous ILM transition to fit the new model
  In the new model, a storage class (a.k.a remote tier) is defined by the
  'remote' object storage type (one of s3, azure, GCS), bucket name and a
  prefix.

* Fixed bugs, review comments, and more unit-tests

- Leverage inline small object feature
- Migrate legacy objects to the latest object format before transitioning
- Fix restore to particular version if specified
- Extend SharedDataDirCount to handle transitioned and restored objects
- Restore-object should accept version-id for version-suspended bucket (#12091)
- Check if remote tier creds have sufficient permissions
- Bonus minor fixes to existing error messages

Co-authored-by: Poorna Krishnamoorthy <poorna@minio.io>
Co-authored-by: Krishna Srinivas <krishna@minio.io>
Signed-off-by: Harshavardhana <harsha@minio.io>
commit c829e3a13b
parent 069432566f
committed by Harshavardhana
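For orientation, the sketch below models the tier definition described in the commit message: a named storage class backed by a remote backend type (s3, azure, or gcs), a bucket, and a prefix. The type and field names here are illustrative assumptions, not MinIO's actual madmin structures.

// Illustrative sketch only; these are not MinIO's real types.
package main

import "fmt"

// remoteTier mirrors the commit message's definition of a storage class:
// the remote object storage type plus a bucket name and a prefix.
type remoteTier struct {
	Name   string // tier name referenced by ILM transition rules, e.g. "WARM"
	Type   string // one of "s3", "azure", "gcs"
	Bucket string // bucket on the remote object store
	Prefix string // prefix under which transitioned objects land
}

func main() {
	warm := remoteTier{Name: "WARM", Type: "s3", Bucket: "archive", Prefix: "minio-tier/"}
	fmt.Printf("ILM can transition objects to %s (%s://%s/%s)\n",
		warm.Name, warm.Type, warm.Bucket, warm.Prefix)
}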
@@ -1341,6 +1341,16 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
 		objInfo.ETag = remoteObjInfo.ETag
 		objInfo.ModTime = remoteObjInfo.LastModified
 	} else {
+
+		os := newObjSweeper(dstBucket, dstObject)
+		// Get appropriate object info to identify the remote object to delete
+		if !srcInfo.metadataOnly {
+			goiOpts := os.GetOpts()
+			if goi, gerr := getObjectInfo(ctx, dstBucket, dstObject, goiOpts); gerr == nil {
+				os.SetTransitionState(goi)
+			}
+		}
+
 		copyObjectFn := objectAPI.CopyObject
 		if api.CacheAPI() != nil {
 			copyObjectFn = api.CacheAPI().CopyObject
@@ -1353,6 +1363,9 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
 		}
+
+		// Remove the transitioned object whose object version is being overwritten.
+		logger.LogIf(ctx, os.Sweep())
 	}
 	objInfo.ETag = getDecryptedETag(r.Header, objInfo, false)
 	response := generateCopyObjectResponse(objInfo.ETag, objInfo.ModTime)
@@ -1655,6 +1668,13 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
 	// Ensure that metadata does not contain sensitive information
 	crypto.RemoveSensitiveEntries(metadata)
 
+	oc := newObjSweeper(bucket, object)
+	// Get appropriate object info to identify the remote object to delete
+	goiOpts := oc.GetOpts()
+	if goi, gerr := getObjectInfo(ctx, bucket, object, goiOpts); gerr == nil {
+		oc.SetTransitionState(goi)
+	}
+
 	// Create the object..
 	objInfo, err := putObject(ctx, bucket, object, pReader, opts)
 	if err != nil {
@@ -1684,6 +1704,10 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
 	if replicate, sync := mustReplicate(ctx, r, bucket, object, metadata, ""); replicate {
 		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
 	}
+
+	// Remove the transitioned object whose object version is being overwritten.
+	logger.LogIf(ctx, oc.Sweep())
+
 	setPutObjHeaders(w, objInfo, false)
 
 	writeSuccessResponseHeadersOnly(w)
@@ -3046,6 +3070,13 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
 		w.(http.Flusher).Flush()
 	}
 
+	os := newObjSweeper(bucket, object)
+	// Get appropriate object info to identify the remote object to delete
+	goiOpts := os.GetOpts()
+	if goi, gerr := objectAPI.GetObjectInfo(ctx, bucket, object, goiOpts); gerr == nil {
+		os.SetTransitionState(goi)
+	}
+
 	setEventStreamHeaders(w)
 
 	w = &whiteSpaceWriter{ResponseWriter: w, Flusher: w.(http.Flusher)}
@@ -3083,6 +3114,9 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
 		scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
 	}
 
+	// Remove the transitioned object whose object version is being overwritten.
+	logger.LogIf(ctx, os.Sweep())
+
 	// Write success response.
 	writeSuccessResponseXML(w, encodedSuccessResponse)
 
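The CopyObject, PutObject and CompleteMultipartUpload hunks above all repeat one ordering: record the existing version's transition state before the write, then sweep the orphaned remote copy only after the write succeeds. Below is a minimal, self-contained sketch of that ordering; the method names echo the diff (SetTransitionState, Sweep), but the types are stand-ins for illustration, not MinIO's actual sweeper.

// Self-contained sketch of the capture-then-sweep ordering shown above.
// The stand-in types below are assumptions for illustration only.
package main

import "fmt"

// objectInfo stands in for the object metadata returned by getObjectInfo.
type objectInfo struct {
	TransitionedTo string // non-empty if this version was transitioned to a remote tier
}

// sweeper stands in for the value returned by newObjSweeper in the diff.
type sweeper struct {
	remote string
}

// SetTransitionState remembers the remote copy of the version about to be overwritten.
func (s *sweeper) SetTransitionState(oi objectInfo) { s.remote = oi.TransitionedTo }

// Sweep removes (here: just reports) the remote copy orphaned by the overwrite.
func (s *sweeper) Sweep() error {
	if s.remote == "" {
		return nil // previous version was never transitioned; nothing to collect
	}
	fmt.Println("collecting orphaned remote object:", s.remote)
	return nil
}

// overwrite shows the ordering used by the handlers: capture state first,
// perform the mutation, and sweep only if the mutation succeeded.
func overwrite(s *sweeper, current objectInfo, write func() error) error {
	s.SetTransitionState(current)
	if err := write(); err != nil {
		return err // nothing was overwritten, so nothing is swept
	}
	return s.Sweep()
}

func main() {
	s := &sweeper{}
	cur := objectInfo{TransitionedTo: "WARM/bucket/object/v1"}
	if err := overwrite(s, cur, func() error { return nil }); err != nil {
		fmt.Println("sweep failed:", err)
	}
}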
@@ -3144,21 +3178,20 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 	var (
-		hasLockEnabled, hasLifecycleConfig bool
-		goi                                ObjectInfo
-		gerr                               error
+		goi  ObjectInfo
+		gerr error
 	)
 	replicateDeletes := hasReplicationRules(ctx, bucket, []ObjectToDelete{{ObjectName: object, VersionID: opts.VersionID}})
-	if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
-		hasLockEnabled = true
-	}
-	if _, err := globalBucketMetadataSys.GetLifecycleConfig(bucket); err == nil {
-		hasLifecycleConfig = true
-	}
-	if replicateDeletes || hasLockEnabled || hasLifecycleConfig {
-		goi, gerr = getObjectInfo(ctx, bucket, object, ObjectOptions{
-			VersionID: opts.VersionID,
-		})
-	}
-
+
+	var goiOpts ObjectOptions
+	os := newObjSweeper(bucket, object).WithVersion(singleDelete(*r))
+	// Mutations of objects on versioning suspended buckets
+	// affect its null version. Through opts below we select
+	// the null version's remote object to delete if
+	// transitioned.
+	goiOpts = os.GetOpts()
+	goi, gerr = getObjectInfo(ctx, bucket, object, goiOpts)
+	if gerr == nil {
+		os.SetTransitionState(goi)
+	}
 	replicateDel, replicateSync := checkReplicateDelete(ctx, bucket, ObjectToDelete{ObjectName: object, VersionID: opts.VersionID}, goi, gerr)
@@ -3266,16 +3299,9 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
 		scheduleReplicationDelete(ctx, dobj, objectAPI, replicateSync)
 	}
 
-	if goi.TransitionStatus == lifecycle.TransitionComplete { // clean up transitioned tier
-		deleteTransitionedObject(ctx, objectAPI, bucket, object, lifecycle.ObjectOpts{
-			Name:             object,
-			UserTags:         goi.UserTags,
-			VersionID:        goi.VersionID,
-			DeleteMarker:     goi.DeleteMarker,
-			TransitionStatus: goi.TransitionStatus,
-			IsLatest:         goi.IsLatest,
-		}, false, true)
-	}
+	// Remove the transitioned object whose object version is being overwritten.
+	logger.LogIf(ctx, os.Sweep())
+
 }
 
 // PutObjectLegalHoldHandler - set legal hold configuration to object,
@@ -3860,8 +3886,12 @@ func (api objectAPIHandlers) PostRestoreObjectHandler(w http.ResponseWriter, r *http.Request) {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEmptyRequestBody), r.URL, guessIsBrowserReq(r))
 		return
 	}
-
-	objInfo, err := getObjectInfo(ctx, bucket, object, ObjectOptions{})
+	opts, err := postRestoreOpts(ctx, r, bucket, object)
+	if err != nil {
+		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+		return
+	}
+	objInfo, err := getObjectInfo(ctx, bucket, object, opts)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
@@ -3905,16 +3935,12 @@ func (api objectAPIHandlers) PostRestoreObjectHandler(w http.ResponseWriter, r *http.Request) {
 	// update self with restore metadata
 	if rreq.Type != SelectRestoreRequest {
 		objInfo.metadataOnly = true // Perform only metadata updates.
-		ongoingReq := true
-		if alreadyRestored {
-			ongoingReq = false
-		}
 		metadata[xhttp.AmzRestoreExpiryDays] = strconv.Itoa(rreq.Days)
 		metadata[xhttp.AmzRestoreRequestDate] = time.Now().UTC().Format(http.TimeFormat)
 		if alreadyRestored {
-			metadata[xhttp.AmzRestore] = fmt.Sprintf("ongoing-request=%t, expiry-date=%s", ongoingReq, restoreExpiry.Format(http.TimeFormat))
+			metadata[xhttp.AmzRestore] = completedRestoreObj(restoreExpiry).String()
 		} else {
-			metadata[xhttp.AmzRestore] = fmt.Sprintf("ongoing-request=%t", ongoingReq)
+			metadata[xhttp.AmzRestore] = ongoingRestoreObj().String()
 		}
 		objInfo.UserDefined = metadata
 		if _, err := objectAPI.CopyObject(GlobalContext, bucket, object, bucket, object, objInfo, ObjectOptions{
@@ -3991,7 +4017,15 @@ func (api objectAPIHandlers) PostRestoreObjectHandler(w http.ResponseWriter, r *http.Request) {
 			rreq.SelectParameters.Close()
 			return
 		}
-		if err := restoreTransitionedObject(rctx, bucket, object, objectAPI, objInfo, rreq, restoreExpiry); err != nil {
+
+		opts := ObjectOptions{
+			Transition: TransitionOptions{
+				RestoreRequest: rreq,
+				RestoreExpiry:  restoreExpiry,
+			},
+			VersionID: objInfo.VersionID,
+		}
+		if err := objectAPI.RestoreTransitionedObject(rctx, bucket, object, opts); err != nil {
 			logger.LogIf(ctx, err)
 			return
 		}