mirror of
https://github.com/minio/minio.git
synced 2025-11-10 22:10:12 -05:00
Support for remote tier management (#12090)
With this change, MinIO's ILM supports transitioning objects to a remote tier. This change includes support for Azure Blob Storage, AWS S3 compatible object storage incl. MinIO and Google Cloud Storage as remote tier storage backends. Some new additions include: - Admin APIs remote tier configuration management - Simple journal to track remote objects to be 'collected' This is used by object API handlers which 'mutate' object versions by overwriting/replacing content (Put/CopyObject) or removing the version itself (e.g DeleteObjectVersion). - Rework of previous ILM transition to fit the new model In the new model, a storage class (a.k.a remote tier) is defined by the 'remote' object storage type (one of s3, azure, GCS), bucket name and a prefix. * Fixed bugs, review comments, and more unit-tests - Leverage inline small object feature - Migrate legacy objects to the latest object format before transitioning - Fix restore to particular version if specified - Extend SharedDataDirCount to handle transitioned and restored objects - Restore-object should accept version-id for version-suspended bucket (#12091) - Check if remote tier creds have sufficient permissions - Bonus minor fixes to existing error messages Co-authored-by: Poorna Krishnamoorthy <poorna@minio.io> Co-authored-by: Krishna Srinivas <krishna@minio.io> Signed-off-by: Harshavardhana <harsha@minio.io>
This commit is contained in:
committed by
Harshavardhana
parent
069432566f
commit
c829e3a13b
@@ -1010,6 +1010,50 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
|
||||
Description: err.Error(),
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
}
|
||||
|
||||
// Tier admin API errors
|
||||
case errors.Is(err, madmin.ErrTierNameEmpty):
|
||||
apiErr = APIError{
|
||||
Code: "XMinioAdminTierNameEmpty",
|
||||
Description: err.Error(),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
case errors.Is(err, madmin.ErrTierInvalidConfig):
|
||||
apiErr = APIError{
|
||||
Code: "XMinioAdminTierInvalidConfig",
|
||||
Description: err.Error(),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
case errors.Is(err, madmin.ErrTierInvalidConfigVersion):
|
||||
apiErr = APIError{
|
||||
Code: "XMinioAdminTierInvalidConfigVersion",
|
||||
Description: err.Error(),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
case errors.Is(err, madmin.ErrTierTypeUnsupported):
|
||||
apiErr = APIError{
|
||||
Code: "XMinioAdminTierTypeUnsupported",
|
||||
Description: err.Error(),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
case errors.Is(err, errTierBackendInUse):
|
||||
apiErr = APIError{
|
||||
Code: "XMinioAdminTierBackendInUse",
|
||||
Description: err.Error(),
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
}
|
||||
case errors.Is(err, errTierInsufficientCreds):
|
||||
apiErr = APIError{
|
||||
Code: "XMinioAdminTierInsufficientCreds",
|
||||
Description: err.Error(),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
case errIsTierPermError(err):
|
||||
apiErr = APIError{
|
||||
Code: "XMinioAdminTierInsufficientPermissions",
|
||||
Description: err.Error(),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
default:
|
||||
apiErr = errorCodes.ToAPIErrWithErr(toAdminAPIErrCode(ctx, err), err)
|
||||
}
|
||||
|
||||
@@ -189,6 +189,11 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
|
||||
// RemoveRemoteTargetHandler
|
||||
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
|
||||
httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
|
||||
|
||||
// Remote Tier management operations
|
||||
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/tier").HandlerFunc(httpTraceHdrs(adminAPI.AddTierHandler))
|
||||
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/tier/{tier}").HandlerFunc(httpTraceHdrs(adminAPI.EditTierHandler))
|
||||
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/tier").HandlerFunc(httpTraceHdrs(adminAPI.ListTierHandler))
|
||||
}
|
||||
|
||||
if globalIsDistErasure {
|
||||
|
||||
@@ -36,8 +36,6 @@ type DeletedObject struct {
|
||||
DeleteMarkerMTime DeleteMarkerMTime `xml:"DeleteMarkerMTime,omitempty"`
|
||||
// Status of versioned delete (of object or DeleteMarker)
|
||||
VersionPurgeStatus VersionPurgeStatusType `xml:"VersionPurgeStatus,omitempty"`
|
||||
// PurgeTransitioned is nonempty if object is in transition tier
|
||||
PurgeTransitioned string `xml:"PurgeTransitioned,omitempty"`
|
||||
}
|
||||
|
||||
// DeleteMarkerMTime is an embedded type containing time.Time for XML marshal
|
||||
@@ -64,8 +62,6 @@ type ObjectToDelete struct {
|
||||
VersionPurgeStatus VersionPurgeStatusType `xml:"VersionPurgeStatus"`
|
||||
// Version ID of delete marker
|
||||
DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId"`
|
||||
// PurgeTransitioned is nonempty if object is in transition tier
|
||||
PurgeTransitioned string `xml:"PurgeTransitioned"`
|
||||
}
|
||||
|
||||
// createBucketConfiguration container for bucket configuration request from client.
|
||||
|
||||
@@ -241,6 +241,7 @@ const (
|
||||
ErrClientDisconnected
|
||||
ErrOperationMaxedOut
|
||||
ErrInvalidRequest
|
||||
ErrTransitionStorageClassNotFoundError
|
||||
// MinIO storage class error codes
|
||||
ErrInvalidStorageClass
|
||||
ErrBackendDown
|
||||
@@ -943,6 +944,12 @@ var errorCodes = errorCodeMap{
|
||||
Description: "Object restore is already in progress",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
},
|
||||
ErrTransitionStorageClassNotFoundError: {
|
||||
Code: "TransitionStorageClassNotFoundError",
|
||||
Description: "The transition storage class was not found",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
},
|
||||
|
||||
/// Bucket notification related errors.
|
||||
ErrEventNotification: {
|
||||
Code: "InvalidArgument",
|
||||
@@ -1972,6 +1979,11 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
||||
apiErr = ErrRemoteTargetNotVersionedError
|
||||
case BucketReplicationSourceNotVersioned:
|
||||
apiErr = ErrReplicationSourceNotVersionedError
|
||||
case TransitionStorageClassNotFound:
|
||||
apiErr = ErrTransitionStorageClassNotFoundError
|
||||
case InvalidObjectState:
|
||||
apiErr = ErrInvalidObjectState
|
||||
|
||||
case BucketQuotaExceeded:
|
||||
apiErr = ErrAdminBucketQuotaExceeded
|
||||
case *event.ErrInvalidEventName:
|
||||
|
||||
@@ -202,8 +202,28 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
|
||||
}
|
||||
}
|
||||
if objInfo.TransitionStatus == lifecycle.TransitionComplete {
|
||||
w.Header()[xhttp.AmzStorageClass] = []string{objInfo.StorageClass}
|
||||
// Check if object is being restored. For more information on x-amz-restore header see
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html#API_HeadObject_ResponseSyntax
|
||||
if onDisk := isRestoredObjectOnDisk(objInfo.UserDefined); !onDisk {
|
||||
w.Header()[xhttp.AmzStorageClass] = []string{objInfo.TransitionTier}
|
||||
}
|
||||
}
|
||||
ruleID, transitionTime := lc.PredictTransitionTime(lifecycle.ObjectOpts{
|
||||
Name: objInfo.Name,
|
||||
UserTags: objInfo.UserTags,
|
||||
VersionID: objInfo.VersionID,
|
||||
ModTime: objInfo.ModTime,
|
||||
IsLatest: objInfo.IsLatest,
|
||||
DeleteMarker: objInfo.DeleteMarker,
|
||||
TransitionStatus: objInfo.TransitionStatus,
|
||||
})
|
||||
if !transitionTime.IsZero() {
|
||||
// This header is a MinIO centric extension to show expected transition date in a similar spirit as x-amz-expiration
|
||||
w.Header()[xhttp.MinIOTransition] = []string{
|
||||
fmt.Sprintf(`transition-date="%s", rule-id="%s"`, transitionTime.Format(http.TimeFormat), ruleID),
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
@@ -43,7 +43,6 @@ import (
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
"github.com/minio/minio/pkg/bucket/replication"
|
||||
@@ -448,18 +447,17 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
getObjectInfoFn = api.CacheAPI().GetObjectInfo
|
||||
}
|
||||
var (
|
||||
hasLockEnabled, hasLifecycleConfig, replicateSync bool
|
||||
goi ObjectInfo
|
||||
gerr error
|
||||
hasLockEnabled, replicateSync bool
|
||||
goi ObjectInfo
|
||||
gerr error
|
||||
)
|
||||
replicateDeletes := hasReplicationRules(ctx, bucket, deleteObjects.Objects)
|
||||
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
|
||||
hasLockEnabled = true
|
||||
}
|
||||
if _, err := globalBucketMetadataSys.GetLifecycleConfig(bucket); err == nil {
|
||||
hasLifecycleConfig = true
|
||||
}
|
||||
|
||||
dErrs := make([]DeleteError, len(deleteObjects.Objects))
|
||||
oss := make([]*objSweeper, len(deleteObjects.Objects))
|
||||
for index, object := range deleteObjects.Objects {
|
||||
if apiErrCode := checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName); apiErrCode != ErrNone {
|
||||
if apiErrCode == ErrSignatureDoesNotMatch || apiErrCode == ErrInvalidAccessKeyID {
|
||||
@@ -489,13 +487,15 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
}
|
||||
}
|
||||
|
||||
if replicateDeletes || hasLockEnabled || hasLifecycleConfig {
|
||||
goi, gerr = getObjectInfoFn(ctx, bucket, object.ObjectName, ObjectOptions{
|
||||
VersionID: object.VersionID,
|
||||
})
|
||||
}
|
||||
if hasLifecycleConfig && gerr == nil {
|
||||
object.PurgeTransitioned = goi.TransitionStatus
|
||||
oss[index] = newObjSweeper(bucket, object.ObjectName).WithVersion(multiDelete(object))
|
||||
// Mutations of objects on versioning suspended buckets
|
||||
// affect its null version. Through opts below we select
|
||||
// the null version's remote object to delete if
|
||||
// transitioned.
|
||||
opts := oss[index].GetOpts()
|
||||
goi, gerr = getObjectInfoFn(ctx, bucket, object.ObjectName, opts)
|
||||
if gerr == nil {
|
||||
oss[index].SetTransitionState(goi)
|
||||
}
|
||||
if replicateDeletes {
|
||||
replicate, repsync := checkReplicateDelete(ctx, bucket, ObjectToDelete{
|
||||
@@ -565,7 +565,6 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
VersionID: dObjects[i].VersionID,
|
||||
VersionPurgeStatus: dObjects[i].VersionPurgeStatus,
|
||||
DeleteMarkerReplicationStatus: dObjects[i].DeleteMarkerReplicationStatus,
|
||||
PurgeTransitioned: dObjects[i].PurgeTransitioned,
|
||||
}
|
||||
dindex := objectsToDelete[objToDel]
|
||||
if errs[i] == nil || isErrObjectNotFound(errs[i]) || isErrVersionNotFound(errs[i]) {
|
||||
@@ -613,14 +612,18 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
|
||||
}
|
||||
}
|
||||
|
||||
if hasLifecycleConfig && dobj.PurgeTransitioned == lifecycle.TransitionComplete { // clean up transitioned tier
|
||||
deleteTransitionedObject(ctx, objectAPI, bucket, dobj.ObjectName, lifecycle.ObjectOpts{
|
||||
Name: dobj.ObjectName,
|
||||
VersionID: dobj.VersionID,
|
||||
DeleteMarker: dobj.DeleteMarker,
|
||||
}, false, true)
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up transitioned objects from remote tier
|
||||
for _, os := range oss {
|
||||
if os == nil { // skip objects that weren't deleted due to invalid versionID etc.
|
||||
continue
|
||||
}
|
||||
logger.LogIf(ctx, os.Sweep())
|
||||
}
|
||||
|
||||
// Notify deleted event for objects.
|
||||
for _, dobj := range deletedObjects {
|
||||
eventName := event.ObjectRemovedDelete
|
||||
objInfo := ObjectInfo{
|
||||
Name: dobj.ObjectName,
|
||||
|
||||
@@ -28,21 +28,25 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
miniogo "github.com/minio/minio-go/v7"
|
||||
"github.com/google/uuid"
|
||||
"github.com/minio/minio-go/v7/pkg/tags"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
sse "github.com/minio/minio/pkg/bucket/encryption"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
"github.com/minio/minio/pkg/s3select"
|
||||
)
|
||||
|
||||
const (
|
||||
// Disabled means the lifecycle rule is inactive
|
||||
Disabled = "Disabled"
|
||||
// TransitionStatus status of transition
|
||||
TransitionStatus = "transition-status"
|
||||
// TransitionedObjectName name of transitioned object
|
||||
TransitionedObjectName = "transitioned-object"
|
||||
// TransitionTier name of transition storage class
|
||||
TransitionTier = "transition-tier"
|
||||
)
|
||||
|
||||
// LifecycleSys - Bucket lifecycle subsystem.
|
||||
@@ -151,8 +155,9 @@ func (t *transitionState) addWorker(ctx context.Context, objectAPI ObjectLayer)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if err := transitionObject(ctx, objectAPI, oi); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
logger.LogIf(ctx, fmt.Errorf("Transition failed for %s/%s version:%s with %w", oi.Bucket, oi.Name, oi.VersionID, err))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -173,13 +178,10 @@ func initBackgroundTransition(ctx context.Context, objectAPI ObjectLayer) {
|
||||
func validateLifecycleTransition(ctx context.Context, bucket string, lfc *lifecycle.Lifecycle) error {
|
||||
for _, rule := range lfc.Rules {
|
||||
if rule.Transition.StorageClass != "" {
|
||||
sameTarget, destbucket, err := validateTransitionDestination(ctx, bucket, rule.Transition.StorageClass)
|
||||
err := validateTransitionDestination(rule.Transition.StorageClass)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if sameTarget && destbucket == bucket {
|
||||
return fmt.Errorf("Transition destination cannot be the same as the source bucket")
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -187,162 +189,93 @@ func validateLifecycleTransition(ctx context.Context, bucket string, lfc *lifecy
|
||||
|
||||
// validateTransitionDestination returns error if transition destination bucket missing or not configured
|
||||
// It also returns true if transition destination is same as this server.
|
||||
func validateTransitionDestination(ctx context.Context, bucket string, targetLabel string) (bool, string, error) {
|
||||
tgt := globalBucketTargetSys.GetRemoteTargetWithLabel(ctx, bucket, targetLabel)
|
||||
if tgt == nil {
|
||||
return false, "", BucketRemoteTargetNotFound{Bucket: bucket}
|
||||
}
|
||||
arn, err := madmin.ParseARN(tgt.Arn)
|
||||
func validateTransitionDestination(sc string) error {
|
||||
backend, err := globalTierConfigMgr.getDriver(sc)
|
||||
if err != nil {
|
||||
return false, "", BucketRemoteTargetNotFound{Bucket: bucket}
|
||||
return TransitionStorageClassNotFound{}
|
||||
}
|
||||
if arn.Type != madmin.ILMService {
|
||||
return false, "", BucketRemoteArnTypeInvalid{}
|
||||
}
|
||||
clnt := globalBucketTargetSys.GetRemoteTargetClient(ctx, tgt.Arn)
|
||||
if clnt == nil {
|
||||
return false, "", BucketRemoteTargetNotFound{Bucket: bucket}
|
||||
}
|
||||
if found, _ := clnt.BucketExists(ctx, arn.Bucket); !found {
|
||||
return false, "", BucketRemoteDestinationNotFound{Bucket: arn.Bucket}
|
||||
}
|
||||
sameTarget, _ := isLocalHost(clnt.EndpointURL().Hostname(), clnt.EndpointURL().Port(), globalMinioPort)
|
||||
return sameTarget, arn.Bucket, nil
|
||||
}
|
||||
|
||||
// transitionSC returns storage class label for this bucket
|
||||
func transitionSC(ctx context.Context, bucket string) string {
|
||||
cfg, err := globalBucketMetadataSys.GetLifecycleConfig(bucket)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
for _, rule := range cfg.Rules {
|
||||
if rule.Status == Disabled {
|
||||
continue
|
||||
}
|
||||
if rule.Transition.StorageClass != "" {
|
||||
return rule.Transition.StorageClass
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// return true if ARN representing transition storage class is present in a active rule
|
||||
// for the lifecycle configured on this bucket
|
||||
func transitionSCInUse(ctx context.Context, lfc *lifecycle.Lifecycle, bucket, arnStr string) bool {
|
||||
tgtLabel := globalBucketTargetSys.GetRemoteLabelWithArn(ctx, bucket, arnStr)
|
||||
if tgtLabel == "" {
|
||||
return false
|
||||
}
|
||||
for _, rule := range lfc.Rules {
|
||||
if rule.Status == Disabled {
|
||||
continue
|
||||
}
|
||||
if rule.Transition.StorageClass != "" && rule.Transition.StorageClass == tgtLabel {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// set PutObjectOptions for PUT operation to transition data to target cluster
|
||||
func putTransitionOpts(objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) {
|
||||
meta := make(map[string]string)
|
||||
|
||||
putOpts = miniogo.PutObjectOptions{
|
||||
UserMetadata: meta,
|
||||
ContentType: objInfo.ContentType,
|
||||
ContentEncoding: objInfo.ContentEncoding,
|
||||
StorageClass: objInfo.StorageClass,
|
||||
Internal: miniogo.AdvancedPutOptions{
|
||||
SourceVersionID: objInfo.VersionID,
|
||||
SourceMTime: objInfo.ModTime,
|
||||
SourceETag: objInfo.ETag,
|
||||
},
|
||||
}
|
||||
|
||||
if objInfo.UserTags != "" {
|
||||
tag, _ := tags.ParseObjectTags(objInfo.UserTags)
|
||||
if tag != nil {
|
||||
putOpts.UserTags = tag.ToMap()
|
||||
}
|
||||
}
|
||||
|
||||
lkMap := caseInsensitiveMap(objInfo.UserDefined)
|
||||
if lang, ok := lkMap.Lookup(xhttp.ContentLanguage); ok {
|
||||
putOpts.ContentLanguage = lang
|
||||
}
|
||||
if disp, ok := lkMap.Lookup(xhttp.ContentDisposition); ok {
|
||||
putOpts.ContentDisposition = disp
|
||||
}
|
||||
if cc, ok := lkMap.Lookup(xhttp.CacheControl); ok {
|
||||
putOpts.CacheControl = cc
|
||||
}
|
||||
if mode, ok := lkMap.Lookup(xhttp.AmzObjectLockMode); ok {
|
||||
rmode := miniogo.RetentionMode(mode)
|
||||
putOpts.Mode = rmode
|
||||
}
|
||||
if retainDateStr, ok := lkMap.Lookup(xhttp.AmzObjectLockRetainUntilDate); ok {
|
||||
rdate, err := time.Parse(time.RFC3339, retainDateStr)
|
||||
if err != nil {
|
||||
return putOpts, err
|
||||
}
|
||||
putOpts.RetainUntilDate = rdate
|
||||
}
|
||||
if lhold, ok := lkMap.Lookup(xhttp.AmzObjectLockLegalHold); ok {
|
||||
putOpts.LegalHold = miniogo.LegalHoldStatus(lhold)
|
||||
}
|
||||
|
||||
return putOpts, nil
|
||||
}
|
||||
|
||||
// handle deletes of transitioned objects or object versions when one of the following is true:
|
||||
// 1. temporarily restored copies of objects (restored with the PostRestoreObject API) expired.
|
||||
// 2. life cycle expiry date is met on the object.
|
||||
// 3. Object is removed through DELETE api call
|
||||
func deleteTransitionedObject(ctx context.Context, objectAPI ObjectLayer, bucket, object string, lcOpts lifecycle.ObjectOpts, restoredObject, isDeleteTierOnly bool) error {
|
||||
if lcOpts.TransitionStatus == "" && !isDeleteTierOnly {
|
||||
return nil
|
||||
}
|
||||
lc, err := globalLifecycleSys.Get(bucket)
|
||||
if err != nil {
|
||||
_, err = backend.Get(context.Background(), "probeobject", WarmBackendGetOpts{})
|
||||
if !isErrObjectNotFound(err) {
|
||||
return err
|
||||
}
|
||||
arn := getLifecycleTransitionTargetArn(ctx, lc, bucket, lcOpts)
|
||||
if arn == nil {
|
||||
return fmt.Errorf("remote target not configured")
|
||||
}
|
||||
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, arn.String())
|
||||
if tgt == nil {
|
||||
return fmt.Errorf("remote target not configured")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// expireAction represents different actions to be performed on expiry of a
|
||||
// restored/transitioned object
|
||||
type expireAction int
|
||||
|
||||
const (
|
||||
// ignore the zero value
|
||||
_ expireAction = iota
|
||||
// expireObj indicates expiry of 'regular' transitioned objects.
|
||||
expireObj
|
||||
// expireRestoredObj indicates expiry of restored objects.
|
||||
expireRestoredObj
|
||||
)
|
||||
|
||||
// expireTransitionedObject handles expiry of transitioned/restored objects
|
||||
// (versions) in one of the following situations:
|
||||
//
|
||||
// 1. when a restored (via PostRestoreObject API) object expires.
|
||||
// 2. when a transitioned object expires (based on an ILM rule).
|
||||
func expireTransitionedObject(ctx context.Context, objectAPI ObjectLayer, bucket, object string, lcOpts lifecycle.ObjectOpts, remoteObject, tier string, action expireAction) error {
|
||||
var opts ObjectOptions
|
||||
opts.Versioned = globalBucketVersioningSys.Enabled(bucket)
|
||||
opts.VersionID = lcOpts.VersionID
|
||||
if restoredObject {
|
||||
switch action {
|
||||
case expireObj:
|
||||
// When an object is past expiry or when a transitioned object is being
|
||||
// deleted, 'mark' the data in the remote tier for delete.
|
||||
if err := globalTierJournal.AddEntry(jentry{ObjName: remoteObject, TierName: tier}); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return err
|
||||
}
|
||||
// Delete metadata on source, now that data in remote tier has been
|
||||
// marked for deletion.
|
||||
if _, err := objectAPI.DeleteObject(ctx, bucket, object, opts); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return err
|
||||
}
|
||||
|
||||
eventName := event.ObjectRemovedDelete
|
||||
if lcOpts.DeleteMarker {
|
||||
eventName = event.ObjectRemovedDeleteMarkerCreated
|
||||
}
|
||||
objInfo := ObjectInfo{
|
||||
Name: object,
|
||||
VersionID: lcOpts.VersionID,
|
||||
DeleteMarker: lcOpts.DeleteMarker,
|
||||
}
|
||||
// Notify object deleted event.
|
||||
sendEvent(eventArgs{
|
||||
EventName: eventName,
|
||||
BucketName: bucket,
|
||||
Object: objInfo,
|
||||
Host: "Internal: [ILM-EXPIRY]",
|
||||
})
|
||||
|
||||
case expireRestoredObj:
|
||||
// delete locally restored copy of object or object version
|
||||
// from the source, while leaving metadata behind. The data on
|
||||
// transitioned tier lies untouched and still accessible
|
||||
opts.TransitionStatus = lcOpts.TransitionStatus
|
||||
_, err = objectAPI.DeleteObject(ctx, bucket, object, opts)
|
||||
opts.Transition.ExpireRestored = true
|
||||
_, err := objectAPI.DeleteObject(ctx, bucket, object, opts)
|
||||
return err
|
||||
default:
|
||||
return fmt.Errorf("Unknown expire action %v", action)
|
||||
}
|
||||
|
||||
// When an object is past expiry, delete the data from transitioned tier and
|
||||
// metadata from source
|
||||
if err := tgt.RemoveObject(context.Background(), arn.Bucket, object, miniogo.RemoveObjectOptions{VersionID: lcOpts.VersionID}); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if isDeleteTierOnly {
|
||||
return nil
|
||||
}
|
||||
|
||||
objInfo, err := objectAPI.DeleteObject(ctx, bucket, object, opts)
|
||||
// generate an object name for transitioned object
|
||||
func genTransitionObjName() (string, error) {
|
||||
u, err := uuid.NewRandom()
|
||||
if err != nil {
|
||||
return err
|
||||
return "", err
|
||||
}
|
||||
<<<<<<< HEAD
|
||||
|
||||
// Send audit for the lifecycle delete operation
|
||||
auditLogLifecycle(ctx, bucket, object)
|
||||
@@ -361,130 +294,72 @@ func deleteTransitionedObject(ctx context.Context, objectAPI ObjectLayer, bucket
|
||||
|
||||
// should never reach here
|
||||
return nil
|
||||
=======
|
||||
us := u.String()
|
||||
obj := fmt.Sprintf("%s/%s/%s", us[0:2], us[2:4], us)
|
||||
return obj, nil
|
||||
>>>>>>> ef4fac9f3... Support for remote tier management (#12090)
|
||||
}
|
||||
|
||||
// transition object to target specified by the transition ARN. When an object is transitioned to another
|
||||
// storage specified by the transition ARN, the metadata is left behind on source cluster and original content
|
||||
// is moved to the transition tier. Note that in the case of encrypted objects, entire encrypted stream is moved
|
||||
// to the transition tier without decrypting or re-encrypting.
|
||||
func transitionObject(ctx context.Context, objectAPI ObjectLayer, objInfo ObjectInfo) error {
|
||||
lc, err := globalLifecycleSys.Get(objInfo.Bucket)
|
||||
func transitionObject(ctx context.Context, objectAPI ObjectLayer, oi ObjectInfo) error {
|
||||
lc, err := globalLifecycleSys.Get(oi.Bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
lcOpts := lifecycle.ObjectOpts{
|
||||
Name: objInfo.Name,
|
||||
UserTags: objInfo.UserTags,
|
||||
Name: oi.Name,
|
||||
UserTags: oi.UserTags,
|
||||
}
|
||||
arn := getLifecycleTransitionTargetArn(ctx, lc, objInfo.Bucket, lcOpts)
|
||||
if arn == nil {
|
||||
return fmt.Errorf("remote target not configured")
|
||||
tierName := getLifeCycleTransitionTier(ctx, lc, oi.Bucket, lcOpts)
|
||||
opts := ObjectOptions{Transition: TransitionOptions{
|
||||
Status: lifecycle.TransitionPending,
|
||||
Tier: tierName,
|
||||
ETag: oi.ETag,
|
||||
},
|
||||
VersionID: oi.VersionID,
|
||||
Versioned: globalBucketVersioningSys.Enabled(oi.Bucket),
|
||||
MTime: oi.ModTime,
|
||||
}
|
||||
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, arn.String())
|
||||
if tgt == nil {
|
||||
return fmt.Errorf("remote target not configured")
|
||||
}
|
||||
|
||||
gr, err := objectAPI.GetObjectNInfo(ctx, objInfo.Bucket, objInfo.Name, nil, http.Header{}, readLock, ObjectOptions{
|
||||
VersionID: objInfo.VersionID,
|
||||
TransitionStatus: lifecycle.TransitionPending,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
oi := gr.ObjInfo
|
||||
if oi.TransitionStatus == lifecycle.TransitionComplete {
|
||||
gr.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
putOpts, err := putTransitionOpts(oi)
|
||||
if err != nil {
|
||||
gr.Close()
|
||||
return err
|
||||
|
||||
}
|
||||
if _, err = tgt.PutObject(ctx, arn.Bucket, oi.Name, gr, oi.Size, putOpts); err != nil {
|
||||
gr.Close()
|
||||
return err
|
||||
}
|
||||
gr.Close()
|
||||
|
||||
var opts ObjectOptions
|
||||
opts.Versioned = globalBucketVersioningSys.Enabled(oi.Bucket)
|
||||
opts.VersionID = oi.VersionID
|
||||
opts.TransitionStatus = lifecycle.TransitionComplete
|
||||
eventName := event.ObjectTransitionComplete
|
||||
|
||||
objInfo, err = objectAPI.DeleteObject(ctx, oi.Bucket, oi.Name, opts)
|
||||
if err != nil {
|
||||
eventName = event.ObjectTransitionFailed
|
||||
}
|
||||
|
||||
// Notify object deleted event.
|
||||
sendEvent(eventArgs{
|
||||
EventName: eventName,
|
||||
BucketName: objInfo.Bucket,
|
||||
Object: objInfo,
|
||||
Host: "Internal: [ILM-Transition]",
|
||||
})
|
||||
|
||||
return err
|
||||
return objectAPI.TransitionObject(ctx, oi.Bucket, oi.Name, opts)
|
||||
}
|
||||
|
||||
// getLifecycleTransitionTargetArn returns transition ARN for storage class specified in the config.
|
||||
func getLifecycleTransitionTargetArn(ctx context.Context, lc *lifecycle.Lifecycle, bucket string, obj lifecycle.ObjectOpts) *madmin.ARN {
|
||||
// getLifeCycleTransitionTier returns storage class for transition target
|
||||
func getLifeCycleTransitionTier(ctx context.Context, lc *lifecycle.Lifecycle, bucket string, obj lifecycle.ObjectOpts) string {
|
||||
for _, rule := range lc.FilterActionableRules(obj) {
|
||||
if rule.Transition.StorageClass != "" {
|
||||
return globalBucketTargetSys.GetRemoteArnWithLabel(ctx, bucket, rule.Transition.StorageClass)
|
||||
return rule.Transition.StorageClass
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return ""
|
||||
}
|
||||
|
||||
// getTransitionedObjectReader returns a reader from the transitioned tier.
|
||||
func getTransitionedObjectReader(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, oi ObjectInfo, opts ObjectOptions) (gr *GetObjectReader, err error) {
|
||||
var lc *lifecycle.Lifecycle
|
||||
lc, err = globalLifecycleSys.Get(bucket)
|
||||
tgtClient, err := globalTierConfigMgr.getDriver(oi.TransitionTier)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
arn := getLifecycleTransitionTargetArn(ctx, lc, bucket, lifecycle.ObjectOpts{
|
||||
Name: object,
|
||||
UserTags: oi.UserTags,
|
||||
ModTime: oi.ModTime,
|
||||
VersionID: oi.VersionID,
|
||||
DeleteMarker: oi.DeleteMarker,
|
||||
IsLatest: oi.IsLatest,
|
||||
})
|
||||
if arn == nil {
|
||||
return nil, fmt.Errorf("remote target not configured")
|
||||
}
|
||||
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, arn.String())
|
||||
if tgt == nil {
|
||||
return nil, fmt.Errorf("remote target not configured")
|
||||
return nil, fmt.Errorf("transition storage class not configured")
|
||||
}
|
||||
fn, off, length, err := NewGetObjectReader(rs, oi, opts)
|
||||
if err != nil {
|
||||
return nil, ErrorRespToObjectError(err, bucket, object)
|
||||
}
|
||||
gopts := miniogo.GetObjectOptions{VersionID: opts.VersionID}
|
||||
gopts := WarmBackendGetOpts{}
|
||||
|
||||
// get correct offsets for encrypted object
|
||||
// get correct offsets for object
|
||||
if off >= 0 && length >= 0 {
|
||||
if err := gopts.SetRange(off, off+length-1); err != nil {
|
||||
return nil, ErrorRespToObjectError(err, bucket, object)
|
||||
}
|
||||
gopts.startOffset = off
|
||||
gopts.length = length
|
||||
}
|
||||
|
||||
reader, err := tgt.GetObject(ctx, arn.Bucket, object, gopts)
|
||||
reader, err := tgtClient.Get(ctx, oi.transitionedObjName, gopts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
closeReader := func() { reader.Close() }
|
||||
|
||||
return fn(reader, h, opts.CheckPrecondFn, closeReader)
|
||||
return fn(reader, h, opts.CheckPrecondFn)
|
||||
}
|
||||
|
||||
// RestoreRequestType represents type of restore.
|
||||
@@ -611,6 +486,36 @@ func (r *RestoreObjectRequest) validate(ctx context.Context, objAPI ObjectLayer)
|
||||
return nil
|
||||
}
|
||||
|
||||
// postRestoreOpts returns ObjectOptions with version-id from the POST restore object request for a given bucket and object.
|
||||
func postRestoreOpts(ctx context.Context, r *http.Request, bucket, object string) (opts ObjectOptions, err error) {
|
||||
versioned := globalBucketVersioningSys.Enabled(bucket)
|
||||
versionSuspended := globalBucketVersioningSys.Suspended(bucket)
|
||||
vid := strings.TrimSpace(r.URL.Query().Get(xhttp.VersionID))
|
||||
if vid != "" && vid != nullVersionID {
|
||||
_, err := uuid.Parse(vid)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return opts, InvalidVersionID{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
VersionID: vid,
|
||||
}
|
||||
}
|
||||
if !versioned && !versionSuspended {
|
||||
return opts, InvalidArgument{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
Err: fmt.Errorf("version-id specified %s but versioning is not enabled on %s", opts.VersionID, bucket),
|
||||
}
|
||||
}
|
||||
}
|
||||
return ObjectOptions{
|
||||
Versioned: versioned,
|
||||
VersionSuspended: versionSuspended,
|
||||
VersionID: vid,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// set ObjectOptions for PUT call to restore temporary copy of transitioned data
|
||||
func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo ObjectInfo) (putOpts ObjectOptions) {
|
||||
meta := make(map[string]string)
|
||||
@@ -657,60 +562,113 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
errRestoreHDRMissing = fmt.Errorf("x-amz-restore header not found")
|
||||
errRestoreHDRMalformed = fmt.Errorf("x-amz-restore header malformed")
|
||||
)
|
||||
var errRestoreHDRMalformed = fmt.Errorf("x-amz-restore header malformed")
|
||||
|
||||
// parse x-amz-restore header from user metadata to get the status of ongoing request and expiry of restoration
|
||||
// if any. This header value is of format: ongoing-request=true|false, expires=time
|
||||
func parseRestoreHeaderFromMeta(meta map[string]string) (ongoing bool, expiry time.Time, err error) {
|
||||
restoreHdr, ok := meta[xhttp.AmzRestore]
|
||||
if !ok {
|
||||
return ongoing, expiry, errRestoreHDRMissing
|
||||
}
|
||||
rslc := strings.SplitN(restoreHdr, ",", 2)
|
||||
if len(rslc) != 2 {
|
||||
return ongoing, expiry, errRestoreHDRMalformed
|
||||
}
|
||||
rstatusSlc := strings.SplitN(rslc[0], "=", 2)
|
||||
if len(rstatusSlc) != 2 {
|
||||
return ongoing, expiry, errRestoreHDRMalformed
|
||||
}
|
||||
rExpSlc := strings.SplitN(rslc[1], "=", 2)
|
||||
if len(rExpSlc) != 2 {
|
||||
return ongoing, expiry, errRestoreHDRMalformed
|
||||
}
|
||||
|
||||
expiry, err = time.Parse(http.TimeFormat, rExpSlc[1])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return rstatusSlc[1] == "true", expiry, nil
|
||||
// restoreObjStatus represents a restore-object's status. It can be either
|
||||
// ongoing or completed.
|
||||
type restoreObjStatus struct {
|
||||
ongoing bool
|
||||
expiry time.Time
|
||||
}
|
||||
|
||||
// restoreTransitionedObject is similar to PostObjectRestore from AWS GLACIER
|
||||
// storage class. When PostObjectRestore API is called, a temporary copy of the object
|
||||
// is restored locally to the bucket on source cluster until the restore expiry date.
|
||||
// The copy that was transitioned continues to reside in the transitioned tier.
|
||||
func restoreTransitionedObject(ctx context.Context, bucket, object string, objAPI ObjectLayer, objInfo ObjectInfo, rreq *RestoreObjectRequest, restoreExpiry time.Time) error {
|
||||
var rs *HTTPRangeSpec
|
||||
gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, http.Header{}, objInfo, ObjectOptions{
|
||||
VersionID: objInfo.VersionID})
|
||||
if err != nil {
|
||||
return err
|
||||
// ongoingRestoreObj constructs restoreObjStatus for an ongoing restore-object.
|
||||
func ongoingRestoreObj() restoreObjStatus {
|
||||
return restoreObjStatus{
|
||||
ongoing: true,
|
||||
}
|
||||
defer gr.Close()
|
||||
hashReader, err := hash.NewReader(gr, objInfo.Size, "", "", objInfo.Size)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// completeRestoreObj constructs restoreObjStatus for a completed restore-object with given expiry.
|
||||
func completedRestoreObj(expiry time.Time) restoreObjStatus {
|
||||
return restoreObjStatus{
|
||||
ongoing: false,
|
||||
expiry: expiry.UTC(),
|
||||
}
|
||||
pReader := NewPutObjReader(hashReader)
|
||||
opts := putRestoreOpts(bucket, object, rreq, objInfo)
|
||||
opts.UserDefined[xhttp.AmzRestore] = fmt.Sprintf("ongoing-request=%t, expiry-date=%s", false, restoreExpiry.Format(http.TimeFormat))
|
||||
if _, err := objAPI.PutObject(ctx, bucket, object, pReader, opts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// String returns x-amz-restore compatible representation of r.
|
||||
func (r restoreObjStatus) String() string {
|
||||
if r.Ongoing() {
|
||||
return "ongoing-request=true"
|
||||
}
|
||||
return fmt.Sprintf("ongoing-request=false, expiry-date=%s", r.expiry.Format(http.TimeFormat))
|
||||
}
|
||||
|
||||
// Expiry returns expiry of restored object and true if restore-object has completed.
|
||||
// Otherwise returns zero value of time.Time and false.
|
||||
func (r restoreObjStatus) Expiry() (time.Time, bool) {
|
||||
if r.Ongoing() {
|
||||
return time.Time{}, false
|
||||
}
|
||||
return r.expiry, true
|
||||
}
|
||||
|
||||
// Ongoing returns true if restore-object is ongoing.
|
||||
func (r restoreObjStatus) Ongoing() bool {
|
||||
return r.ongoing
|
||||
}
|
||||
|
||||
// OnDisk returns true if restored object contents exist in MinIO. Otherwise returns false.
|
||||
// The restore operation could be in one of the following states,
|
||||
// - in progress (no content on MinIO's disks yet)
|
||||
// - completed
|
||||
// - completed but expired (again, no content on MinIO's disks)
|
||||
func (r restoreObjStatus) OnDisk() bool {
|
||||
if expiry, ok := r.Expiry(); ok && time.Now().UTC().Before(expiry) {
|
||||
// completed
|
||||
return true
|
||||
}
|
||||
return false // in progress or completed but expired
|
||||
}
|
||||
|
||||
// parseRestoreObjStatus parses restoreHdr from AmzRestore header. If the value is valid it returns a
|
||||
// restoreObjStatus value with the status and expiry (if any). Otherwise returns
|
||||
// the empty value and an error indicating the parse failure.
|
||||
func parseRestoreObjStatus(restoreHdr string) (restoreObjStatus, error) {
|
||||
tokens := strings.SplitN(restoreHdr, ",", 2)
|
||||
progressTokens := strings.SplitN(tokens[0], "=", 2)
|
||||
if len(progressTokens) != 2 {
|
||||
return restoreObjStatus{}, errRestoreHDRMalformed
|
||||
}
|
||||
if strings.TrimSpace(progressTokens[0]) != "ongoing-request" {
|
||||
return restoreObjStatus{}, errRestoreHDRMalformed
|
||||
}
|
||||
|
||||
return nil
|
||||
switch progressTokens[1] {
|
||||
case "true":
|
||||
if len(tokens) == 1 {
|
||||
return ongoingRestoreObj(), nil
|
||||
}
|
||||
|
||||
case "false":
|
||||
if len(tokens) != 2 {
|
||||
return restoreObjStatus{}, errRestoreHDRMalformed
|
||||
}
|
||||
expiryTokens := strings.SplitN(tokens[1], "=", 2)
|
||||
if len(expiryTokens) != 2 {
|
||||
return restoreObjStatus{}, errRestoreHDRMalformed
|
||||
}
|
||||
if strings.TrimSpace(expiryTokens[0]) != "expiry-date" {
|
||||
return restoreObjStatus{}, errRestoreHDRMalformed
|
||||
}
|
||||
|
||||
expiry, err := time.Parse(http.TimeFormat, expiryTokens[1])
|
||||
if err != nil {
|
||||
return restoreObjStatus{}, errRestoreHDRMalformed
|
||||
}
|
||||
return completedRestoreObj(expiry), nil
|
||||
}
|
||||
return restoreObjStatus{}, errRestoreHDRMalformed
|
||||
}
|
||||
|
||||
// isRestoredObjectOnDisk returns true if the restored object is on disk. Note
|
||||
// this function must be called only if object version's transition status is
|
||||
// complete.
|
||||
func isRestoredObjectOnDisk(meta map[string]string) (onDisk bool) {
|
||||
if restoreHdr, ok := meta[xhttp.AmzRestore]; ok {
|
||||
if restoreStatus, err := parseRestoreObjStatus(restoreHdr); err == nil {
|
||||
return restoreStatus.OnDisk()
|
||||
}
|
||||
}
|
||||
return onDisk
|
||||
}
|
||||
|
||||
157
cmd/bucket-lifecycle_test.go
Normal file
157
cmd/bucket-lifecycle_test.go
Normal file
@@ -0,0 +1,157 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
)
|
||||
|
||||
// TestParseRestoreObjStatus tests parseRestoreObjStatus
|
||||
func TestParseRestoreObjStatus(t *testing.T) {
|
||||
testCases := []struct {
|
||||
restoreHdr string
|
||||
expectedStatus restoreObjStatus
|
||||
expectedErr error
|
||||
}{
|
||||
{
|
||||
// valid: represents a restored object, 'pending' expiry.
|
||||
restoreHdr: "ongoing-request=false, expiry-date=Fri, 21 Dec 2012 00:00:00 GMT",
|
||||
expectedStatus: restoreObjStatus{
|
||||
ongoing: false,
|
||||
expiry: time.Date(2012, 12, 21, 0, 0, 0, 0, time.UTC),
|
||||
},
|
||||
expectedErr: nil,
|
||||
},
|
||||
{
|
||||
// valid: represents an ongoing restore object request.
|
||||
restoreHdr: "ongoing-request=true",
|
||||
expectedStatus: restoreObjStatus{
|
||||
ongoing: true,
|
||||
},
|
||||
expectedErr: nil,
|
||||
},
|
||||
{
|
||||
// invalid; ongoing restore object request can't have expiry set on it.
|
||||
restoreHdr: "ongoing-request=true, expiry-date=Fri, 21 Dec 2012 00:00:00 GMT",
|
||||
expectedStatus: restoreObjStatus{},
|
||||
expectedErr: errRestoreHDRMalformed,
|
||||
},
|
||||
{
|
||||
// invalid; completed restore object request must have expiry set on it.
|
||||
restoreHdr: "ongoing-request=false",
|
||||
expectedStatus: restoreObjStatus{},
|
||||
expectedErr: errRestoreHDRMalformed,
|
||||
},
|
||||
}
|
||||
for i, tc := range testCases {
|
||||
actual, err := parseRestoreObjStatus(tc.restoreHdr)
|
||||
if err != tc.expectedErr {
|
||||
t.Fatalf("Test %d: got %v expected %v", i+1, err, tc.expectedErr)
|
||||
}
|
||||
if actual != tc.expectedStatus {
|
||||
t.Fatalf("Test %d: got %v expected %v", i+1, actual, tc.expectedStatus)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestRestoreObjStatusRoundTrip restoreObjStatus roundtrip
|
||||
func TestRestoreObjStatusRoundTrip(t *testing.T) {
|
||||
testCases := []restoreObjStatus{
|
||||
ongoingRestoreObj(),
|
||||
completedRestoreObj(time.Now().UTC()),
|
||||
}
|
||||
for i, tc := range testCases {
|
||||
actual, err := parseRestoreObjStatus(tc.String())
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: parse restore object failed: %v", i+1, err)
|
||||
}
|
||||
if actual.ongoing != tc.ongoing || actual.expiry.Format(http.TimeFormat) != tc.expiry.Format(http.TimeFormat) {
|
||||
t.Fatalf("Test %d: got %v expected %v", i+1, actual, tc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestRestoreObjOnDisk tests restoreObjStatus' OnDisk method
|
||||
func TestRestoreObjOnDisk(t *testing.T) {
|
||||
testCases := []struct {
|
||||
restoreStatus restoreObjStatus
|
||||
ondisk bool
|
||||
}{
|
||||
{
|
||||
// restore in progress
|
||||
restoreStatus: ongoingRestoreObj(),
|
||||
ondisk: false,
|
||||
},
|
||||
{
|
||||
// restore completed but expired
|
||||
restoreStatus: completedRestoreObj(time.Now().Add(-time.Hour)),
|
||||
ondisk: false,
|
||||
},
|
||||
{
|
||||
// restore completed
|
||||
restoreStatus: completedRestoreObj(time.Now().Add(time.Hour)),
|
||||
ondisk: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
if actual := tc.restoreStatus.OnDisk(); actual != tc.ondisk {
|
||||
t.Fatalf("Test %d: expected %v but got %v", i+1, tc.ondisk, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestIsRestoredObjectOnDisk tests isRestoredObjectOnDisk helper function
|
||||
func TestIsRestoredObjectOnDisk(t *testing.T) {
|
||||
testCases := []struct {
|
||||
meta map[string]string
|
||||
ondisk bool
|
||||
}{
|
||||
{
|
||||
// restore in progress
|
||||
meta: map[string]string{
|
||||
xhttp.AmzRestore: ongoingRestoreObj().String(),
|
||||
},
|
||||
ondisk: false,
|
||||
},
|
||||
{
|
||||
// restore completed
|
||||
meta: map[string]string{
|
||||
xhttp.AmzRestore: completedRestoreObj(time.Now().Add(time.Hour)).String(),
|
||||
},
|
||||
ondisk: true,
|
||||
},
|
||||
{
|
||||
// restore completed but expired
|
||||
meta: map[string]string{
|
||||
xhttp.AmzRestore: completedRestoreObj(time.Now().Add(-time.Hour)).String(),
|
||||
},
|
||||
ondisk: false,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
if actual := isRestoredObjectOnDisk(tc.meta); actual != tc.ondisk {
|
||||
t.Fatalf("Test %d: expected %v but got %v for %v", i+1, tc.ondisk, actual, tc.meta)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
@@ -30,6 +30,7 @@ import (
|
||||
miniogo "github.com/minio/minio-go/v7"
|
||||
"github.com/minio/minio-go/v7/pkg/encrypt"
|
||||
"github.com/minio/minio-go/v7/pkg/tags"
|
||||
"github.com/minio/minio/cmd/config/storageclass"
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
@@ -352,7 +353,6 @@ func getCopyObjMetadata(oi ObjectInfo, dest replication.Destination) map[string]
|
||||
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
|
||||
continue
|
||||
}
|
||||
|
||||
meta[k] = v
|
||||
}
|
||||
|
||||
@@ -373,9 +373,11 @@ func getCopyObjMetadata(oi ObjectInfo, dest replication.Destination) map[string]
|
||||
if sc == "" {
|
||||
sc = oi.StorageClass
|
||||
}
|
||||
if sc != "" {
|
||||
// drop non standard storage classes for tiering from replication
|
||||
if sc != "" && (sc == storageclass.RRS || sc == storageclass.STANDARD) {
|
||||
meta[xhttp.AmzStorageClass] = sc
|
||||
}
|
||||
|
||||
meta[xhttp.MinIOSourceETag] = oi.ETag
|
||||
meta[xhttp.MinIOSourceMTime] = oi.ModTime.Format(time.RFC3339Nano)
|
||||
meta[xhttp.AmzBucketReplicationStatus] = replication.Replica.String()
|
||||
@@ -415,7 +417,7 @@ func putReplicationOpts(ctx context.Context, dest replication.Destination, objIn
|
||||
}
|
||||
|
||||
sc := dest.StorageClass
|
||||
if sc == "" {
|
||||
if sc == "" && (objInfo.StorageClass == storageclass.STANDARD || objInfo.StorageClass == storageclass.RRS) {
|
||||
sc = objInfo.StorageClass
|
||||
}
|
||||
putOpts = miniogo.PutObjectOptions{
|
||||
@@ -612,6 +614,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to update replicate for %s/%s(%s): %w", bucket, object, objInfo.VersionID, err))
|
||||
return
|
||||
}
|
||||
|
||||
defer gr.Close() // hold write lock for entire transaction
|
||||
|
||||
objInfo = gr.ObjInfo
|
||||
@@ -720,7 +723,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
|
||||
r := bandwidth.NewMonitoredReader(ctx, globalBucketMonitor, gr, opts)
|
||||
if _, err = c.PutObject(ctx, dest.Bucket, object, r, size, "", "", putOpts); err != nil {
|
||||
replicationStatus = replication.Failed
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to replicate for object %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to replicate for object %s/%s(%s): %s", bucket, objInfo.Name, objInfo.VersionID, err))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
@@ -23,7 +23,6 @@ import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
@@ -122,45 +121,24 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
|
||||
return NotImplemented{Message: "Synchronous replication does not support bandwidth limits"}
|
||||
}
|
||||
}
|
||||
if tgt.Type == madmin.ILMService {
|
||||
if globalBucketVersioningSys.Enabled(bucket) {
|
||||
vcfg, err := clnt.GetBucketVersioning(ctx, tgt.TargetBucket)
|
||||
if err != nil {
|
||||
if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
|
||||
return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
|
||||
}
|
||||
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
|
||||
}
|
||||
if vcfg.Status != string(versioning.Enabled) {
|
||||
return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
|
||||
}
|
||||
}
|
||||
}
|
||||
sys.Lock()
|
||||
defer sys.Unlock()
|
||||
|
||||
tgts := sys.targetsMap[bucket]
|
||||
|
||||
newtgts := make([]madmin.BucketTarget, len(tgts))
|
||||
labels := make(map[string]struct{}, len(tgts))
|
||||
found := false
|
||||
for idx, t := range tgts {
|
||||
labels[t.Label] = struct{}{}
|
||||
if t.Type == tgt.Type {
|
||||
if t.Arn == tgt.Arn && !update {
|
||||
return BucketRemoteAlreadyExists{Bucket: t.TargetBucket}
|
||||
}
|
||||
if t.Label == tgt.Label && !update {
|
||||
return BucketRemoteLabelInUse{Bucket: t.TargetBucket}
|
||||
}
|
||||
newtgts[idx] = *tgt
|
||||
found = true
|
||||
continue
|
||||
}
|
||||
newtgts[idx] = t
|
||||
}
|
||||
if _, ok := labels[tgt.Label]; ok && !update {
|
||||
return BucketRemoteLabelInUse{Bucket: tgt.TargetBucket}
|
||||
}
|
||||
if !found && !update {
|
||||
newtgts = append(newtgts, *tgt)
|
||||
}
|
||||
@@ -194,15 +172,6 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
|
||||
}
|
||||
}
|
||||
}
|
||||
if arn.Type == madmin.ILMService {
|
||||
// reject removal of remote target if lifecycle transition uses this arn
|
||||
config, err := globalBucketMetadataSys.GetLifecycleConfig(bucket)
|
||||
if err == nil && transitionSCInUse(ctx, config, bucket, arnStr) {
|
||||
if _, ok := sys.arnRemotesMap[arnStr]; ok {
|
||||
return BucketRemoteRemoveDisallowed{Bucket: bucket}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// delete ARN type from list of matching targets
|
||||
sys.Lock()
|
||||
@@ -235,44 +204,6 @@ func (sys *BucketTargetSys) GetRemoteTargetClient(ctx context.Context, arn strin
|
||||
return sys.arnRemotesMap[arn]
|
||||
}
|
||||
|
||||
// GetRemoteTargetWithLabel returns bucket target given a target label
|
||||
func (sys *BucketTargetSys) GetRemoteTargetWithLabel(ctx context.Context, bucket, targetLabel string) *madmin.BucketTarget {
|
||||
sys.RLock()
|
||||
defer sys.RUnlock()
|
||||
for _, t := range sys.targetsMap[bucket] {
|
||||
if strings.ToUpper(t.Label) == strings.ToUpper(targetLabel) {
|
||||
tgt := t.Clone()
|
||||
return &tgt
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetRemoteArnWithLabel returns bucket target's ARN given its target label
|
||||
func (sys *BucketTargetSys) GetRemoteArnWithLabel(ctx context.Context, bucket, tgtLabel string) *madmin.ARN {
|
||||
tgt := sys.GetRemoteTargetWithLabel(ctx, bucket, tgtLabel)
|
||||
if tgt == nil {
|
||||
return nil
|
||||
}
|
||||
arn, err := madmin.ParseARN(tgt.Arn)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return arn
|
||||
}
|
||||
|
||||
// GetRemoteLabelWithArn returns a bucket target's label given its ARN
|
||||
func (sys *BucketTargetSys) GetRemoteLabelWithArn(ctx context.Context, bucket, arnStr string) string {
|
||||
sys.RLock()
|
||||
defer sys.RUnlock()
|
||||
for _, t := range sys.targetsMap[bucket] {
|
||||
if t.Arn == arnStr {
|
||||
return t.Label
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// NewBucketTargetSys - creates new replication system.
|
||||
func NewBucketTargetSys() *BucketTargetSys {
|
||||
return &BucketTargetSys{
|
||||
@@ -362,7 +293,6 @@ var getRemoteTargetInstanceTransportOnce sync.Once
|
||||
func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*TargetClient, error) {
|
||||
config := tcfg.Credentials
|
||||
creds := credentials.NewStaticV4(config.AccessKey, config.SecretKey, "")
|
||||
|
||||
getRemoteTargetInstanceTransportOnce.Do(func() {
|
||||
getRemoteTargetInstanceTransport = NewRemoteTargetHTTPTransport()
|
||||
})
|
||||
@@ -382,8 +312,9 @@ func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*T
|
||||
tc := &TargetClient{
|
||||
Client: api,
|
||||
healthCheckDuration: hcDuration,
|
||||
bucket: tcfg.TargetBucket,
|
||||
replicateSync: tcfg.ReplicationSync,
|
||||
Bucket: tcfg.TargetBucket,
|
||||
StorageClass: tcfg.StorageClass,
|
||||
}
|
||||
go tc.healthCheck()
|
||||
return tc, nil
|
||||
@@ -459,8 +390,9 @@ type TargetClient struct {
|
||||
*miniogo.Client
|
||||
up int32
|
||||
healthCheckDuration time.Duration
|
||||
bucket string // remote bucket target
|
||||
Bucket string // remote bucket target
|
||||
replicateSync bool
|
||||
StorageClass string // storage class on remote
|
||||
}
|
||||
|
||||
func (tc *TargetClient) isOffline() bool {
|
||||
@@ -469,7 +401,7 @@ func (tc *TargetClient) isOffline() bool {
|
||||
|
||||
func (tc *TargetClient) healthCheck() {
|
||||
for {
|
||||
_, err := tc.BucketExists(GlobalContext, tc.bucket)
|
||||
_, err := tc.BucketExists(GlobalContext, tc.Bucket)
|
||||
if err != nil {
|
||||
atomic.StoreInt32(&tc.up, 0)
|
||||
time.Sleep(tc.healthCheckDuration)
|
||||
|
||||
@@ -383,6 +383,9 @@ func handleCommonEnvVars() {
|
||||
}
|
||||
GlobalKMS = KMS
|
||||
}
|
||||
if tiers := env.Get("_MINIO_DEBUG_REMOTE_TIERS_IMMEDIATELY", ""); tiers != "" {
|
||||
globalDebugRemoteTiersImmediately = strings.Split(tiers, ",")
|
||||
}
|
||||
}
|
||||
|
||||
func logStartupMessage(msg string) {
|
||||
|
||||
@@ -862,17 +862,18 @@ func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, meta ac
|
||||
versionID := meta.oi.VersionID
|
||||
action := i.lifeCycle.ComputeAction(
|
||||
lifecycle.ObjectOpts{
|
||||
Name: i.objectPath(),
|
||||
UserTags: meta.oi.UserTags,
|
||||
ModTime: meta.oi.ModTime,
|
||||
VersionID: meta.oi.VersionID,
|
||||
DeleteMarker: meta.oi.DeleteMarker,
|
||||
IsLatest: meta.oi.IsLatest,
|
||||
NumVersions: meta.oi.NumVersions,
|
||||
SuccessorModTime: meta.oi.SuccessorModTime,
|
||||
RestoreOngoing: meta.oi.RestoreOngoing,
|
||||
RestoreExpires: meta.oi.RestoreExpires,
|
||||
TransitionStatus: meta.oi.TransitionStatus,
|
||||
Name: i.objectPath(),
|
||||
UserTags: meta.oi.UserTags,
|
||||
ModTime: meta.oi.ModTime,
|
||||
VersionID: meta.oi.VersionID,
|
||||
DeleteMarker: meta.oi.DeleteMarker,
|
||||
IsLatest: meta.oi.IsLatest,
|
||||
NumVersions: meta.oi.NumVersions,
|
||||
SuccessorModTime: meta.oi.SuccessorModTime,
|
||||
RestoreOngoing: meta.oi.RestoreOngoing,
|
||||
RestoreExpires: meta.oi.RestoreExpires,
|
||||
TransitionStatus: meta.oi.TransitionStatus,
|
||||
RemoteTiersImmediately: globalDebugRemoteTiersImmediately,
|
||||
})
|
||||
if i.debug {
|
||||
if versionID != "" {
|
||||
@@ -948,17 +949,18 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta acti
|
||||
|
||||
func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, obj ObjectInfo, debug bool) (action lifecycle.Action) {
|
||||
lcOpts := lifecycle.ObjectOpts{
|
||||
Name: obj.Name,
|
||||
UserTags: obj.UserTags,
|
||||
ModTime: obj.ModTime,
|
||||
VersionID: obj.VersionID,
|
||||
DeleteMarker: obj.DeleteMarker,
|
||||
IsLatest: obj.IsLatest,
|
||||
NumVersions: obj.NumVersions,
|
||||
SuccessorModTime: obj.SuccessorModTime,
|
||||
RestoreOngoing: obj.RestoreOngoing,
|
||||
RestoreExpires: obj.RestoreExpires,
|
||||
TransitionStatus: obj.TransitionStatus,
|
||||
Name: obj.Name,
|
||||
UserTags: obj.UserTags,
|
||||
ModTime: obj.ModTime,
|
||||
VersionID: obj.VersionID,
|
||||
DeleteMarker: obj.DeleteMarker,
|
||||
IsLatest: obj.IsLatest,
|
||||
NumVersions: obj.NumVersions,
|
||||
SuccessorModTime: obj.SuccessorModTime,
|
||||
RestoreOngoing: obj.RestoreOngoing,
|
||||
RestoreExpires: obj.RestoreExpires,
|
||||
TransitionStatus: obj.TransitionStatus,
|
||||
RemoteTiersImmediately: globalDebugRemoteTiersImmediately,
|
||||
}
|
||||
|
||||
action = lc.ComputeAction(lcOpts)
|
||||
@@ -995,17 +997,14 @@ func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, obj Ob
|
||||
}
|
||||
|
||||
func applyTransitionAction(ctx context.Context, action lifecycle.Action, objLayer ObjectLayer, obj ObjectInfo) bool {
|
||||
opts := ObjectOptions{}
|
||||
srcOpts := ObjectOptions{}
|
||||
if obj.TransitionStatus == "" {
|
||||
opts.Versioned = globalBucketVersioningSys.Enabled(obj.Bucket)
|
||||
opts.VersionID = obj.VersionID
|
||||
opts.TransitionStatus = lifecycle.TransitionPending
|
||||
if _, err := objLayer.DeleteObject(ctx, obj.Bucket, obj.Name, opts); err != nil {
|
||||
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
|
||||
return false
|
||||
}
|
||||
// Assume it is still there.
|
||||
logger.LogIf(ctx, err)
|
||||
srcOpts.Versioned = globalBucketVersioningSys.Enabled(obj.Bucket)
|
||||
srcOpts.VersionID = obj.VersionID
|
||||
// mark transition as pending
|
||||
obj.UserDefined[ReservedMetadataPrefixLower+TransitionStatus] = lifecycle.TransitionPending
|
||||
obj.metadataOnly = true // Perform only metadata updates.
|
||||
if obj.DeleteMarker {
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -1029,14 +1028,18 @@ func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer,
|
||||
TransitionStatus: obj.TransitionStatus,
|
||||
}
|
||||
|
||||
if err := deleteTransitionedObject(ctx, objLayer, obj.Bucket, obj.Name, lcOpts, restoredObject, false); err != nil {
|
||||
action := expireObj
|
||||
if restoredObject {
|
||||
action = expireRestoredObj
|
||||
}
|
||||
if err := expireTransitionedObject(ctx, objLayer, obj.Bucket, obj.Name, lcOpts, obj.transitionedObjName, obj.TransitionTier, action); err != nil {
|
||||
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
|
||||
return false
|
||||
}
|
||||
logger.LogIf(ctx, err)
|
||||
return false
|
||||
}
|
||||
// Notification already sent at *deleteTransitionedObject*, just return 'true' here.
|
||||
// Notification already sent in *expireTransitionedObject*, just return 'true' here.
|
||||
return true
|
||||
}
|
||||
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
@@ -257,6 +257,20 @@ func (l EndpointServerPools) Localhost() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// FirstLocalDiskPath returns the disk path of first (in cmdline args order)
|
||||
// local endpoint.
|
||||
func (l EndpointServerPools) FirstLocalDiskPath() string {
|
||||
var diskPath string
|
||||
for _, ep := range l {
|
||||
for _, endpoint := range ep.Endpoints {
|
||||
if endpoint.IsLocal {
|
||||
return endpoint.Path
|
||||
}
|
||||
}
|
||||
}
|
||||
return diskPath
|
||||
}
|
||||
|
||||
// FirstLocal returns true if the first endpoint is local.
|
||||
func (l EndpointServerPools) FirstLocal() bool {
|
||||
return l[0].Endpoints[0].IsLocal
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
)
|
||||
|
||||
@@ -279,9 +280,13 @@ func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetad
|
||||
// disk has a valid xl.meta but may not have all the
|
||||
// parts. This is considered an outdated disk, since
|
||||
// it needs healing too.
|
||||
dataErrs[i] = onlineDisk.VerifyFile(ctx, bucket, object, partsMetadata[i])
|
||||
if partsMetadata[i].TransitionStatus != lifecycle.TransitionComplete {
|
||||
dataErrs[i] = onlineDisk.VerifyFile(ctx, bucket, object, partsMetadata[i])
|
||||
}
|
||||
case madmin.HealNormalScan:
|
||||
dataErrs[i] = onlineDisk.CheckParts(ctx, bucket, object, partsMetadata[i])
|
||||
if partsMetadata[i].TransitionStatus != lifecycle.TransitionComplete {
|
||||
dataErrs[i] = onlineDisk.CheckParts(ctx, bucket, object, partsMetadata[i])
|
||||
}
|
||||
}
|
||||
|
||||
if dataErrs[i] == nil {
|
||||
|
||||
@@ -213,13 +213,15 @@ func shouldHealObjectOnDisk(erErr, dataErr error, meta FileInfo, quorumModTime t
|
||||
return true
|
||||
}
|
||||
if erErr == nil {
|
||||
// If xl.meta was read fine but there may be problem with the part.N files.
|
||||
if IsErr(dataErr, []error{
|
||||
errFileNotFound,
|
||||
errFileVersionNotFound,
|
||||
errFileCorrupt,
|
||||
}...) {
|
||||
return true
|
||||
if meta.TransitionStatus != lifecycle.TransitionComplete {
|
||||
// If xl.meta was read fine but there may be problem with the part.N files.
|
||||
if IsErr(dataErr, []error{
|
||||
errFileNotFound,
|
||||
errFileVersionNotFound,
|
||||
errFileCorrupt,
|
||||
}...) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if !quorumModTime.Equal(meta.ModTime) {
|
||||
return true
|
||||
@@ -250,11 +252,13 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
|
||||
DataBlocks: len(storageDisks) - er.defaultParityCount,
|
||||
}
|
||||
|
||||
lk := er.NewNSLock(bucket, object)
|
||||
if ctx, err = lk.GetLock(ctx, globalOperationTimeout); err != nil {
|
||||
return result, err
|
||||
if !opts.NoLock {
|
||||
lk := er.NewNSLock(bucket, object)
|
||||
if ctx, err = lk.GetLock(ctx, globalOperationTimeout); err != nil {
|
||||
return result, err
|
||||
}
|
||||
defer lk.Unlock()
|
||||
}
|
||||
defer lk.Unlock()
|
||||
|
||||
// Re-read when we have lock...
|
||||
partsMetadata, errs := readAllFileInfo(ctx, storageDisks, bucket, object, versionID, true)
|
||||
@@ -331,7 +335,6 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
|
||||
// File is fully gone, fileInfo is empty.
|
||||
return defaultHealResult(FileInfo{}, storageDisks, storageEndpoints, errs, bucket, object, versionID, er.defaultParityCount), err
|
||||
}
|
||||
|
||||
// If less than read quorum number of disks have all the parts
|
||||
// of the data, we can't reconstruct the erasure-coded data.
|
||||
if numAvailableDisks < result.DataBlocks {
|
||||
@@ -362,7 +365,9 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
|
||||
nfi := fi
|
||||
nfi.Erasure.Index = 0
|
||||
nfi.Erasure.Checksums = nil
|
||||
nfi.Parts = nil
|
||||
if fi.TransitionStatus != lifecycle.TransitionComplete {
|
||||
nfi.Parts = nil
|
||||
}
|
||||
return nfi
|
||||
}
|
||||
|
||||
@@ -394,7 +399,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
|
||||
inlineBuffers = make([]*bytes.Buffer, len(outDatedDisks))
|
||||
}
|
||||
|
||||
if !latestMeta.Deleted || latestMeta.TransitionStatus != lifecycle.TransitionComplete {
|
||||
if !latestMeta.Deleted && latestMeta.TransitionStatus != lifecycle.TransitionComplete {
|
||||
result.DataBlocks = latestMeta.Erasure.DataBlocks
|
||||
result.ParityBlocks = latestMeta.Erasure.ParityBlocks
|
||||
|
||||
@@ -450,6 +455,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
|
||||
if err != nil {
|
||||
return result, toObjectErr(err, bucket, object)
|
||||
}
|
||||
|
||||
// outDatedDisks that had write errors should not be
|
||||
// written to for remaining parts, so we nil it out.
|
||||
for i, disk := range outDatedDisks {
|
||||
@@ -483,7 +489,9 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
|
||||
if disksToHealCount == 0 {
|
||||
return result, fmt.Errorf("all disks had write errors, unable to heal")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
defer er.deleteObject(context.Background(), minioMetaTmpBucket, tmpID, len(storageDisks)/2+1)
|
||||
@@ -721,7 +729,6 @@ func (er erasureObjects) purgeObjectDangling(ctx context.Context, bucket, object
|
||||
|
||||
storageDisks := er.getDisks()
|
||||
storageEndpoints := er.getEndpoints()
|
||||
|
||||
// Check if the object is dangling, if yes and user requested
|
||||
// remove we simply delete it from namespace.
|
||||
m, ok := isObjectDangling(metaArr, errs, dataErrs)
|
||||
|
||||
@@ -151,6 +151,8 @@ func (fi FileInfo) ToObjectInfo(bucket, object string) ObjectInfo {
|
||||
}
|
||||
|
||||
objInfo.TransitionStatus = fi.TransitionStatus
|
||||
objInfo.transitionedObjName = fi.TransitionedObjName
|
||||
objInfo.TransitionTier = fi.TransitionTier
|
||||
|
||||
// etag/md5Sum has already been extracted. We need to
|
||||
// remove to avoid it from appearing as part of
|
||||
@@ -167,11 +169,15 @@ func (fi FileInfo) ToObjectInfo(bucket, object string) ObjectInfo {
|
||||
} else {
|
||||
objInfo.StorageClass = globalMinioDefaultStorageClass
|
||||
}
|
||||
|
||||
objInfo.VersionPurgeStatus = fi.VersionPurgeStatus
|
||||
// set restore status for transitioned object
|
||||
if ongoing, exp, err := parseRestoreHeaderFromMeta(fi.Metadata); err == nil {
|
||||
objInfo.RestoreOngoing = ongoing
|
||||
objInfo.RestoreExpires = exp
|
||||
restoreHdr, ok := fi.Metadata[xhttp.AmzRestore]
|
||||
if ok {
|
||||
if restoreStatus, err := parseRestoreObjStatus(restoreHdr); err == nil {
|
||||
objInfo.RestoreOngoing = restoreStatus.Ongoing()
|
||||
objInfo.RestoreExpires, _ = restoreStatus.Expiry()
|
||||
}
|
||||
}
|
||||
// Success.
|
||||
return objInfo
|
||||
|
||||
@@ -27,13 +27,14 @@ import (
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/tags"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
"github.com/minio/minio/pkg/bucket/replication"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
"github.com/minio/minio/pkg/mimedb"
|
||||
"github.com/minio/minio/pkg/sync/errgroup"
|
||||
@@ -63,14 +64,14 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
|
||||
}
|
||||
|
||||
defer ObjectPathUpdated(pathJoin(dstBucket, dstObject))
|
||||
|
||||
lk := er.NewNSLock(dstBucket, dstObject)
|
||||
ctx, err = lk.GetLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return oi, err
|
||||
if !dstOpts.NoLock {
|
||||
lk := er.NewNSLock(dstBucket, dstObject)
|
||||
ctx, err = lk.GetLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return oi, err
|
||||
}
|
||||
defer lk.Unlock()
|
||||
}
|
||||
defer lk.Unlock()
|
||||
|
||||
// Read metadata associated with the object from all disks.
|
||||
storageDisks := er.getDisks()
|
||||
metaArr, errs := readAllFileInfo(ctx, storageDisks, srcBucket, srcObject, srcOpts.VersionID, true)
|
||||
@@ -181,8 +182,7 @@ func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object stri
|
||||
}
|
||||
if objInfo.TransitionStatus == lifecycle.TransitionComplete {
|
||||
// If transitioned, stream from transition tier unless object is restored locally or restore date is past.
|
||||
restoreHdr, ok := caseInsensitiveMap(objInfo.UserDefined).Lookup(xhttp.AmzRestore)
|
||||
if !ok || !strings.HasPrefix(restoreHdr, "ongoing-request=false") || (!objInfo.RestoreExpires.IsZero() && time.Now().After(objInfo.RestoreExpires)) {
|
||||
if onDisk := isRestoredObjectOnDisk(objInfo.UserDefined); !onDisk {
|
||||
return getTransitionedObjectReader(ctx, bucket, object, rs, h, objInfo, opts)
|
||||
}
|
||||
}
|
||||
@@ -460,13 +460,7 @@ func (er erasureObjects) getObjectInfo(ctx context.Context, bucket, object strin
|
||||
|
||||
}
|
||||
objInfo = fi.ToObjectInfo(bucket, object)
|
||||
if objInfo.TransitionStatus == lifecycle.TransitionComplete {
|
||||
// overlay storage class for transitioned objects with transition tier SC Label
|
||||
if sc := transitionSC(ctx, bucket); sc != "" {
|
||||
objInfo.StorageClass = sc
|
||||
}
|
||||
}
|
||||
if !fi.VersionPurgeStatus.Empty() && opts.VersionID != "" {
|
||||
if !fi.VersionPurgeStatus.Empty() {
|
||||
// Make sure to return object info to provide extra information.
|
||||
return objInfo, toObjectErr(errMethodNotAllowed, bucket, object)
|
||||
}
|
||||
@@ -643,12 +637,9 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
|
||||
partsMetadata := make([]FileInfo, len(storageDisks))
|
||||
|
||||
fi := newFileInfo(pathJoin(bucket, object), dataDrives, parityDrives)
|
||||
|
||||
if opts.Versioned {
|
||||
fi.VersionID = opts.VersionID
|
||||
if fi.VersionID == "" {
|
||||
fi.VersionID = mustGetUUID()
|
||||
}
|
||||
fi.VersionID = opts.VersionID
|
||||
if opts.Versioned && fi.VersionID == "" {
|
||||
fi.VersionID = mustGetUUID()
|
||||
}
|
||||
fi.DataDir = mustGetUUID()
|
||||
|
||||
@@ -1001,7 +992,6 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objec
|
||||
DeleteMarkerReplicationStatus: versions[objIndex].DeleteMarkerReplicationStatus,
|
||||
ObjectName: versions[objIndex].Name,
|
||||
VersionPurgeStatus: versions[objIndex].VersionPurgeStatus,
|
||||
PurgeTransitioned: objects[objIndex].PurgeTransitioned,
|
||||
}
|
||||
} else {
|
||||
dobjects[objIndex] = DeletedObject{
|
||||
@@ -1009,7 +999,6 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objec
|
||||
VersionID: versions[objIndex].VersionID,
|
||||
VersionPurgeStatus: versions[objIndex].VersionPurgeStatus,
|
||||
DeleteMarkerReplicationStatus: versions[objIndex].DeleteMarkerReplicationStatus,
|
||||
PurgeTransitioned: objects[objIndex].PurgeTransitioned,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1093,6 +1082,7 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string
|
||||
if opts.MTime.IsZero() {
|
||||
modTime = UTCNow()
|
||||
}
|
||||
|
||||
if markDelete {
|
||||
if opts.Versioned || opts.VersionSuspended {
|
||||
fi := FileInfo{
|
||||
@@ -1102,6 +1092,8 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string
|
||||
ModTime: modTime,
|
||||
DeleteMarkerReplicationStatus: opts.DeleteMarkerReplicationStatus,
|
||||
VersionPurgeStatus: opts.VersionPurgeStatus,
|
||||
TransitionStatus: opts.Transition.Status,
|
||||
ExpireRestored: opts.Transition.ExpireRestored,
|
||||
}
|
||||
if opts.Versioned {
|
||||
fi.VersionID = mustGetUUID()
|
||||
@@ -1109,8 +1101,6 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string
|
||||
fi.VersionID = opts.VersionID
|
||||
}
|
||||
}
|
||||
fi.TransitionStatus = opts.TransitionStatus
|
||||
|
||||
// versioning suspended means we add `null`
|
||||
// version as delete marker
|
||||
// Add delete marker, since we don't have any version specified explicitly.
|
||||
@@ -1131,7 +1121,8 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string
|
||||
ModTime: modTime,
|
||||
DeleteMarkerReplicationStatus: opts.DeleteMarkerReplicationStatus,
|
||||
VersionPurgeStatus: opts.VersionPurgeStatus,
|
||||
TransitionStatus: opts.TransitionStatus,
|
||||
TransitionStatus: opts.Transition.Status,
|
||||
ExpireRestored: opts.Transition.ExpireRestored,
|
||||
}, opts.DeleteMarker); err != nil {
|
||||
return objInfo, toObjectErr(err, bucket, object)
|
||||
}
|
||||
@@ -1302,3 +1293,250 @@ func (er erasureObjects) GetObjectTags(ctx context.Context, bucket, object strin
|
||||
|
||||
return tags.ParseObjectTags(oi.UserTags)
|
||||
}
|
||||
|
||||
// TransitionObject - transition object content to target tier.
|
||||
func (er erasureObjects) TransitionObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
|
||||
tgtClient, err := globalTierConfigMgr.getDriver(opts.Transition.Tier)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Acquire write lock before starting to transition the object.
|
||||
lk := er.NewNSLock(bucket, object)
|
||||
ctx, err = lk.GetLock(ctx, globalDeleteOperationTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer lk.Unlock()
|
||||
|
||||
fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, bucket, object, opts, true)
|
||||
if err != nil {
|
||||
return toObjectErr(err, bucket, object)
|
||||
}
|
||||
if fi.Deleted {
|
||||
if opts.VersionID == "" {
|
||||
return toObjectErr(errFileNotFound, bucket, object)
|
||||
}
|
||||
// Make sure to return object info to provide extra information.
|
||||
return toObjectErr(errMethodNotAllowed, bucket, object)
|
||||
}
|
||||
// verify that the object queued for transition is identical to that on disk.
|
||||
if !opts.MTime.Equal(fi.ModTime) || !strings.EqualFold(opts.Transition.ETag, extractETag(fi.Metadata)) {
|
||||
return toObjectErr(errFileNotFound, bucket, object)
|
||||
}
|
||||
// if object already transitioned, return
|
||||
if fi.TransitionStatus == lifecycle.TransitionComplete {
|
||||
return nil
|
||||
}
|
||||
if fi.XLV1 {
|
||||
if _, err = er.HealObject(ctx, bucket, object, "", madmin.HealOpts{NoLock: true}); err != nil {
|
||||
return err
|
||||
}
|
||||
// Fetch FileInfo again. HealObject migrates object the latest
|
||||
// format. Among other things this changes fi.DataDir and
|
||||
// possibly fi.Data (if data is inlined).
|
||||
fi, metaArr, onlineDisks, err = er.getObjectFileInfo(ctx, bucket, object, opts, true)
|
||||
if err != nil {
|
||||
return toObjectErr(err, bucket, object)
|
||||
}
|
||||
}
|
||||
destObj, err := genTransitionObjName()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pr, pw := io.Pipe()
|
||||
go func() {
|
||||
err := er.getObjectWithFileInfo(ctx, bucket, object, 0, fi.Size, pw, fi, metaArr, onlineDisks)
|
||||
pw.CloseWithError(err)
|
||||
}()
|
||||
if err = tgtClient.Put(ctx, destObj, pr, fi.Size); err != nil {
|
||||
pr.Close()
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to transition %s/%s(%s) to %s tier: %w", bucket, object, opts.VersionID, opts.Transition.Tier, err))
|
||||
return err
|
||||
}
|
||||
pr.Close()
|
||||
fi.TransitionStatus = lifecycle.TransitionComplete
|
||||
fi.TransitionedObjName = destObj
|
||||
fi.TransitionTier = opts.Transition.Tier
|
||||
eventName := event.ObjectTransitionComplete
|
||||
|
||||
storageDisks := er.getDisks()
|
||||
writeQuorum := len(storageDisks)/2 + 1
|
||||
if err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, fi, false); err != nil {
|
||||
eventName = event.ObjectTransitionFailed
|
||||
}
|
||||
for _, disk := range storageDisks {
|
||||
if disk != nil && disk.IsOnline() {
|
||||
continue
|
||||
}
|
||||
er.addPartial(bucket, object, opts.VersionID)
|
||||
break
|
||||
}
|
||||
// Notify object deleted event.
|
||||
sendEvent(eventArgs{
|
||||
EventName: eventName,
|
||||
BucketName: bucket,
|
||||
Object: ObjectInfo{
|
||||
Name: object,
|
||||
VersionID: opts.VersionID,
|
||||
},
|
||||
Host: "Internal: [ILM-Transition]",
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// RestoreTransitionedObject - restore transitioned object content locally on this cluster.
|
||||
// This is similar to PostObjectRestore from AWS GLACIER
|
||||
// storage class. When PostObjectRestore API is called, a temporary copy of the object
|
||||
// is restored locally to the bucket on source cluster until the restore expiry date.
|
||||
// The copy that was transitioned continues to reside in the transitioned tier.
|
||||
func (er erasureObjects) RestoreTransitionedObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
|
||||
return er.restoreTransitionedObject(ctx, bucket, object, opts)
|
||||
}
|
||||
|
||||
// update restore status header in the metadata
|
||||
func (er erasureObjects) updateRestoreMetadata(ctx context.Context, bucket, object string, objInfo ObjectInfo, opts ObjectOptions, noLock bool, rerr error) error {
|
||||
oi := objInfo.Clone()
|
||||
oi.metadataOnly = true // Perform only metadata updates.
|
||||
|
||||
if rerr == nil {
|
||||
oi.UserDefined[xhttp.AmzRestore] = completedRestoreObj(opts.Transition.RestoreExpiry).String()
|
||||
} else { // allow retry in the case of failure to restore
|
||||
delete(oi.UserDefined, xhttp.AmzRestore)
|
||||
}
|
||||
if _, err := er.CopyObject(ctx, bucket, object, bucket, object, oi, ObjectOptions{
|
||||
VersionID: oi.VersionID,
|
||||
}, ObjectOptions{
|
||||
VersionID: oi.VersionID,
|
||||
NoLock: noLock, // true if lock already taken
|
||||
}); err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to update transition restore metadata for %s/%s(%s): %s", bucket, object, oi.VersionID, err))
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// restoreTransitionedObject for multipart object chunks the file stream from remote tier into the same number of parts
|
||||
// as in the xl.meta for this version and rehydrates the part.n into the fi.DataDir for this version as in the xl.meta
|
||||
func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket string, object string, opts ObjectOptions) error {
|
||||
defer func() {
|
||||
ObjectPathUpdated(pathJoin(bucket, object))
|
||||
}()
|
||||
setRestoreHeaderFn := func(oi ObjectInfo, noLock bool, rerr error) error {
|
||||
er.updateRestoreMetadata(ctx, bucket, object, oi, opts, noLock, rerr)
|
||||
return rerr
|
||||
}
|
||||
var oi ObjectInfo
|
||||
// get the file info on disk for transitioned object
|
||||
actualfi, _, _, err := er.getObjectFileInfo(ctx, bucket, object, opts, false)
|
||||
if err != nil {
|
||||
return setRestoreHeaderFn(oi, false, toObjectErr(err, bucket, object))
|
||||
}
|
||||
|
||||
oi = actualfi.ToObjectInfo(bucket, object)
|
||||
if len(oi.Parts) == 1 {
|
||||
var rs *HTTPRangeSpec
|
||||
gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, http.Header{}, oi, opts)
|
||||
if err != nil {
|
||||
return setRestoreHeaderFn(oi, false, toObjectErr(err, bucket, object))
|
||||
}
|
||||
defer gr.Close()
|
||||
hashReader, err := hash.NewReader(gr, gr.ObjInfo.Size, "", "", gr.ObjInfo.Size)
|
||||
if err != nil {
|
||||
return setRestoreHeaderFn(oi, false, toObjectErr(err, bucket, object))
|
||||
}
|
||||
pReader := NewPutObjReader(hashReader)
|
||||
ropts := putRestoreOpts(bucket, object, opts.Transition.RestoreRequest, oi)
|
||||
ropts.UserDefined[xhttp.AmzRestore] = completedRestoreObj(opts.Transition.RestoreExpiry).String()
|
||||
_, err = er.PutObject(ctx, bucket, object, pReader, ropts)
|
||||
return setRestoreHeaderFn(oi, false, toObjectErr(err, bucket, object))
|
||||
}
|
||||
uploadID, err := er.NewMultipartUpload(ctx, bucket, object, opts)
|
||||
if err != nil {
|
||||
return setRestoreHeaderFn(oi, false, err)
|
||||
}
|
||||
var uploadedParts []CompletePart
|
||||
var rs *HTTPRangeSpec
|
||||
// get reader from the warm backend - note that even in the case of encrypted objects, this stream is still encrypted.
|
||||
gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, http.Header{}, oi, opts)
|
||||
if err != nil {
|
||||
return setRestoreHeaderFn(oi, false, err)
|
||||
}
|
||||
defer gr.Close()
|
||||
// rehydrate the parts back on disk as per the original xl.meta prior to transition
|
||||
for _, partInfo := range oi.Parts {
|
||||
hr, err := hash.NewReader(gr, partInfo.Size, "", "", partInfo.Size)
|
||||
if err != nil {
|
||||
return setRestoreHeaderFn(oi, false, err)
|
||||
}
|
||||
pInfo, err := er.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, NewPutObjReader(hr), ObjectOptions{})
|
||||
if err != nil {
|
||||
return setRestoreHeaderFn(oi, false, err)
|
||||
}
|
||||
uploadedParts = append(uploadedParts, CompletePart{
|
||||
PartNumber: pInfo.PartNumber,
|
||||
ETag: pInfo.ETag,
|
||||
})
|
||||
}
|
||||
|
||||
uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)
|
||||
storageDisks := er.getDisks()
|
||||
// Read metadata associated with the object from all disks.
|
||||
partsMetadata, errs := readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, "", false)
|
||||
|
||||
// get Quorum for this object
|
||||
_, writeQuorum, err := objectQuorumFromMeta(ctx, partsMetadata, errs, er.defaultParityCount)
|
||||
if err != nil {
|
||||
return setRestoreHeaderFn(oi, false, toObjectErr(err, bucket, object))
|
||||
}
|
||||
|
||||
reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
|
||||
if reducedErr == errErasureWriteQuorum {
|
||||
return setRestoreHeaderFn(oi, false, toObjectErr(reducedErr, bucket, object))
|
||||
}
|
||||
|
||||
onlineDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)
|
||||
|
||||
// Pick one from the first valid metadata.
|
||||
fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
|
||||
if err != nil {
|
||||
return setRestoreHeaderFn(oi, false, err)
|
||||
}
|
||||
//validate parts created via multipart to transitioned object's parts info in xl.meta
|
||||
partsMatch := true
|
||||
if len(actualfi.Parts) != len(fi.Parts) {
|
||||
partsMatch = false
|
||||
}
|
||||
if len(actualfi.Parts) == len(fi.Parts) {
|
||||
for i, pi := range actualfi.Parts {
|
||||
if fi.Parts[i].Size != pi.Size {
|
||||
partsMatch = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !partsMatch {
|
||||
return setRestoreHeaderFn(oi, false, InvalidObjectState{Bucket: bucket, Object: object})
|
||||
}
|
||||
var currentFI = actualfi
|
||||
currentFI.DataDir = fi.DataDir
|
||||
|
||||
// Hold namespace to complete the transaction
|
||||
lk := er.NewNSLock(bucket, object)
|
||||
ctx, err = lk.GetLock(ctx, globalOperationTimeout)
|
||||
if err != nil {
|
||||
return setRestoreHeaderFn(oi, false, err)
|
||||
}
|
||||
defer lk.Unlock()
|
||||
|
||||
// Attempt to rename temp upload object to actual upload path object
|
||||
_, err = rename(ctx, onlineDisks, minioMetaMultipartBucket, path.Join(uploadIDPath, fi.DataDir), bucket, path.Join(object, actualfi.DataDir), true, writeQuorum, nil)
|
||||
if err != nil {
|
||||
return setRestoreHeaderFn(oi, true, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath))
|
||||
}
|
||||
// Cleanup multipart upload dir.
|
||||
if err = er.deleteObject(ctx, minioMetaMultipartBucket, uploadIDPath, writeQuorum); err != nil {
|
||||
return setRestoreHeaderFn(oi, true, toObjectErr(err, bucket, object, uploadID))
|
||||
}
|
||||
return setRestoreHeaderFn(oi, true, nil)
|
||||
}
|
||||
|
||||
@@ -1846,3 +1846,33 @@ func (z *erasureServerPools) GetObjectTags(ctx context.Context, bucket, object s
|
||||
|
||||
return z.serverPools[idx].GetObjectTags(ctx, bucket, object, opts)
|
||||
}
|
||||
|
||||
// TransitionObject - transition object content to target tier.
|
||||
func (z *erasureServerPools) TransitionObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
|
||||
object = encodeDirObject(object)
|
||||
if z.SinglePool() {
|
||||
return z.serverPools[0].TransitionObject(ctx, bucket, object, opts)
|
||||
}
|
||||
|
||||
idx, err := z.getPoolIdxExisting(ctx, bucket, object)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return z.serverPools[idx].TransitionObject(ctx, bucket, object, opts)
|
||||
}
|
||||
|
||||
// RestoreTransitionedObject - restore transitioned object content locally on this cluster.
|
||||
func (z *erasureServerPools) RestoreTransitionedObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
|
||||
object = encodeDirObject(object)
|
||||
if z.SinglePool() {
|
||||
return z.serverPools[0].RestoreTransitionedObject(ctx, bucket, object, opts)
|
||||
}
|
||||
|
||||
idx, err := z.getPoolIdxExisting(ctx, bucket, object)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return z.serverPools[idx].RestoreTransitionedObject(ctx, bucket, object, opts)
|
||||
}
|
||||
|
||||
@@ -1407,3 +1407,13 @@ func (s *erasureSets) healMRFRoutine() {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TransitionObject - transition object content to target tier.
|
||||
func (s *erasureSets) TransitionObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
|
||||
return s.getHashedSet(object).TransitionObject(ctx, bucket, object, opts)
|
||||
}
|
||||
|
||||
// RestoreTransitionedObject - restore transitioned object content locally on this cluster.
|
||||
func (s *erasureSets) RestoreTransitionedObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
|
||||
return s.getHashedSet(object).RestoreTransitionedObject(ctx, bucket, object, opts)
|
||||
}
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Code generated by "stringer -type=format -trimprefix=format untar.go"; DO NOT EDIT.
|
||||
|
||||
package cmd
|
||||
|
||||
10
cmd/fs-v1.go
10
cmd/fs-v1.go
@@ -1608,3 +1608,13 @@ func (fs *FSObjects) ReadHealth(ctx context.Context) bool {
|
||||
_, err := os.Stat(fs.fsPath)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// TransitionObject - transition object content to target tier.
|
||||
func (fs *FSObjects) TransitionObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
|
||||
return NotImplemented{}
|
||||
}
|
||||
|
||||
// RestoreTransitionedObject - restore transitioned object content locally on this cluster.
|
||||
func (fs *FSObjects) RestoreTransitionedObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
|
||||
return NotImplemented{}
|
||||
}
|
||||
|
||||
@@ -273,3 +273,13 @@ func (a GatewayUnsupported) Health(_ context.Context, _ HealthOptions) HealthRes
|
||||
func (a GatewayUnsupported) ReadHealth(_ context.Context) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// TransitionObject - transition object content to target tier.
|
||||
func (a GatewayUnsupported) TransitionObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
|
||||
return NotImplemented{}
|
||||
}
|
||||
|
||||
// RestoreTransitionedObject - restore transitioned object content locally on this cluster.
|
||||
func (a GatewayUnsupported) RestoreTransitionedObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
|
||||
return NotImplemented{}
|
||||
}
|
||||
|
||||
@@ -294,6 +294,12 @@ var (
|
||||
globalDNSCache *xhttp.DNSCache
|
||||
|
||||
globalForwarder *handlers.Forwarder
|
||||
|
||||
globalTierConfigMgr *TierConfigMgr
|
||||
|
||||
globalTierJournal *tierJournal
|
||||
|
||||
globalDebugRemoteTiersImmediately []string
|
||||
// Add new variable global values here.
|
||||
)
|
||||
|
||||
|
||||
@@ -170,6 +170,8 @@ const (
|
||||
MinIOSourceProxyRequest = "X-Minio-Source-Proxy-Request"
|
||||
// Header indicates that this request is a replication request to create a REPLICA
|
||||
MinIOSourceReplicationRequest = "X-Minio-Source-Replication-Request"
|
||||
// predicted date/time of transition
|
||||
MinIOTransition = "X-Minio-Transition"
|
||||
)
|
||||
|
||||
// Common http query params S3 API
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
@@ -26,15 +26,15 @@ import (
|
||||
|
||||
// WARNING:
|
||||
//
|
||||
// Expected source line number is hard coded, 31, in the
|
||||
// Expected source line number is hard coded, 35, in the
|
||||
// following test. Adding new code before this test or changing its
|
||||
// position will cause the line number to change and the test to FAIL
|
||||
// Tests getSource().
|
||||
func TestGetSource(t *testing.T) {
|
||||
currentSource := func() string { return getSource(2) }
|
||||
gotSource := currentSource()
|
||||
// Hard coded line number, 34, in the "expectedSource" value
|
||||
expectedSource := "[namespace-lock_test.go:34:TestGetSource()]"
|
||||
// Hard coded line number, 35, in the "expectedSource" value
|
||||
expectedSource := "[namespace-lock_test.go:35:TestGetSource()]"
|
||||
if gotSource != expectedSource {
|
||||
t.Errorf("expected : %s, got : %s", expectedSource, gotSource)
|
||||
}
|
||||
|
||||
@@ -729,6 +729,27 @@ func (sys *NotificationSys) GetClusterBucketStats(ctx context.Context, bucketNam
|
||||
return bucketStats
|
||||
}
|
||||
|
||||
// LoadTransitionTierConfig notifies remote peers to load their remote tier
|
||||
// configs from config store.
|
||||
func (sys *NotificationSys) LoadTransitionTierConfig(ctx context.Context) {
|
||||
ng := WithNPeers(len(sys.peerClients))
|
||||
for idx, client := range sys.peerClients {
|
||||
if client == nil {
|
||||
continue
|
||||
}
|
||||
client := client
|
||||
ng.Go(ctx, func() error {
|
||||
return client.LoadTransitionTierConfig(ctx)
|
||||
}, idx, *client.host)
|
||||
}
|
||||
for _, nErr := range ng.Wait() {
|
||||
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String())
|
||||
if nErr.Err != nil {
|
||||
logger.LogIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Loads notification policies for all buckets into NotificationSys.
|
||||
func (sys *NotificationSys) load(buckets []BucketInfo) {
|
||||
for _, bucket := range buckets {
|
||||
|
||||
@@ -115,6 +115,10 @@ type ObjectInfo struct {
|
||||
|
||||
// TransitionStatus indicates if transition is complete/pending
|
||||
TransitionStatus string
|
||||
// Name of transitioned object on remote tier
|
||||
transitionedObjName string
|
||||
// Name of remote tier object has transitioned to
|
||||
TransitionTier string
|
||||
|
||||
// RestoreExpires indicates date a restored object expires
|
||||
RestoreExpires time.Time
|
||||
|
||||
@@ -484,6 +484,20 @@ func (e BucketReplicationSourceNotVersioned) Error() string {
|
||||
return "Replication source does not have versioning enabled: " + e.Bucket
|
||||
}
|
||||
|
||||
// TransitionStorageClassNotFound remote tier not configured.
|
||||
type TransitionStorageClassNotFound GenericError
|
||||
|
||||
func (e TransitionStorageClassNotFound) Error() string {
|
||||
return "Transition storage class not found "
|
||||
}
|
||||
|
||||
// InvalidObjectState restore-object doesn't apply for the current state of the object.
|
||||
type InvalidObjectState GenericError
|
||||
|
||||
func (e InvalidObjectState) Error() string {
|
||||
return "The operation is not valid for the current state of the object" + e.Bucket + "/" + e.Object
|
||||
}
|
||||
|
||||
/// Bucket related errors.
|
||||
|
||||
// BucketNameInvalid - bucketname provided is invalid.
|
||||
@@ -669,6 +683,12 @@ func isErrVersionNotFound(err error) bool {
|
||||
return errors.As(err, &versionNotFound)
|
||||
}
|
||||
|
||||
// isErrSignatureDoesNotMatch - Check if error type is SignatureDoesNotMatch.
|
||||
func isErrSignatureDoesNotMatch(err error) bool {
|
||||
var signatureDoesNotMatch SignatureDoesNotMatch
|
||||
return errors.As(err, &signatureDoesNotMatch)
|
||||
}
|
||||
|
||||
// PreConditionFailed - Check if copy precondition failed
|
||||
type PreConditionFailed struct{}
|
||||
|
||||
|
||||
@@ -45,13 +45,13 @@ type ObjectOptions struct {
|
||||
MTime time.Time // Is only set in POST/PUT operations
|
||||
Expires time.Time // Is only used in POST/PUT operations
|
||||
|
||||
DeleteMarker bool // Is only set in DELETE operations for delete marker replication
|
||||
UserDefined map[string]string // only set in case of POST/PUT operations
|
||||
PartNumber int // only useful in case of GetObject/HeadObject
|
||||
CheckPrecondFn CheckPreconditionFn // only set during GetObject/HeadObject/CopyObjectPart preconditional valuation
|
||||
DeleteMarkerReplicationStatus string // Is only set in DELETE operations
|
||||
VersionPurgeStatus VersionPurgeStatusType // Is only set in DELETE operations for delete marker version to be permanently deleted.
|
||||
TransitionStatus string // status of the transition
|
||||
DeleteMarker bool // Is only set in DELETE operations for delete marker replication
|
||||
UserDefined map[string]string // only set in case of POST/PUT operations
|
||||
PartNumber int // only useful in case of GetObject/HeadObject
|
||||
CheckPrecondFn CheckPreconditionFn // only set during GetObject/HeadObject/CopyObjectPart preconditional valuation
|
||||
DeleteMarkerReplicationStatus string // Is only set in DELETE operations
|
||||
VersionPurgeStatus VersionPurgeStatusType // Is only set in DELETE operations for delete marker version to be permanently deleted.
|
||||
Transition TransitionOptions
|
||||
NoLock bool // indicates to lower layers if the caller is expecting to hold locks.
|
||||
ProxyRequest bool // only set for GET/HEAD in active-active replication scenario
|
||||
ProxyHeaderSet bool // only set for GET/HEAD in active-active replication scenario
|
||||
@@ -62,6 +62,16 @@ type ObjectOptions struct {
|
||||
MaxParity bool
|
||||
}
|
||||
|
||||
// TransitionOptions represents object options for transition ObjectLayer operation
|
||||
type TransitionOptions struct {
|
||||
Status string
|
||||
Tier string
|
||||
ETag string
|
||||
RestoreRequest *RestoreObjectRequest
|
||||
RestoreExpiry time.Time
|
||||
ExpireRestored bool
|
||||
}
|
||||
|
||||
// BucketOptions represents bucket options for ObjectLayer bucket operations
|
||||
type BucketOptions struct {
|
||||
Location string
|
||||
@@ -123,6 +133,8 @@ type ObjectLayer interface {
|
||||
CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||
DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error)
|
||||
DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error)
|
||||
TransitionObject(ctx context.Context, bucket, object string, opts ObjectOptions) error
|
||||
RestoreTransitionedObject(ctx context.Context, bucket, object string, opts ObjectOptions) error
|
||||
|
||||
// Multipart operations.
|
||||
ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error)
|
||||
|
||||
@@ -45,7 +45,6 @@ import (
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/ioutil"
|
||||
"github.com/minio/minio/pkg/trie"
|
||||
@@ -614,8 +613,8 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, cl
|
||||
return nil, 0, 0, err
|
||||
}
|
||||
|
||||
// if object is encrypted, transition content without decrypting.
|
||||
if opts.TransitionStatus == lifecycle.TransitionPending && (isEncrypted || isCompressed) {
|
||||
// if object is encrypted and it is a restore request, fetch content without decrypting.
|
||||
if opts.Transition.RestoreRequest != nil {
|
||||
isEncrypted = false
|
||||
isCompressed = false
|
||||
}
|
||||
|
||||
@@ -1341,6 +1341,16 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
|
||||
objInfo.ETag = remoteObjInfo.ETag
|
||||
objInfo.ModTime = remoteObjInfo.LastModified
|
||||
} else {
|
||||
|
||||
os := newObjSweeper(dstBucket, dstObject)
|
||||
// Get appropriate object info to identify the remote object to delete
|
||||
if !srcInfo.metadataOnly {
|
||||
goiOpts := os.GetOpts()
|
||||
if goi, gerr := getObjectInfo(ctx, dstBucket, dstObject, goiOpts); gerr == nil {
|
||||
os.SetTransitionState(goi)
|
||||
}
|
||||
}
|
||||
|
||||
copyObjectFn := objectAPI.CopyObject
|
||||
if api.CacheAPI() != nil {
|
||||
copyObjectFn = api.CacheAPI().CopyObject
|
||||
@@ -1353,6 +1363,9 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
// Remove the transitioned object whose object version is being overwritten.
|
||||
logger.LogIf(ctx, os.Sweep())
|
||||
}
|
||||
objInfo.ETag = getDecryptedETag(r.Header, objInfo, false)
|
||||
response := generateCopyObjectResponse(objInfo.ETag, objInfo.ModTime)
|
||||
@@ -1655,6 +1668,13 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
|
||||
// Ensure that metadata does not contain sensitive information
|
||||
crypto.RemoveSensitiveEntries(metadata)
|
||||
|
||||
oc := newObjSweeper(bucket, object)
|
||||
// Get appropriate object info to identify the remote object to delete
|
||||
goiOpts := oc.GetOpts()
|
||||
if goi, gerr := getObjectInfo(ctx, bucket, object, goiOpts); gerr == nil {
|
||||
oc.SetTransitionState(goi)
|
||||
}
|
||||
|
||||
// Create the object..
|
||||
objInfo, err := putObject(ctx, bucket, object, pReader, opts)
|
||||
if err != nil {
|
||||
@@ -1684,6 +1704,10 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
|
||||
if replicate, sync := mustReplicate(ctx, r, bucket, object, metadata, ""); replicate {
|
||||
scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
|
||||
}
|
||||
|
||||
// Remove the transitioned object whose object version is being overwritten.
|
||||
logger.LogIf(ctx, oc.Sweep())
|
||||
|
||||
setPutObjHeaders(w, objInfo, false)
|
||||
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
@@ -3046,6 +3070,13 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
os := newObjSweeper(bucket, object)
|
||||
// Get appropriate object info to identify the remote object to delete
|
||||
goiOpts := os.GetOpts()
|
||||
if goi, gerr := objectAPI.GetObjectInfo(ctx, bucket, object, goiOpts); gerr == nil {
|
||||
os.SetTransitionState(goi)
|
||||
}
|
||||
|
||||
setEventStreamHeaders(w)
|
||||
|
||||
w = &whiteSpaceWriter{ResponseWriter: w, Flusher: w.(http.Flusher)}
|
||||
@@ -3083,6 +3114,9 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
|
||||
scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
|
||||
}
|
||||
|
||||
// Remove the transitioned object whose object version is being overwritten.
|
||||
logger.LogIf(ctx, os.Sweep())
|
||||
|
||||
// Write success response.
|
||||
writeSuccessResponseXML(w, encodedSuccessResponse)
|
||||
|
||||
@@ -3144,21 +3178,20 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
|
||||
return
|
||||
}
|
||||
var (
|
||||
hasLockEnabled, hasLifecycleConfig bool
|
||||
goi ObjectInfo
|
||||
gerr error
|
||||
goi ObjectInfo
|
||||
gerr error
|
||||
)
|
||||
replicateDeletes := hasReplicationRules(ctx, bucket, []ObjectToDelete{{ObjectName: object, VersionID: opts.VersionID}})
|
||||
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
|
||||
hasLockEnabled = true
|
||||
}
|
||||
if _, err := globalBucketMetadataSys.GetLifecycleConfig(bucket); err == nil {
|
||||
hasLifecycleConfig = true
|
||||
}
|
||||
if replicateDeletes || hasLockEnabled || hasLifecycleConfig {
|
||||
goi, gerr = getObjectInfo(ctx, bucket, object, ObjectOptions{
|
||||
VersionID: opts.VersionID,
|
||||
})
|
||||
|
||||
var goiOpts ObjectOptions
|
||||
os := newObjSweeper(bucket, object).WithVersion(singleDelete(*r))
|
||||
// Mutations of objects on versioning suspended buckets
|
||||
// affect its null version. Through opts below we select
|
||||
// the null version's remote object to delete if
|
||||
// transitioned.
|
||||
goiOpts = os.GetOpts()
|
||||
goi, gerr = getObjectInfo(ctx, bucket, object, goiOpts)
|
||||
if gerr == nil {
|
||||
os.SetTransitionState(goi)
|
||||
}
|
||||
|
||||
replicateDel, replicateSync := checkReplicateDelete(ctx, bucket, ObjectToDelete{ObjectName: object, VersionID: opts.VersionID}, goi, gerr)
|
||||
@@ -3266,16 +3299,9 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
|
||||
scheduleReplicationDelete(ctx, dobj, objectAPI, replicateSync)
|
||||
}
|
||||
|
||||
if goi.TransitionStatus == lifecycle.TransitionComplete { // clean up transitioned tier
|
||||
deleteTransitionedObject(ctx, objectAPI, bucket, object, lifecycle.ObjectOpts{
|
||||
Name: object,
|
||||
UserTags: goi.UserTags,
|
||||
VersionID: goi.VersionID,
|
||||
DeleteMarker: goi.DeleteMarker,
|
||||
TransitionStatus: goi.TransitionStatus,
|
||||
IsLatest: goi.IsLatest,
|
||||
}, false, true)
|
||||
}
|
||||
// Remove the transitioned object whose object version is being overwritten.
|
||||
logger.LogIf(ctx, os.Sweep())
|
||||
|
||||
}
|
||||
|
||||
// PutObjectLegalHoldHandler - set legal hold configuration to object,
|
||||
@@ -3860,8 +3886,12 @@ func (api objectAPIHandlers) PostRestoreObjectHandler(w http.ResponseWriter, r *
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEmptyRequestBody), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
objInfo, err := getObjectInfo(ctx, bucket, object, ObjectOptions{})
|
||||
opts, err := postRestoreOpts(ctx, r, bucket, object)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
objInfo, err := getObjectInfo(ctx, bucket, object, opts)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
@@ -3905,16 +3935,12 @@ func (api objectAPIHandlers) PostRestoreObjectHandler(w http.ResponseWriter, r *
|
||||
// update self with restore metadata
|
||||
if rreq.Type != SelectRestoreRequest {
|
||||
objInfo.metadataOnly = true // Perform only metadata updates.
|
||||
ongoingReq := true
|
||||
if alreadyRestored {
|
||||
ongoingReq = false
|
||||
}
|
||||
metadata[xhttp.AmzRestoreExpiryDays] = strconv.Itoa(rreq.Days)
|
||||
metadata[xhttp.AmzRestoreRequestDate] = time.Now().UTC().Format(http.TimeFormat)
|
||||
if alreadyRestored {
|
||||
metadata[xhttp.AmzRestore] = fmt.Sprintf("ongoing-request=%t, expiry-date=%s", ongoingReq, restoreExpiry.Format(http.TimeFormat))
|
||||
metadata[xhttp.AmzRestore] = completedRestoreObj(restoreExpiry).String()
|
||||
} else {
|
||||
metadata[xhttp.AmzRestore] = fmt.Sprintf("ongoing-request=%t", ongoingReq)
|
||||
metadata[xhttp.AmzRestore] = ongoingRestoreObj().String()
|
||||
}
|
||||
objInfo.UserDefined = metadata
|
||||
if _, err := objectAPI.CopyObject(GlobalContext, bucket, object, bucket, object, objInfo, ObjectOptions{
|
||||
@@ -3991,7 +4017,15 @@ func (api objectAPIHandlers) PostRestoreObjectHandler(w http.ResponseWriter, r *
|
||||
rreq.SelectParameters.Close()
|
||||
return
|
||||
}
|
||||
if err := restoreTransitionedObject(rctx, bucket, object, objectAPI, objInfo, rreq, restoreExpiry); err != nil {
|
||||
opts := ObjectOptions{
|
||||
Transition: TransitionOptions{
|
||||
RestoreRequest: rreq,
|
||||
RestoreExpiry: restoreExpiry,
|
||||
},
|
||||
VersionID: objInfo.VersionID,
|
||||
}
|
||||
if err := objectAPI.RestoreTransitionedObject(rctx, bucket, object, opts); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Code generated by "stringer -type=osMetric -trimprefix=osMetric os-instrumented.go"; DO NOT EDIT.
|
||||
|
||||
package cmd
|
||||
|
||||
@@ -698,6 +698,16 @@ func (client *peerRESTClient) UpdateMetacacheListing(ctx context.Context, m meta
|
||||
|
||||
}
|
||||
|
||||
func (client *peerRESTClient) LoadTransitionTierConfig(ctx context.Context) error {
|
||||
respBody, err := client.callWithContext(ctx, peerRESTMethodLoadTransitionTierConfig, nil, nil, 0)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return err
|
||||
}
|
||||
defer http.DrainBody(respBody)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *peerRESTClient) doTrace(traceCh chan interface{}, doneCh <-chan struct{}, traceOpts madmin.ServiceTraceOpts) {
|
||||
values := make(url.Values)
|
||||
values.Set(peerRESTTraceErr, strconv.FormatBool(traceOpts.OnlyErrors))
|
||||
|
||||
@@ -18,49 +18,50 @@
|
||||
package cmd
|
||||
|
||||
const (
|
||||
peerRESTVersion = "v14" // Add GetBucketStats API
|
||||
peerRESTVersion = "v15" // Add LoadTransitionTierConfig
|
||||
peerRESTVersionPrefix = SlashSeparator + peerRESTVersion
|
||||
peerRESTPrefix = minioReservedBucketPath + "/peer"
|
||||
peerRESTPath = peerRESTPrefix + peerRESTVersionPrefix
|
||||
)
|
||||
|
||||
const (
|
||||
peerRESTMethodHealth = "/health"
|
||||
peerRESTMethodServerInfo = "/serverinfo"
|
||||
peerRESTMethodDriveInfo = "/driveinfo"
|
||||
peerRESTMethodNetInfo = "/netinfo"
|
||||
peerRESTMethodCPUInfo = "/cpuinfo"
|
||||
peerRESTMethodDiskHwInfo = "/diskhwinfo"
|
||||
peerRESTMethodOsInfo = "/osinfo"
|
||||
peerRESTMethodMemInfo = "/meminfo"
|
||||
peerRESTMethodProcInfo = "/procinfo"
|
||||
peerRESTMethodDispatchNetInfo = "/dispatchnetinfo"
|
||||
peerRESTMethodDeleteBucketMetadata = "/deletebucketmetadata"
|
||||
peerRESTMethodLoadBucketMetadata = "/loadbucketmetadata"
|
||||
peerRESTMethodGetBucketStats = "/getbucketstats"
|
||||
peerRESTMethodServerUpdate = "/serverupdate"
|
||||
peerRESTMethodSignalService = "/signalservice"
|
||||
peerRESTMethodBackgroundHealStatus = "/backgroundhealstatus"
|
||||
peerRESTMethodGetLocks = "/getlocks"
|
||||
peerRESTMethodLoadUser = "/loaduser"
|
||||
peerRESTMethodLoadServiceAccount = "/loadserviceaccount"
|
||||
peerRESTMethodDeleteUser = "/deleteuser"
|
||||
peerRESTMethodDeleteServiceAccount = "/deleteserviceaccount"
|
||||
peerRESTMethodLoadPolicy = "/loadpolicy"
|
||||
peerRESTMethodLoadPolicyMapping = "/loadpolicymapping"
|
||||
peerRESTMethodDeletePolicy = "/deletepolicy"
|
||||
peerRESTMethodLoadGroup = "/loadgroup"
|
||||
peerRESTMethodStartProfiling = "/startprofiling"
|
||||
peerRESTMethodDownloadProfilingData = "/downloadprofilingdata"
|
||||
peerRESTMethodCycleBloom = "/cyclebloom"
|
||||
peerRESTMethodTrace = "/trace"
|
||||
peerRESTMethodListen = "/listen"
|
||||
peerRESTMethodLog = "/log"
|
||||
peerRESTMethodGetLocalDiskIDs = "/getlocaldiskids"
|
||||
peerRESTMethodGetBandwidth = "/bandwidth"
|
||||
peerRESTMethodGetMetacacheListing = "/getmetacache"
|
||||
peerRESTMethodUpdateMetacacheListing = "/updatemetacache"
|
||||
peerRESTMethodGetPeerMetrics = "/peermetrics"
|
||||
peerRESTMethodHealth = "/health"
|
||||
peerRESTMethodServerInfo = "/serverinfo"
|
||||
peerRESTMethodDriveInfo = "/driveinfo"
|
||||
peerRESTMethodNetInfo = "/netinfo"
|
||||
peerRESTMethodCPUInfo = "/cpuinfo"
|
||||
peerRESTMethodDiskHwInfo = "/diskhwinfo"
|
||||
peerRESTMethodOsInfo = "/osinfo"
|
||||
peerRESTMethodMemInfo = "/meminfo"
|
||||
peerRESTMethodProcInfo = "/procinfo"
|
||||
peerRESTMethodDispatchNetInfo = "/dispatchnetinfo"
|
||||
peerRESTMethodDeleteBucketMetadata = "/deletebucketmetadata"
|
||||
peerRESTMethodLoadBucketMetadata = "/loadbucketmetadata"
|
||||
peerRESTMethodGetBucketStats = "/getbucketstats"
|
||||
peerRESTMethodServerUpdate = "/serverupdate"
|
||||
peerRESTMethodSignalService = "/signalservice"
|
||||
peerRESTMethodBackgroundHealStatus = "/backgroundhealstatus"
|
||||
peerRESTMethodGetLocks = "/getlocks"
|
||||
peerRESTMethodLoadUser = "/loaduser"
|
||||
peerRESTMethodLoadServiceAccount = "/loadserviceaccount"
|
||||
peerRESTMethodDeleteUser = "/deleteuser"
|
||||
peerRESTMethodDeleteServiceAccount = "/deleteserviceaccount"
|
||||
peerRESTMethodLoadPolicy = "/loadpolicy"
|
||||
peerRESTMethodLoadPolicyMapping = "/loadpolicymapping"
|
||||
peerRESTMethodDeletePolicy = "/deletepolicy"
|
||||
peerRESTMethodLoadGroup = "/loadgroup"
|
||||
peerRESTMethodStartProfiling = "/startprofiling"
|
||||
peerRESTMethodDownloadProfilingData = "/downloadprofilingdata"
|
||||
peerRESTMethodCycleBloom = "/cyclebloom"
|
||||
peerRESTMethodTrace = "/trace"
|
||||
peerRESTMethodListen = "/listen"
|
||||
peerRESTMethodLog = "/log"
|
||||
peerRESTMethodGetLocalDiskIDs = "/getlocaldiskids"
|
||||
peerRESTMethodGetBandwidth = "/bandwidth"
|
||||
peerRESTMethodGetMetacacheListing = "/getmetacache"
|
||||
peerRESTMethodUpdateMetacacheListing = "/updatemetacache"
|
||||
peerRESTMethodGetPeerMetrics = "/peermetrics"
|
||||
peerRESTMethodLoadTransitionTierConfig = "/loadtransitiontierconfig"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
@@ -1006,6 +1006,19 @@ func (s *peerRESTServer) BackgroundHealStatusHandler(w http.ResponseWriter, r *h
|
||||
logger.LogIf(ctx, gob.NewEncoder(w).Encode(state))
|
||||
}
|
||||
|
||||
func (s *peerRESTServer) LoadTransitionTierConfigHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if !s.IsValid(w, r) {
|
||||
s.writeErrorResponse(w, errors.New("invalid request"))
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
err := globalTierConfigMgr.Reload(context.Background(), newObjectLayerFn())
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), fmt.Errorf("Failed to reload remote tier config %s", err))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// ConsoleLogHandler sends console logs of this node back to peer rest client
|
||||
func (s *peerRESTServer) ConsoleLogHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if !s.IsValid(w, r) {
|
||||
@@ -1139,4 +1152,5 @@ func registerPeerRESTHandlers(router *mux.Router) {
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetMetacacheListing).HandlerFunc(httpTraceHdrs(server.GetMetacacheListingHandler))
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodUpdateMetacacheListing).HandlerFunc(httpTraceHdrs(server.UpdateMetacacheListingHandler))
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetPeerMetrics).HandlerFunc(httpTraceHdrs(server.GetPeerMetrics))
|
||||
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodLoadTransitionTierConfig).HandlerFunc(httpTraceHdrs(server.LoadTransitionTierConfigHandler))
|
||||
}
|
||||
|
||||
@@ -243,6 +243,9 @@ func newAllSubsystems() {
|
||||
|
||||
// Create new bucket replication subsytem
|
||||
globalBucketTargetSys = NewBucketTargetSys()
|
||||
|
||||
// Create new ILM tier configuration subsystem
|
||||
globalTierConfigMgr = NewTierConfigMgr()
|
||||
}
|
||||
|
||||
func configRetriableErrors(err error) bool {
|
||||
@@ -401,6 +404,13 @@ func initAllSubsystems(ctx context.Context, newObject ObjectLayer) (err error) {
|
||||
// Initialize bucket targets sub-system.
|
||||
globalBucketTargetSys.Init(ctx, buckets, newObject)
|
||||
|
||||
if globalIsErasure {
|
||||
// Initialize transition tier configuration manager
|
||||
err = globalTierConfigMgr.Init(ctx, newObject)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -510,7 +520,6 @@ func serverMain(ctx *cli.Context) {
|
||||
if err != nil {
|
||||
logFatalErrs(err, Endpoint{}, true)
|
||||
}
|
||||
|
||||
logger.SetDeploymentID(globalDeploymentID)
|
||||
|
||||
// Enable background operations for erasure coding
|
||||
@@ -538,7 +547,12 @@ func serverMain(ctx *cli.Context) {
|
||||
|
||||
if globalIsErasure { // to be done after config init
|
||||
initBackgroundReplication(GlobalContext, newObject)
|
||||
globalTierJournal, err = initTierDeletionJournal(GlobalContext.Done())
|
||||
if err != nil {
|
||||
logger.FatalIf(err, "Unable to initialize remote tier pending deletes journal")
|
||||
}
|
||||
}
|
||||
|
||||
if globalCacheConfig.Enabled {
|
||||
// initialize the new disk cache objects.
|
||||
var cacheAPI CacheObjectLayer
|
||||
|
||||
@@ -132,6 +132,13 @@ type FileInfo struct {
|
||||
// TransitionStatus is set to Pending/Complete for transitioned
|
||||
// entries based on state of transition
|
||||
TransitionStatus string
|
||||
// TransitionedObjName is the object name on the remote tier corresponding
|
||||
// to object (version) on the source tier.
|
||||
TransitionedObjName string
|
||||
// TransitionTier is the storage class label assigned to remote tier.
|
||||
TransitionTier string
|
||||
// ExpireRestored indicates that the restored object is to be expired.
|
||||
ExpireRestored bool
|
||||
|
||||
// DataDir of the file
|
||||
DataDir string
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
@@ -551,8 +534,8 @@ func (z *FileInfo) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
if zb0001 != 20 {
|
||||
err = msgp.ArrayError{Wanted: 20, Got: zb0001}
|
||||
if zb0001 != 23 {
|
||||
err = msgp.ArrayError{Wanted: 23, Got: zb0001}
|
||||
return
|
||||
}
|
||||
z.Volume, err = dc.ReadString()
|
||||
@@ -585,6 +568,21 @@ func (z *FileInfo) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
err = msgp.WrapError(err, "TransitionStatus")
|
||||
return
|
||||
}
|
||||
z.TransitionedObjName, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "TransitionedObjName")
|
||||
return
|
||||
}
|
||||
z.TransitionTier, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "TransitionTier")
|
||||
return
|
||||
}
|
||||
z.ExpireRestored, err = dc.ReadBool()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ExpireRestored")
|
||||
return
|
||||
}
|
||||
z.DataDir, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "DataDir")
|
||||
@@ -701,8 +699,8 @@ func (z *FileInfo) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z *FileInfo) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// array header, size 20
|
||||
err = en.Append(0xdc, 0x0, 0x14)
|
||||
// array header, size 23
|
||||
err = en.Append(0xdc, 0x0, 0x17)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -736,6 +734,21 @@ func (z *FileInfo) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
err = msgp.WrapError(err, "TransitionStatus")
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.TransitionedObjName)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "TransitionedObjName")
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.TransitionTier)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "TransitionTier")
|
||||
return
|
||||
}
|
||||
err = en.WriteBool(z.ExpireRestored)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ExpireRestored")
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.DataDir)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "DataDir")
|
||||
@@ -831,14 +844,17 @@ func (z *FileInfo) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z *FileInfo) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// array header, size 20
|
||||
o = append(o, 0xdc, 0x0, 0x14)
|
||||
// array header, size 23
|
||||
o = append(o, 0xdc, 0x0, 0x17)
|
||||
o = msgp.AppendString(o, z.Volume)
|
||||
o = msgp.AppendString(o, z.Name)
|
||||
o = msgp.AppendString(o, z.VersionID)
|
||||
o = msgp.AppendBool(o, z.IsLatest)
|
||||
o = msgp.AppendBool(o, z.Deleted)
|
||||
o = msgp.AppendString(o, z.TransitionStatus)
|
||||
o = msgp.AppendString(o, z.TransitionedObjName)
|
||||
o = msgp.AppendString(o, z.TransitionTier)
|
||||
o = msgp.AppendBool(o, z.ExpireRestored)
|
||||
o = msgp.AppendString(o, z.DataDir)
|
||||
o = msgp.AppendBool(o, z.XLV1)
|
||||
o = msgp.AppendTime(o, z.ModTime)
|
||||
@@ -879,8 +895,8 @@ func (z *FileInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
if zb0001 != 20 {
|
||||
err = msgp.ArrayError{Wanted: 20, Got: zb0001}
|
||||
if zb0001 != 23 {
|
||||
err = msgp.ArrayError{Wanted: 23, Got: zb0001}
|
||||
return
|
||||
}
|
||||
z.Volume, bts, err = msgp.ReadStringBytes(bts)
|
||||
@@ -913,6 +929,21 @@ func (z *FileInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
err = msgp.WrapError(err, "TransitionStatus")
|
||||
return
|
||||
}
|
||||
z.TransitionedObjName, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "TransitionedObjName")
|
||||
return
|
||||
}
|
||||
z.TransitionTier, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "TransitionTier")
|
||||
return
|
||||
}
|
||||
z.ExpireRestored, bts, err = msgp.ReadBoolBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ExpireRestored")
|
||||
return
|
||||
}
|
||||
z.DataDir, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "DataDir")
|
||||
@@ -1030,7 +1061,7 @@ func (z *FileInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z *FileInfo) Msgsize() (s int) {
|
||||
s = 3 + msgp.StringPrefixSize + len(z.Volume) + msgp.StringPrefixSize + len(z.Name) + msgp.StringPrefixSize + len(z.VersionID) + msgp.BoolSize + msgp.BoolSize + msgp.StringPrefixSize + len(z.TransitionStatus) + msgp.StringPrefixSize + len(z.DataDir) + msgp.BoolSize + msgp.TimeSize + msgp.Int64Size + msgp.Uint32Size + msgp.MapHeaderSize
|
||||
s = 3 + msgp.StringPrefixSize + len(z.Volume) + msgp.StringPrefixSize + len(z.Name) + msgp.StringPrefixSize + len(z.VersionID) + msgp.BoolSize + msgp.BoolSize + msgp.StringPrefixSize + len(z.TransitionStatus) + msgp.StringPrefixSize + len(z.TransitionedObjName) + msgp.StringPrefixSize + len(z.TransitionTier) + msgp.BoolSize + msgp.StringPrefixSize + len(z.DataDir) + msgp.BoolSize + msgp.TimeSize + msgp.Int64Size + msgp.Uint32Size + msgp.MapHeaderSize
|
||||
if z.Metadata != nil {
|
||||
for za0001, za0002 := range z.Metadata {
|
||||
_ = za0002
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Code generated by "stringer -type=storageMetric -trimprefix=storageMetric xl-storage-disk-id-check.go"; DO NOT EDIT.
|
||||
|
||||
package cmd
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Code generated by "stringer -type=STSErrorCode -trimprefix=Err sts-errors.go"; DO NOT EDIT.
|
||||
|
||||
package cmd
|
||||
|
||||
190
cmd/tier-handlers.go
Normal file
190
cmd/tier-handlers.go
Normal file
@@ -0,0 +1,190 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
iampolicy "github.com/minio/minio/pkg/iam/policy"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
)
|
||||
|
||||
var (
|
||||
// error returned when remote tier already exists
|
||||
errTierAlreadyExists = AdminError{
|
||||
Code: "XMinioAdminTierAlreadyExists",
|
||||
Message: "Specified remote tier already exists",
|
||||
StatusCode: http.StatusConflict,
|
||||
}
|
||||
// error returned when remote tier is not found
|
||||
errTierNotFound = AdminError{
|
||||
Code: "XMinioAdminTierNotFound",
|
||||
Message: "Specified remote tier was not found",
|
||||
StatusCode: http.StatusNotFound,
|
||||
}
|
||||
// error returned when remote tier name is not in uppercase
|
||||
errTierNameNotUppercase = AdminError{
|
||||
Code: "XMinioAdminTierNameNotUpperCase",
|
||||
Message: "Tier name must be in uppercase",
|
||||
StatusCode: http.StatusBadRequest,
|
||||
}
|
||||
// error returned when remote tier bucket is not found
|
||||
errTierBucketNotFound = AdminError{
|
||||
Code: "XMinioAdminTierBucketNotFound",
|
||||
Message: "Remote tier bucket not found",
|
||||
StatusCode: http.StatusBadRequest,
|
||||
}
|
||||
// error returned when remote tier credentials are invalid.
|
||||
errTierInvalidCredentials = AdminError{
|
||||
Code: "XMinioAdminTierInvalidCredentials",
|
||||
Message: "Invalid remote tier credentials",
|
||||
StatusCode: http.StatusBadRequest,
|
||||
}
|
||||
)
|
||||
|
||||
func (api adminAPIHandlers) AddTierHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "AddTier")
|
||||
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
if !globalIsErasure {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
objAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.SetTierAction)
|
||||
if objAPI == nil || globalNotificationSys == nil || globalTierConfigMgr == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
password := cred.SecretKey
|
||||
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
var cfg madmin.TierConfig
|
||||
if err := json.Unmarshal(reqBytes, &cfg); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Refresh from the disk in case we had missed notifications about edits from peers.
|
||||
if err := globalTierConfigMgr.Reload(ctx, objAPI); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
err = globalTierConfigMgr.Add(ctx, cfg)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
err = globalTierConfigMgr.Save(ctx, objAPI)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
globalNotificationSys.LoadTransitionTierConfig(ctx)
|
||||
|
||||
writeSuccessNoContent(w)
|
||||
}
|
||||
|
||||
func (api adminAPIHandlers) ListTierHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "ListTier")
|
||||
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
if !globalIsErasure {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
objAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListTierAction)
|
||||
if objAPI == nil || globalNotificationSys == nil || globalTierConfigMgr == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
tiers := globalTierConfigMgr.ListTiers()
|
||||
data, err := json.Marshal(tiers)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseJSON(w, data)
|
||||
}
|
||||
|
||||
func (api adminAPIHandlers) EditTierHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "EditTier")
|
||||
|
||||
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
|
||||
|
||||
if !globalIsErasure {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
objAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.SetTierAction)
|
||||
if objAPI == nil || globalNotificationSys == nil || globalTierConfigMgr == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
vars := mux.Vars(r)
|
||||
scName := vars["tier"]
|
||||
|
||||
password := cred.SecretKey
|
||||
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
var creds madmin.TierCreds
|
||||
if err := json.Unmarshal(reqBytes, &creds); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Refresh from the disk in case we had missed notifications about edits from peers.
|
||||
if err := globalTierConfigMgr.Reload(ctx, objAPI); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err := globalTierConfigMgr.Edit(ctx, scName, creds); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if err := globalTierConfigMgr.Save(ctx, objAPI); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
globalNotificationSys.LoadTransitionTierConfig(ctx)
|
||||
|
||||
writeSuccessNoContent(w)
|
||||
}
|
||||
270
cmd/tier-journal.go
Normal file
270
cmd/tier-journal.go
Normal file
@@ -0,0 +1,270 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
)
|
||||
|
||||
//go:generate msgp -file $GOFILE -unexported
|
||||
//msgp:ignore tierJournal walkfn
|
||||
|
||||
type tierJournal struct {
|
||||
sync.RWMutex
|
||||
diskPath string
|
||||
file *os.File // active journal file
|
||||
}
|
||||
|
||||
type jentry struct {
|
||||
ObjName string `msg:"obj"`
|
||||
TierName string `msg:"tier"`
|
||||
}
|
||||
|
||||
const (
|
||||
tierJournalVersion = 1
|
||||
tierJournalHdrLen = 2 // 2 bytes
|
||||
)
|
||||
|
||||
func initTierDeletionJournal(done <-chan struct{}) (*tierJournal, error) {
|
||||
diskPath := globalEndpoints.FirstLocalDiskPath()
|
||||
j := &tierJournal{
|
||||
diskPath: diskPath,
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(j.JournalPath()), os.FileMode(0700)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err := j.Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
go j.deletePending(done)
|
||||
return j, nil
|
||||
}
|
||||
|
||||
// rotate rotates the journal. If a read-only journal already exists it does
|
||||
// nothing. Otherwise renames the active journal to a read-only journal and
|
||||
// opens a new active journal.
|
||||
func (j *tierJournal) rotate() error {
|
||||
// Do nothing if a read-only journal file already exists.
|
||||
if _, err := os.Stat(j.ReadOnlyPath()); err == nil {
|
||||
return nil
|
||||
}
|
||||
// Close the active journal if present.
|
||||
j.Close()
|
||||
// Open a new active journal for subsequent journalling.
|
||||
return j.Open()
|
||||
}
|
||||
|
||||
type walkFn func(objName, tierName string) error
|
||||
|
||||
func (j *tierJournal) ReadOnlyPath() string {
|
||||
return filepath.Join(j.diskPath, minioMetaBucket, "ilm", "deletion-journal.ro.bin")
|
||||
}
|
||||
|
||||
func (j *tierJournal) JournalPath() string {
|
||||
return filepath.Join(j.diskPath, minioMetaBucket, "ilm", "deletion-journal.bin")
|
||||
}
|
||||
|
||||
func (j *tierJournal) WalkEntries(fn walkFn) {
|
||||
err := j.rotate()
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), fmt.Errorf("tier-journal: failed to rotate pending deletes journal %s", err))
|
||||
return
|
||||
}
|
||||
|
||||
ro, err := j.OpenRO()
|
||||
switch {
|
||||
case errors.Is(err, os.ErrNotExist):
|
||||
return // No read-only journal to process; nothing to do.
|
||||
case err != nil:
|
||||
logger.LogIf(context.Background(), fmt.Errorf("tier-journal: failed open read-only journal for processing %s", err))
|
||||
return
|
||||
}
|
||||
defer ro.Close()
|
||||
mr := msgp.NewReader(ro)
|
||||
done := false
|
||||
for {
|
||||
var entry jentry
|
||||
err := entry.DecodeMsg(mr)
|
||||
if errors.Is(err, io.EOF) {
|
||||
done = true
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), fmt.Errorf("tier-journal: failed to decode journal entry %s", err))
|
||||
break
|
||||
}
|
||||
err = fn(entry.ObjName, entry.TierName)
|
||||
if err != nil && !isErrObjectNotFound(err) {
|
||||
logger.LogIf(context.Background(), fmt.Errorf("tier-journal: failed to delete transitioned object %s from %s due to %s", entry.ObjName, entry.TierName, err))
|
||||
j.AddEntry(entry)
|
||||
}
|
||||
}
|
||||
if done {
|
||||
os.Remove(j.ReadOnlyPath())
|
||||
}
|
||||
}
|
||||
|
||||
func deleteObjectFromRemoteTier(objName, tierName string) error {
|
||||
w, err := globalTierConfigMgr.getDriver(tierName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = w.Remove(context.Background(), objName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (j *tierJournal) deletePending(done <-chan struct{}) {
|
||||
ticker := time.NewTicker(30 * time.Minute)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
j.WalkEntries(deleteObjectFromRemoteTier)
|
||||
|
||||
case <-done:
|
||||
j.Close()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (j *tierJournal) AddEntry(je jentry) error {
|
||||
// Open journal if it hasn't been
|
||||
err := j.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b, err := je.MarshalMsg(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
j.Lock()
|
||||
defer j.Unlock()
|
||||
_, err = j.file.Write(b)
|
||||
if err != nil {
|
||||
j.file = nil // reset to allow subsequent reopen when file/disk is available.
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Close closes the active journal and renames it to read-only for pending
|
||||
// deletes processing. Note: calling Close on a closed journal is a no-op.
|
||||
func (j *tierJournal) Close() error {
|
||||
j.Lock()
|
||||
defer j.Unlock()
|
||||
if j.file == nil { // already closed
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
f *os.File
|
||||
fi os.FileInfo
|
||||
err error
|
||||
)
|
||||
// Setting j.file to nil
|
||||
f, j.file = j.file, f
|
||||
if fi, err = f.Stat(); err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
// Skip renaming active journal if empty.
|
||||
if fi.Size() == tierJournalHdrLen {
|
||||
return nil
|
||||
}
|
||||
|
||||
jPath := j.JournalPath()
|
||||
jroPath := j.ReadOnlyPath()
|
||||
// Rotate active journal to perform pending deletes.
|
||||
err = os.Rename(jPath, jroPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Open opens a new active journal. Note: calling Open on an opened journal is a
|
||||
// no-op.
|
||||
func (j *tierJournal) Open() error {
|
||||
j.Lock()
|
||||
defer j.Unlock()
|
||||
if j.file != nil { // already open
|
||||
return nil
|
||||
}
|
||||
|
||||
var err error
|
||||
j.file, err = os.OpenFile(j.JournalPath(), os.O_APPEND|os.O_CREATE|os.O_WRONLY|writeMode, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// write journal version header if active journal is empty
|
||||
fi, err := j.file.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if fi.Size() == 0 {
|
||||
var data [tierJournalHdrLen]byte
|
||||
binary.LittleEndian.PutUint16(data[:], tierJournalVersion)
|
||||
_, err = j.file.Write(data[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (j *tierJournal) OpenRO() (io.ReadCloser, error) {
|
||||
file, err := os.Open(j.ReadOnlyPath())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// read journal version header
|
||||
var data [tierJournalHdrLen]byte
|
||||
if _, err := io.ReadFull(file, data[:]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch binary.LittleEndian.Uint16(data[:]) {
|
||||
case tierJournalVersion:
|
||||
default:
|
||||
return nil, errors.New("unsupported pending deletes journal version")
|
||||
}
|
||||
return file, nil
|
||||
}
|
||||
135
cmd/tier-journal_gen.go
Normal file
135
cmd/tier-journal_gen.go
Normal file
@@ -0,0 +1,135 @@
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
import (
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
)
|
||||
|
||||
// DecodeMsg implements msgp.Decodable
|
||||
func (z *jentry) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, err = dc.ReadMapHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, err = dc.ReadMapKeyPtr()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "obj":
|
||||
z.ObjName, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjName")
|
||||
return
|
||||
}
|
||||
case "tier":
|
||||
z.TierName, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "TierName")
|
||||
return
|
||||
}
|
||||
default:
|
||||
err = dc.Skip()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z jentry) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// map header, size 2
|
||||
// write "obj"
|
||||
err = en.Append(0x82, 0xa3, 0x6f, 0x62, 0x6a)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.ObjName)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjName")
|
||||
return
|
||||
}
|
||||
// write "tier"
|
||||
err = en.Append(0xa4, 0x74, 0x69, 0x65, 0x72)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.TierName)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "TierName")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z jentry) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// map header, size 2
|
||||
// string "obj"
|
||||
o = append(o, 0x82, 0xa3, 0x6f, 0x62, 0x6a)
|
||||
o = msgp.AppendString(o, z.ObjName)
|
||||
// string "tier"
|
||||
o = append(o, 0xa4, 0x74, 0x69, 0x65, 0x72)
|
||||
o = msgp.AppendString(o, z.TierName)
|
||||
return
|
||||
}
|
||||
|
||||
// UnmarshalMsg implements msgp.Unmarshaler
|
||||
func (z *jentry) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
var field []byte
|
||||
_ = field
|
||||
var zb0001 uint32
|
||||
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
for zb0001 > 0 {
|
||||
zb0001--
|
||||
field, bts, err = msgp.ReadMapKeyZC(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "obj":
|
||||
z.ObjName, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "ObjName")
|
||||
return
|
||||
}
|
||||
case "tier":
|
||||
z.TierName, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "TierName")
|
||||
return
|
||||
}
|
||||
default:
|
||||
bts, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
o = bts
|
||||
return
|
||||
}
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z jentry) Msgsize() (s int) {
|
||||
s = 1 + 4 + msgp.StringPrefixSize + len(z.ObjName) + 5 + msgp.StringPrefixSize + len(z.TierName)
|
||||
return
|
||||
}
|
||||
123
cmd/tier-journal_gen_test.go
Normal file
123
cmd/tier-journal_gen_test.go
Normal file
@@ -0,0 +1,123 @@
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
)
|
||||
|
||||
func TestMarshalUnmarshaljentry(t *testing.T) {
|
||||
v := jentry{}
|
||||
bts, err := v.MarshalMsg(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
left, err := v.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) > 0 {
|
||||
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
|
||||
}
|
||||
|
||||
left, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) > 0 {
|
||||
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMarshalMsgjentry(b *testing.B) {
|
||||
v := jentry{}
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.MarshalMsg(nil)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendMsgjentry(b *testing.B) {
|
||||
v := jentry{}
|
||||
bts := make([]byte, 0, v.Msgsize())
|
||||
bts, _ = v.MarshalMsg(bts[0:0])
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
bts, _ = v.MarshalMsg(bts[0:0])
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshaljentry(b *testing.B) {
|
||||
v := jentry{}
|
||||
bts, _ := v.MarshalMsg(nil)
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := v.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeDecodejentry(t *testing.T) {
|
||||
v := jentry{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
|
||||
m := v.Msgsize()
|
||||
if buf.Len() > m {
|
||||
t.Log("WARNING: TestEncodeDecodejentry Msgsize() is inaccurate")
|
||||
}
|
||||
|
||||
vn := jentry{}
|
||||
err := msgp.Decode(&buf, &vn)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
msgp.Encode(&buf, &v)
|
||||
err = msgp.NewReader(&buf).Skip()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEncodejentry(b *testing.B) {
|
||||
v := jentry{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
en := msgp.NewWriter(msgp.Nowhere)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.EncodeMsg(en)
|
||||
}
|
||||
en.Flush()
|
||||
}
|
||||
|
||||
func BenchmarkDecodejentry(b *testing.B) {
|
||||
v := jentry{}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, &v)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
rd := msgp.NewEndlessReader(buf.Bytes(), b)
|
||||
dc := msgp.NewReader(rd)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := v.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
156
cmd/tier-sweeper.go
Normal file
156
cmd/tier-sweeper.go
Normal file
@@ -0,0 +1,156 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
)
|
||||
|
||||
// objSweeper determines if a transitioned object needs to be removed from the remote tier.
|
||||
// A typical usage would be like,
|
||||
// os := newObjSweeper(bucket, object)
|
||||
// // Perform a ObjectLayer.GetObjectInfo to fetch object version information
|
||||
// goiOpts := os.GetOpts()
|
||||
// gerr := objAPI.GetObjectInfo(ctx, bucket, object, goiOpts)
|
||||
// if gerr == nil {
|
||||
// os.SetTransitionState(goi)
|
||||
// }
|
||||
//
|
||||
// // After the overwriting object operation is complete.
|
||||
// if jentry, ok := os.ShouldRemoveRemoteObject(); ok {
|
||||
// err := globalTierJournal.AddEntry(jentry)
|
||||
// logger.LogIf(ctx, err)
|
||||
// }
|
||||
type objSweeper struct {
|
||||
Object string
|
||||
Bucket string
|
||||
ReqVersion string // version ID set by application, applies only to DeleteObject and DeleteObjects APIs
|
||||
Versioned bool
|
||||
Suspended bool
|
||||
TransitionStatus string
|
||||
TransitionTier string
|
||||
RemoteObject string
|
||||
}
|
||||
|
||||
// newObjSweeper returns an objSweeper for a given bucket and object.
|
||||
// It initializes the versioning information using bucket name.
|
||||
func newObjSweeper(bucket, object string) *objSweeper {
|
||||
versioned := globalBucketVersioningSys.Enabled(bucket)
|
||||
suspended := globalBucketVersioningSys.Suspended(bucket)
|
||||
return &objSweeper{
|
||||
Object: object,
|
||||
Bucket: bucket,
|
||||
Versioned: versioned,
|
||||
Suspended: suspended,
|
||||
}
|
||||
}
|
||||
|
||||
// versionIDer interface is used to fetch object versionIDer from disparate sources
|
||||
// like http.Request and ObjectToDelete.
|
||||
type versionIDer interface {
|
||||
GetVersionID() string
|
||||
}
|
||||
|
||||
// multiDelete is a type alias for ObjectToDelete to implement versionID
|
||||
// interface
|
||||
type multiDelete ObjectToDelete
|
||||
|
||||
// GetVersionID returns object version of an object to be deleted via
|
||||
// multi-delete API.
|
||||
func (md multiDelete) GetVersionID() string {
|
||||
return md.VersionID
|
||||
}
|
||||
|
||||
// singleDelete is a type alias for http.Request to implement versionID
|
||||
// interface
|
||||
type singleDelete http.Request
|
||||
|
||||
// GetVersionID returns object version of an object to be deleted via (simple)
|
||||
// delete API. Note only when the versionID is set explicitly by the application
|
||||
// will we return a non-empty versionID.
|
||||
func (sd singleDelete) GetVersionID() string {
|
||||
return strings.TrimSpace(sd.URL.Query().Get(xhttp.VersionID))
|
||||
}
|
||||
|
||||
// WithVersion sets the version ID from v
|
||||
func (os *objSweeper) WithVersion(v versionIDer) *objSweeper {
|
||||
os.ReqVersion = v.GetVersionID()
|
||||
return os
|
||||
}
|
||||
|
||||
// GetOpts returns ObjectOptions to fetch the object version that may be
|
||||
// overwritten or deleted depending on bucket versioning status.
|
||||
func (os *objSweeper) GetOpts() ObjectOptions {
|
||||
opts := ObjectOptions{
|
||||
VersionID: os.ReqVersion,
|
||||
Versioned: os.Versioned,
|
||||
VersionSuspended: os.Suspended,
|
||||
}
|
||||
if os.Suspended && os.ReqVersion == "" {
|
||||
opts.VersionID = nullVersionID
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
// SetTransitionState sets ILM transition related information from given info.
func (os *objSweeper) SetTransitionState(info ObjectInfo) {
	// Remote tier the object version was transitioned to.
	os.TransitionTier = info.TransitionTier
	// Lifecycle transition status of this version.
	os.TransitionStatus = info.TransitionStatus
	// Object name on the remote tier, used to build the deletion journal entry.
	os.RemoteObject = info.transitionedObjName
}
|
||||
|
||||
// shouldRemoveRemoteObject determines if a transitioned object should be
|
||||
// removed from remote tier. If remote object is to be deleted, returns the
|
||||
// corresponding tier deletion journal entry and true. Otherwise returns empty
|
||||
// jentry value and false.
|
||||
func (os *objSweeper) shouldRemoveRemoteObject() (jentry, bool) {
|
||||
if os.TransitionStatus != lifecycle.TransitionComplete {
|
||||
return jentry{}, false
|
||||
}
|
||||
|
||||
// 1. If bucket versioning is disabled, remove the remote object.
|
||||
// 2. If bucket versioning is suspended and
|
||||
// a. version id is specified, remove its remote object.
|
||||
// b. version id is not specified, remove null version's remote object if it exists.
|
||||
// 3. If bucket versioning is enabled and
|
||||
// a. version id is specified, remove its remote object.
|
||||
// b. version id is not specified, nothing to be done (a delete marker is added).
|
||||
delTier := false
|
||||
switch {
|
||||
case !os.Versioned, os.Suspended: // 1, 2.a, 2.b
|
||||
delTier = true
|
||||
case os.Versioned && os.ReqVersion != "": // 3.a
|
||||
delTier = true
|
||||
}
|
||||
if delTier {
|
||||
return jentry{ObjName: os.RemoteObject, TierName: os.TransitionTier}, true
|
||||
}
|
||||
return jentry{}, false
|
||||
}
|
||||
|
||||
// Sweep removes the transitioned object if it's no longer referred to.
|
||||
func (os *objSweeper) Sweep() error {
|
||||
if je, ok := os.shouldRemoveRemoteObject(); ok {
|
||||
return globalTierJournal.AddEntry(je)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
377
cmd/tier.go
Normal file
377
cmd/tier.go
Normal file
@@ -0,0 +1,377 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
)
|
||||
|
||||
//go:generate msgp -file $GOFILE
|
||||
|
||||
// Errors returned by remote tier admin operations.
var (
	errTierInsufficientCreds = errors.New("insufficient tier credentials supplied")
	errTierBackendInUse      = errors.New("remote tier backend already in use")
	errTierTypeUnsupported   = errors.New("unsupported tier type")
)

const (
	tierConfigFile    = "tier-config.bin" // object name of the persisted tier config
	tierConfigFormat  = 1                 // on-disk header: serialization format
	tierConfigVersion = 1                 // on-disk header: config schema version
)

// tierConfigPath refers to remote tier config object name
var tierConfigPath string = path.Join(minioConfigPrefix, tierConfigFile)

// TierConfigMgr holds the collection of remote tiers configured in this deployment.
type TierConfigMgr struct {
	sync.RWMutex `msg:"-"` // guards both maps below; excluded from msgp encoding

	// drivercache caches initialized warm-backend drivers keyed by tier name;
	// it is rebuilt on demand and never serialized.
	drivercache map[string]WarmBackend `msg:"-"`

	// Tiers maps tier name to its remote tier configuration.
	Tiers map[string]madmin.TierConfig `json:"tiers"`
}
|
||||
|
||||
// IsTierValid returns true if there exists a remote tier by name tierName,
|
||||
// otherwise returns false.
|
||||
func (config *TierConfigMgr) IsTierValid(tierName string) bool {
|
||||
config.RLock()
|
||||
defer config.RUnlock()
|
||||
_, valid := config.isTierNameInUse(tierName)
|
||||
return valid
|
||||
}
|
||||
|
||||
// isTierNameInUse returns tier type and true if there exists a remote tier by
|
||||
// name tierName, otherwise returns madmin.Unsupported and false. N B this
|
||||
// function is meant for internal use, where the caller is expected to take
|
||||
// appropriate locks.
|
||||
func (config *TierConfigMgr) isTierNameInUse(tierName string) (madmin.TierType, bool) {
|
||||
if t, ok := config.Tiers[tierName]; ok {
|
||||
return t.Type, true
|
||||
}
|
||||
return madmin.Unsupported, false
|
||||
}
|
||||
|
||||
// Add adds tier to config if it passes all validations.
|
||||
func (config *TierConfigMgr) Add(ctx context.Context, tier madmin.TierConfig) error {
|
||||
config.Lock()
|
||||
defer config.Unlock()
|
||||
|
||||
// check if tier name is in all caps
|
||||
|
||||
tierName := tier.Name
|
||||
if tierName != strings.ToUpper(tierName) {
|
||||
return errTierNameNotUppercase
|
||||
}
|
||||
|
||||
// check if tier name already in use
|
||||
if _, exists := config.isTierNameInUse(tierName); exists {
|
||||
return errTierAlreadyExists
|
||||
}
|
||||
|
||||
d, err := newWarmBackend(ctx, tier)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Check if warmbackend is in use by other MinIO tenants
|
||||
inUse, err := d.InUse(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if inUse {
|
||||
return errTierBackendInUse
|
||||
}
|
||||
|
||||
config.Tiers[tierName] = tier
|
||||
config.drivercache[tierName] = d
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListTiers lists remote tiers configured in this deployment.
|
||||
func (config *TierConfigMgr) ListTiers() []madmin.TierConfig {
|
||||
config.RLock()
|
||||
defer config.RUnlock()
|
||||
|
||||
var tierCfgs []madmin.TierConfig
|
||||
for _, tier := range config.Tiers {
|
||||
// This makes a local copy of tier config before
|
||||
// passing a reference to it.
|
||||
tier := tier.Clone()
|
||||
tierCfgs = append(tierCfgs, tier)
|
||||
}
|
||||
return tierCfgs
|
||||
}
|
||||
|
||||
// Edit replaces the credentials of the remote tier specified by tierName with creds.
|
||||
func (config *TierConfigMgr) Edit(ctx context.Context, tierName string, creds madmin.TierCreds) error {
|
||||
config.Lock()
|
||||
defer config.Unlock()
|
||||
|
||||
// check if tier by this name exists
|
||||
tierType, exists := config.isTierNameInUse(tierName)
|
||||
if !exists {
|
||||
return errTierNotFound
|
||||
}
|
||||
|
||||
newCfg := config.Tiers[tierName]
|
||||
switch tierType {
|
||||
case madmin.S3:
|
||||
if creds.AccessKey == "" || creds.SecretKey == "" {
|
||||
return errTierInsufficientCreds
|
||||
}
|
||||
newCfg.S3.AccessKey = creds.AccessKey
|
||||
newCfg.S3.SecretKey = creds.SecretKey
|
||||
|
||||
case madmin.Azure:
|
||||
if creds.AccessKey == "" || creds.SecretKey == "" {
|
||||
return errTierInsufficientCreds
|
||||
}
|
||||
newCfg.Azure.AccountName = creds.AccessKey
|
||||
newCfg.Azure.AccountKey = creds.SecretKey
|
||||
|
||||
case madmin.GCS:
|
||||
if creds.CredsJSON == nil {
|
||||
return errTierInsufficientCreds
|
||||
}
|
||||
newCfg.GCS.Creds = base64.URLEncoding.EncodeToString(creds.CredsJSON)
|
||||
}
|
||||
|
||||
d, err := newWarmBackend(ctx, newCfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.Tiers[tierName] = newCfg
|
||||
config.drivercache[tierName] = d
|
||||
return nil
|
||||
}
|
||||
|
||||
// Bytes returns msgpack encoded config with format and version headers.
|
||||
func (config *TierConfigMgr) Bytes() ([]byte, error) {
|
||||
config.RLock()
|
||||
defer config.RUnlock()
|
||||
data := make([]byte, 4, config.Msgsize()+4)
|
||||
|
||||
// Initialize the header.
|
||||
binary.LittleEndian.PutUint16(data[0:2], tierConfigFormat)
|
||||
binary.LittleEndian.PutUint16(data[2:4], tierConfigVersion)
|
||||
|
||||
// Marshal the tier config
|
||||
return config.MarshalMsg(data)
|
||||
}
|
||||
|
||||
// getDriver returns a warmBackend interface object initialized with remote tier config matching tierName
|
||||
func (config *TierConfigMgr) getDriver(tierName string) (d WarmBackend, err error) {
|
||||
config.Lock()
|
||||
defer config.Unlock()
|
||||
|
||||
var ok bool
|
||||
// Lookup in-memory drivercache
|
||||
d, ok = config.drivercache[tierName]
|
||||
if ok {
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// Initialize driver from tier config matching tierName
|
||||
t, ok := config.Tiers[tierName]
|
||||
if !ok {
|
||||
return nil, errTierNotFound
|
||||
}
|
||||
d, err = newWarmBackend(context.TODO(), t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config.drivercache[tierName] = d
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// configReader returns a PutObjReader and ObjectOptions needed to save config
// using a PutObject API. PutObjReader encrypts json encoded tier configurations
// if KMS is enabled, otherwise simply yields the json encoded bytes as is.
// Similarly, ObjectOptions value depends on KMS' status.
func (config *TierConfigMgr) configReader() (*PutObjReader, *ObjectOptions, error) {
	// Serialize config (header + msgpack payload).
	b, err := config.Bytes()
	if err != nil {
		return nil, nil, err
	}

	// Wrap the payload in a hash.Reader so PutObject can verify integrity.
	payloadSize := int64(len(b))
	br := bytes.NewReader(b)
	hr, err := hash.NewReader(br, payloadSize, "", "", payloadSize)
	if err != nil {
		return nil, nil, err
	}
	// Without KMS, persist the plaintext config as-is.
	if GlobalKMS == nil {
		return NewPutObjReader(hr), &ObjectOptions{}, nil
	}

	// Note: Local variables with names ek, oek, etc are named inline with
	// acronyms defined here -
	// https://github.com/minio/minio/blob/master/docs/security/README.md#acronyms

	// Encrypt json encoded tier configurations
	metadata := make(map[string]string)
	sseS3 := true
	// NOTE(review): extKey is passed as an all-zero external key; presumably
	// newEncryptReader derives the actual object key via KMS when sseS3 is
	// true — confirm against newEncryptReader's contract.
	var extKey [32]byte
	encBr, oek, err := newEncryptReader(hr, extKey[:], minioMetaBucket, tierConfigPath, metadata, sseS3)
	if err != nil {
		return nil, nil, err
	}

	// Compute the ciphertext size for the encrypted-content hash reader.
	info := ObjectInfo{
		Size: payloadSize,
	}
	encSize := info.EncryptedSize()
	encHr, err := hash.NewReader(encBr, encSize, "", "", encSize)
	if err != nil {
		return nil, nil, err
	}

	// The PutObjReader carries both the plaintext reader (hr) and the
	// encrypted reader (encHr) plus the object encryption key.
	pReader, err := NewPutObjReader(hr).WithEncryption(encHr, &oek)
	if err != nil {
		return nil, nil, err
	}
	opts := &ObjectOptions{
		// metadata now carries the SSE headers set by newEncryptReader.
		UserDefined: metadata,
		MTime:       UTCNow(),
	}

	return pReader, opts, nil
}
|
||||
|
||||
// Reload updates config by reloading remote tier config from config store.
|
||||
func (config *TierConfigMgr) Reload(ctx context.Context, objAPI ObjectLayer) error {
|
||||
newConfig, err := loadTierConfig(ctx, objAPI)
|
||||
switch err {
|
||||
case nil:
|
||||
break
|
||||
case errConfigNotFound: // nothing to reload
|
||||
return nil
|
||||
default:
|
||||
return err
|
||||
}
|
||||
|
||||
config.Lock()
|
||||
defer config.Unlock()
|
||||
// Reset drivercache built using current config
|
||||
for k := range config.drivercache {
|
||||
delete(config.drivercache, k)
|
||||
}
|
||||
// Remove existing tier configs
|
||||
for k := range config.Tiers {
|
||||
delete(config.Tiers, k)
|
||||
}
|
||||
// Copy over the new tier configs
|
||||
for tier, cfg := range newConfig.Tiers {
|
||||
config.Tiers[tier] = cfg
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Save saves tier configuration onto objAPI
|
||||
func (config *TierConfigMgr) Save(ctx context.Context, objAPI ObjectLayer) error {
|
||||
if objAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
pr, opts, err := globalTierConfigMgr.configReader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = objAPI.PutObject(ctx, minioMetaBucket, tierConfigPath, pr, *opts)
|
||||
return err
|
||||
}
|
||||
|
||||
// NewTierConfigMgr - creates new tier configuration manager,
|
||||
func NewTierConfigMgr() *TierConfigMgr {
|
||||
return &TierConfigMgr{
|
||||
drivercache: make(map[string]WarmBackend),
|
||||
Tiers: make(map[string]madmin.TierConfig),
|
||||
}
|
||||
}
|
||||
|
||||
// loadTierConfig loads remote tier configuration from objAPI.
|
||||
func loadTierConfig(ctx context.Context, objAPI ObjectLayer) (*TierConfigMgr, error) {
|
||||
if objAPI == nil {
|
||||
return nil, errServerNotInitialized
|
||||
}
|
||||
|
||||
data, err := readConfig(ctx, objAPI, tierConfigPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(data) <= 4 {
|
||||
return nil, fmt.Errorf("tierConfigInit: no data")
|
||||
}
|
||||
|
||||
// Read header
|
||||
switch binary.LittleEndian.Uint16(data[0:2]) {
|
||||
case tierConfigFormat:
|
||||
default:
|
||||
return nil, fmt.Errorf("tierConfigInit: unknown format: %d", binary.LittleEndian.Uint16(data[0:2]))
|
||||
}
|
||||
switch binary.LittleEndian.Uint16(data[2:4]) {
|
||||
case tierConfigVersion:
|
||||
default:
|
||||
return nil, fmt.Errorf("tierConfigInit: unknown version: %d", binary.LittleEndian.Uint16(data[2:4]))
|
||||
}
|
||||
|
||||
cfg := NewTierConfigMgr()
|
||||
_, decErr := cfg.UnmarshalMsg(data[4:])
|
||||
if decErr != nil {
|
||||
return nil, decErr
|
||||
}
|
||||
return cfg, nil
|
||||
|
||||
}
|
||||
|
||||
// Reset clears remote tier configured and clears tier driver cache.
|
||||
func (config *TierConfigMgr) Reset() {
|
||||
config.Lock()
|
||||
for k := range config.drivercache {
|
||||
delete(config.drivercache, k)
|
||||
}
|
||||
for k := range config.Tiers {
|
||||
delete(config.Tiers, k)
|
||||
}
|
||||
config.Unlock()
|
||||
|
||||
}
|
||||
|
||||
// Init initializes tier configuration reading from objAPI
|
||||
func (config *TierConfigMgr) Init(ctx context.Context, objAPI ObjectLayer) error {
|
||||
// In gateway mode, we don't support ILM tier configuration.
|
||||
if globalIsGateway {
|
||||
return nil
|
||||
}
|
||||
|
||||
return config.Reload(ctx, objAPI)
|
||||
}
|
||||
185
cmd/tier_gen.go
Normal file
185
cmd/tier_gen.go
Normal file
@@ -0,0 +1,185 @@
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
import (
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
)
|
||||
|
||||
// NOTE: the methods below are generated by github.com/tinylib/msgp for
// TierConfigMgr. Do not hand-edit; regenerate via `go generate` instead.

// DecodeMsg implements msgp.Decodable
func (z *TierConfigMgr) DecodeMsg(dc *msgp.Reader) (err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, err = dc.ReadMapHeader()
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, err = dc.ReadMapKeyPtr()
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "Tiers":
			var zb0002 uint32
			zb0002, err = dc.ReadMapHeader()
			if err != nil {
				err = msgp.WrapError(err, "Tiers")
				return
			}
			if z.Tiers == nil {
				z.Tiers = make(map[string]madmin.TierConfig, zb0002)
			} else if len(z.Tiers) > 0 {
				for key := range z.Tiers {
					delete(z.Tiers, key)
				}
			}
			for zb0002 > 0 {
				zb0002--
				var za0001 string
				var za0002 madmin.TierConfig
				za0001, err = dc.ReadString()
				if err != nil {
					err = msgp.WrapError(err, "Tiers")
					return
				}
				err = za0002.DecodeMsg(dc)
				if err != nil {
					err = msgp.WrapError(err, "Tiers", za0001)
					return
				}
				z.Tiers[za0001] = za0002
			}
		default:
			err = dc.Skip()
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	return
}

// EncodeMsg implements msgp.Encodable
func (z *TierConfigMgr) EncodeMsg(en *msgp.Writer) (err error) {
	// map header, size 1
	// write "Tiers"
	err = en.Append(0x81, 0xa5, 0x54, 0x69, 0x65, 0x72, 0x73)
	if err != nil {
		return
	}
	err = en.WriteMapHeader(uint32(len(z.Tiers)))
	if err != nil {
		err = msgp.WrapError(err, "Tiers")
		return
	}
	for za0001, za0002 := range z.Tiers {
		err = en.WriteString(za0001)
		if err != nil {
			err = msgp.WrapError(err, "Tiers")
			return
		}
		err = za0002.EncodeMsg(en)
		if err != nil {
			err = msgp.WrapError(err, "Tiers", za0001)
			return
		}
	}
	return
}

// MarshalMsg implements msgp.Marshaler
func (z *TierConfigMgr) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// map header, size 1
	// string "Tiers"
	o = append(o, 0x81, 0xa5, 0x54, 0x69, 0x65, 0x72, 0x73)
	o = msgp.AppendMapHeader(o, uint32(len(z.Tiers)))
	for za0001, za0002 := range z.Tiers {
		o = msgp.AppendString(o, za0001)
		o, err = za0002.MarshalMsg(o)
		if err != nil {
			err = msgp.WrapError(err, "Tiers", za0001)
			return
		}
	}
	return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *TierConfigMgr) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, bts, err = msgp.ReadMapKeyZC(bts)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "Tiers":
			var zb0002 uint32
			zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Tiers")
				return
			}
			if z.Tiers == nil {
				z.Tiers = make(map[string]madmin.TierConfig, zb0002)
			} else if len(z.Tiers) > 0 {
				for key := range z.Tiers {
					delete(z.Tiers, key)
				}
			}
			for zb0002 > 0 {
				var za0001 string
				var za0002 madmin.TierConfig
				zb0002--
				za0001, bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "Tiers")
					return
				}
				bts, err = za0002.UnmarshalMsg(bts)
				if err != nil {
					err = msgp.WrapError(err, "Tiers", za0001)
					return
				}
				z.Tiers[za0001] = za0002
			}
		default:
			bts, err = msgp.Skip(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	o = bts
	return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *TierConfigMgr) Msgsize() (s int) {
	s = 1 + 6 + msgp.MapHeaderSize
	if z.Tiers != nil {
		for za0001, za0002 := range z.Tiers {
			_ = za0002
			s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize()
		}
	}
	return
}
|
||||
123
cmd/tier_gen_test.go
Normal file
123
cmd/tier_gen_test.go
Normal file
@@ -0,0 +1,123 @@
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
)
|
||||
|
||||
// NOTE: these tests and benchmarks are generated by github.com/tinylib/msgp
// for TierConfigMgr. Do not hand-edit; regenerate via `go generate`.

// TestMarshalUnmarshalTierConfigMgr round-trips the zero value through
// MarshalMsg/UnmarshalMsg and checks no bytes are left over.
func TestMarshalUnmarshalTierConfigMgr(t *testing.T) {
	v := TierConfigMgr{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

// BenchmarkMarshalMsgTierConfigMgr measures allocation-heavy marshaling.
func BenchmarkMarshalMsgTierConfigMgr(b *testing.B) {
	v := TierConfigMgr{}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.MarshalMsg(nil)
	}
}

// BenchmarkAppendMsgTierConfigMgr measures marshaling into a reused buffer.
func BenchmarkAppendMsgTierConfigMgr(b *testing.B) {
	v := TierConfigMgr{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

// BenchmarkUnmarshalTierConfigMgr measures zero-copy unmarshaling.
func BenchmarkUnmarshalTierConfigMgr(b *testing.B) {
	v := TierConfigMgr{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

// TestEncodeDecodeTierConfigMgr round-trips the zero value through the
// streaming Encode/Decode path and sanity-checks Msgsize.
func TestEncodeDecodeTierConfigMgr(t *testing.T) {
	v := TierConfigMgr{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Log("WARNING: TestEncodeDecodeTierConfigMgr Msgsize() is inaccurate")
	}

	vn := TierConfigMgr{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

// BenchmarkEncodeTierConfigMgr measures the streaming encoder.
func BenchmarkEncodeTierConfigMgr(b *testing.B) {
	v := TierConfigMgr{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

// BenchmarkDecodeTierConfigMgr measures the streaming decoder.
func BenchmarkDecodeTierConfigMgr(b *testing.B) {
	v := TierConfigMgr{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}
|
||||
188
cmd/warm-backend-azure.go
Normal file
188
cmd/warm-backend-azure.go
Normal file
@@ -0,0 +1,188 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
)
|
||||
|
||||
// warmBackendAzure is a warm backend driver that transitions objects to an
// Azure Blob Storage container.
type warmBackendAzure struct {
	serviceURL   azblob.ServiceURL // root service URL for the storage account
	Bucket       string            // Azure container name
	Prefix       string            // optional remote object name prefix
	StorageClass string            // optional Azure access tier name
}

// getDest returns the remote object name for object, prepending the
// configured prefix when one is set.
func (az *warmBackendAzure) getDest(object string) string {
	destObj := object
	if az.Prefix != "" {
		destObj = fmt.Sprintf("%s/%s", az.Prefix, object)
	}
	return destObj
}
|
||||
func (az *warmBackendAzure) tier() azblob.AccessTierType {
|
||||
for _, t := range azblob.PossibleAccessTierTypeValues() {
|
||||
if strings.ToLower(az.StorageClass) == strings.ToLower(string(t)) {
|
||||
return t
|
||||
}
|
||||
}
|
||||
return azblob.AccessTierType("")
|
||||
}
|
||||
func (az *warmBackendAzure) Put(ctx context.Context, object string, r io.Reader, length int64) error {
|
||||
blobURL := az.serviceURL.NewContainerURL(az.Bucket).NewBlockBlobURL(az.getDest(object))
|
||||
// set tier if specified -
|
||||
if az.StorageClass != "" {
|
||||
if _, err := blobURL.SetTier(ctx, az.tier(), azblob.LeaseAccessConditions{}); err != nil {
|
||||
return azureToObjectError(err, az.Bucket, object)
|
||||
}
|
||||
}
|
||||
_, err := azblob.UploadStreamToBlockBlob(ctx, r, blobURL, azblob.UploadStreamToBlockBlobOptions{})
|
||||
return azureToObjectError(err, az.Bucket, object)
|
||||
}
|
||||
|
||||
func (az *warmBackendAzure) Get(ctx context.Context, object string, opts WarmBackendGetOpts) (r io.ReadCloser, err error) {
|
||||
if opts.startOffset < 0 {
|
||||
return nil, InvalidRange{}
|
||||
}
|
||||
blobURL := az.serviceURL.NewContainerURL(az.Bucket).NewBlobURL(az.getDest(object))
|
||||
blob, err := blobURL.Download(ctx, opts.startOffset, opts.length, azblob.BlobAccessConditions{}, false)
|
||||
if err != nil {
|
||||
return nil, azureToObjectError(err, az.Bucket, object)
|
||||
}
|
||||
|
||||
rc := blob.Body(azblob.RetryReaderOptions{})
|
||||
return rc, nil
|
||||
}
|
||||
|
||||
func (az *warmBackendAzure) Remove(ctx context.Context, object string) error {
|
||||
blob := az.serviceURL.NewContainerURL(az.Bucket).NewBlobURL(az.getDest(object))
|
||||
_, err := blob.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
|
||||
return azureToObjectError(err, az.Bucket, object)
|
||||
}
|
||||
|
||||
func (az *warmBackendAzure) InUse(ctx context.Context) (bool, error) {
|
||||
containerURL := az.serviceURL.NewContainerURL(az.Bucket)
|
||||
resp, err := containerURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "/", azblob.ListBlobsSegmentOptions{
|
||||
Prefix: az.Prefix,
|
||||
MaxResults: int32(1),
|
||||
})
|
||||
if err != nil {
|
||||
return false, azureToObjectError(err, az.Bucket, az.Prefix)
|
||||
}
|
||||
if len(resp.Segment.BlobPrefixes) > 0 || len(resp.Segment.BlobItems) > 0 {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// newWarmBackendAzure constructs an Azure Blob Storage warm backend from conf,
// validating the shared-key credentials and building the account service URL.
func newWarmBackendAzure(conf madmin.TierAzure) (*warmBackendAzure, error) {
	credential, err := azblob.NewSharedKeyCredential(conf.AccountName, conf.AccountKey)
	if err != nil {
		// A base64 decode failure indicates a malformed account key.
		if _, ok := err.(base64.CorruptInputError); ok {
			return nil, errors.New("invalid Azure credentials")
		}
		return nil, err
	}
	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
	// Endpoint is derived from the account name; custom endpoints are not
	// supported here.
	u, err := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", conf.AccountName))
	if err != nil {
		return nil, err
	}
	serviceURL := azblob.NewServiceURL(*u, p)
	return &warmBackendAzure{
		serviceURL: serviceURL,
		Bucket:     conf.Bucket,
		// Trailing slash is stripped so getDest joins prefix/object cleanly.
		Prefix:       strings.TrimSuffix(conf.Prefix, slashSeparator),
		StorageClass: conf.StorageClass,
	}, nil
}
|
||||
|
||||
// Convert azure errors to minio object layer errors.
|
||||
func azureToObjectError(err error, params ...string) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
bucket := ""
|
||||
object := ""
|
||||
if len(params) >= 1 {
|
||||
bucket = params[0]
|
||||
}
|
||||
if len(params) == 2 {
|
||||
object = params[1]
|
||||
}
|
||||
|
||||
azureErr, ok := err.(azblob.StorageError)
|
||||
if !ok {
|
||||
// We don't interpret non Azure errors. As azure errors will
|
||||
// have StatusCode to help to convert to object errors.
|
||||
return err
|
||||
}
|
||||
|
||||
serviceCode := string(azureErr.ServiceCode())
|
||||
statusCode := azureErr.Response().StatusCode
|
||||
|
||||
return azureCodesToObjectError(err, serviceCode, statusCode, bucket, object)
|
||||
}
|
||||
|
||||
func azureCodesToObjectError(err error, serviceCode string, statusCode int, bucket string, object string) error {
|
||||
switch serviceCode {
|
||||
case "ContainerNotFound", "ContainerBeingDeleted":
|
||||
err = BucketNotFound{Bucket: bucket}
|
||||
case "ContainerAlreadyExists":
|
||||
err = BucketExists{Bucket: bucket}
|
||||
case "InvalidResourceName":
|
||||
err = BucketNameInvalid{Bucket: bucket}
|
||||
case "RequestBodyTooLarge":
|
||||
err = PartTooBig{}
|
||||
case "InvalidMetadata":
|
||||
err = UnsupportedMetadata{}
|
||||
case "BlobAccessTierNotSupportedForAccountType":
|
||||
err = NotImplemented{}
|
||||
case "OutOfRangeInput":
|
||||
err = ObjectNameInvalid{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
}
|
||||
default:
|
||||
switch statusCode {
|
||||
case http.StatusNotFound:
|
||||
if object != "" {
|
||||
err = ObjectNotFound{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
}
|
||||
} else {
|
||||
err = BucketNotFound{Bucket: bucket}
|
||||
}
|
||||
case http.StatusBadRequest:
|
||||
err = BucketNameInvalid{Bucket: bucket}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
203
cmd/warm-backend-gcs.go
Normal file
203
cmd/warm-backend-gcs.go
Normal file
@@ -0,0 +1,203 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"cloud.google.com/go/storage"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
)
|
||||
|
||||
// warmBackendGCS is a warm backend driver that transitions objects to a
// Google Cloud Storage bucket.
type warmBackendGCS struct {
	client       *storage.Client // GCS API client
	Bucket       string          // GCS bucket name
	Prefix       string          // optional remote object name prefix
	StorageClass string          // optional GCS storage class
}

// getDest returns the remote object name for object, prepending the
// configured prefix when one is set.
func (gcs *warmBackendGCS) getDest(object string) string {
	destObj := object
	if gcs.Prefix != "" {
		destObj = fmt.Sprintf("%s/%s", gcs.Prefix, object)
	}
	return destObj
}
|
||||
func (gcs *warmBackendGCS) Put(ctx context.Context, key string, data io.Reader, length int64) error {
|
||||
object := gcs.client.Bucket(gcs.Bucket).Object(gcs.getDest(key))
|
||||
//TODO: set storage class
|
||||
w := object.NewWriter(ctx)
|
||||
if gcs.StorageClass != "" {
|
||||
w.ObjectAttrs.StorageClass = gcs.StorageClass
|
||||
}
|
||||
if _, err := io.Copy(w, data); err != nil {
|
||||
return gcsToObjectError(err, gcs.Bucket, key)
|
||||
}
|
||||
|
||||
return w.Close()
|
||||
}
|
||||
|
||||
// Get returns a reader over the remote object's content, honoring the range
// given in opts.
func (gcs *warmBackendGCS) Get(ctx context.Context, key string, opts WarmBackendGetOpts) (r io.ReadCloser, err error) {
	// GCS storage decompresses a gzipped object by default and returns the data.
	// Refer to https://cloud.google.com/storage/docs/transcoding#decompressive_transcoding
	// Need to set `Accept-Encoding` header to `gzip` when issuing a GetObject call, to be able
	// to download the object in compressed state.
	// Calling ReadCompressed with true accomplishes that.
	object := gcs.client.Bucket(gcs.Bucket).Object(gcs.getDest(key)).ReadCompressed(true)

	r, err = object.NewRangeReader(ctx, opts.startOffset, opts.length)
	if err != nil {
		return nil, gcsToObjectError(err, gcs.Bucket, key)
	}
	return r, nil
}
|
||||
|
||||
func (gcs *warmBackendGCS) Remove(ctx context.Context, key string) error {
|
||||
err := gcs.client.Bucket(gcs.Bucket).Object(gcs.getDest(key)).Delete(ctx)
|
||||
return gcsToObjectError(err, gcs.Bucket, key)
|
||||
}
|
||||
|
||||
func (gcs *warmBackendGCS) InUse(ctx context.Context) (bool, error) {
|
||||
it := gcs.client.Bucket(gcs.Bucket).Objects(ctx, &storage.Query{
|
||||
Delimiter: "/",
|
||||
Prefix: gcs.Prefix,
|
||||
Versions: false,
|
||||
})
|
||||
pager := iterator.NewPager(it, 1, "")
|
||||
gcsObjects := make([]*storage.ObjectAttrs, 0)
|
||||
_, err := pager.NextPage(&gcsObjects)
|
||||
if err != nil {
|
||||
return false, gcsToObjectError(err, gcs.Bucket, gcs.Prefix)
|
||||
}
|
||||
if len(gcsObjects) > 0 {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// newWarmBackendGCS instantiates a GCS warm backend from the given tier
// configuration, creating a client with read-write scope from the
// service-account credentials JSON embedded in the config.
func newWarmBackendGCS(conf madmin.TierGCS) (*warmBackendGCS, error) {
	credsJSON, err := conf.GetCredentialJSON()
	if err != nil {
		return nil, err
	}

	// NOTE(review): unlike newWarmBackendS3, conf.Prefix is not stripped of a
	// trailing slash here — confirm the prefix is pre-normalized upstream.
	client, err := storage.NewClient(context.Background(), option.WithCredentialsJSON(credsJSON), option.WithScopes(storage.ScopeReadWrite))
	if err != nil {
		return nil, err
	}
	return &warmBackendGCS{client, conf.Bucket, conf.Prefix, conf.StorageClass}, nil
}
|
||||
|
||||
// Convert GCS errors to minio object layer errors.
|
||||
func gcsToObjectError(err error, params ...string) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
bucket := ""
|
||||
object := ""
|
||||
uploadID := ""
|
||||
if len(params) >= 1 {
|
||||
bucket = params[0]
|
||||
}
|
||||
if len(params) == 2 {
|
||||
object = params[1]
|
||||
}
|
||||
if len(params) == 3 {
|
||||
uploadID = params[2]
|
||||
}
|
||||
|
||||
// in some cases just a plain error is being returned
|
||||
switch err.Error() {
|
||||
case "storage: bucket doesn't exist":
|
||||
err = BucketNotFound{
|
||||
Bucket: bucket,
|
||||
}
|
||||
return err
|
||||
case "storage: object doesn't exist":
|
||||
if uploadID != "" {
|
||||
err = InvalidUploadID{
|
||||
UploadID: uploadID,
|
||||
}
|
||||
} else {
|
||||
err = ObjectNotFound{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
googleAPIErr, ok := err.(*googleapi.Error)
|
||||
if !ok {
|
||||
// We don't interpret non MinIO errors. As minio errors will
|
||||
// have StatusCode to help to convert to object errors.
|
||||
return err
|
||||
}
|
||||
|
||||
if len(googleAPIErr.Errors) == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
reason := googleAPIErr.Errors[0].Reason
|
||||
message := googleAPIErr.Errors[0].Message
|
||||
|
||||
switch reason {
|
||||
case "required":
|
||||
// Anonymous users does not have storage.xyz access to project 123.
|
||||
fallthrough
|
||||
case "keyInvalid":
|
||||
fallthrough
|
||||
case "forbidden":
|
||||
err = PrefixAccessDenied{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
}
|
||||
case "invalid":
|
||||
err = BucketNameInvalid{
|
||||
Bucket: bucket,
|
||||
}
|
||||
case "notFound":
|
||||
if object != "" {
|
||||
err = ObjectNotFound{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
}
|
||||
break
|
||||
}
|
||||
err = BucketNotFound{Bucket: bucket}
|
||||
case "conflict":
|
||||
if message == "You already own this bucket. Please select another name." {
|
||||
err = BucketAlreadyOwnedByYou{Bucket: bucket}
|
||||
break
|
||||
}
|
||||
if message == "Sorry, that name is not available. Please try a different one." {
|
||||
err = BucketAlreadyExists{Bucket: bucket}
|
||||
break
|
||||
}
|
||||
err = BucketNotEmpty{Bucket: bucket}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
New file: cmd/warm-backend-s3.go (126 lines)
@@ -0,0 +1,126 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
minio "github.com/minio/minio-go/v7"
|
||||
miniogo "github.com/minio/minio-go/v7"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
)
|
||||
|
||||
// warmBackendS3 implements the WarmBackend interface for an S3-compatible
// object store (including MinIO itself) used as a remote tier target.
type warmBackendS3 struct {
	client       *minio.Client // high-level S3 client
	core         *minio.Core   // lower-level client used by InUse for listing
	Bucket       string        // destination bucket name
	Prefix       string        // object-name prefix within the bucket (trailing slash trimmed by newWarmBackendS3)
	StorageClass string        // optional storage class applied on Put
}
|
||||
|
||||
func (s3 *warmBackendS3) ToObjectError(err error, params ...string) error {
|
||||
object := ""
|
||||
if len(params) >= 1 {
|
||||
object = params[0]
|
||||
}
|
||||
|
||||
return ErrorRespToObjectError(err, s3.Bucket, s3.getDest(object))
|
||||
}
|
||||
|
||||
func (s3 *warmBackendS3) getDest(object string) string {
|
||||
destObj := object
|
||||
if s3.Prefix != "" {
|
||||
destObj = fmt.Sprintf("%s/%s", s3.Prefix, object)
|
||||
}
|
||||
return destObj
|
||||
}
|
||||
|
||||
func (s3 *warmBackendS3) Put(ctx context.Context, object string, r io.Reader, length int64) error {
|
||||
_, err := s3.client.PutObject(ctx, s3.Bucket, s3.getDest(object), r, length, minio.PutObjectOptions{StorageClass: s3.StorageClass})
|
||||
return s3.ToObjectError(err, object)
|
||||
}
|
||||
|
||||
func (s3 *warmBackendS3) Get(ctx context.Context, object string, opts WarmBackendGetOpts) (io.ReadCloser, error) {
|
||||
gopts := minio.GetObjectOptions{}
|
||||
|
||||
if opts.startOffset >= 0 && opts.length > 0 {
|
||||
if err := gopts.SetRange(opts.startOffset, opts.startOffset+opts.length-1); err != nil {
|
||||
return nil, s3.ToObjectError(err, object)
|
||||
}
|
||||
}
|
||||
c := &miniogo.Core{Client: s3.client}
|
||||
// Important to use core primitives here to pass range get options as is.
|
||||
r, _, _, err := c.GetObject(ctx, s3.Bucket, s3.getDest(object), gopts)
|
||||
if err != nil {
|
||||
return nil, s3.ToObjectError(err, object)
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (s3 *warmBackendS3) Remove(ctx context.Context, object string) error {
|
||||
err := s3.client.RemoveObject(ctx, s3.Bucket, s3.getDest(object), minio.RemoveObjectOptions{})
|
||||
return s3.ToObjectError(err, object)
|
||||
}
|
||||
|
||||
func (s3 *warmBackendS3) InUse(ctx context.Context) (bool, error) {
|
||||
result, err := s3.core.ListObjectsV2(s3.Bucket, s3.Prefix, "", false, "/", 1)
|
||||
if err != nil {
|
||||
return false, s3.ToObjectError(err)
|
||||
}
|
||||
if len(result.CommonPrefixes) > 0 || len(result.Contents) > 0 {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func newWarmBackendS3(conf madmin.TierS3) (*warmBackendS3, error) {
|
||||
u, err := url.Parse(conf.Endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
creds := credentials.NewStaticV4(conf.AccessKey, conf.SecretKey, "")
|
||||
getRemoteTargetInstanceTransportOnce.Do(func() {
|
||||
getRemoteTargetInstanceTransport = newGatewayHTTPTransport(10 * time.Minute)
|
||||
})
|
||||
opts := &minio.Options{
|
||||
Creds: creds,
|
||||
Secure: u.Scheme == "https",
|
||||
Transport: getRemoteTargetInstanceTransport,
|
||||
}
|
||||
client, err := minio.New(u.Host, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
core, err := minio.NewCore(u.Host, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &warmBackendS3{
|
||||
client: client,
|
||||
core: core,
|
||||
Bucket: conf.Bucket,
|
||||
Prefix: strings.TrimSuffix(conf.Prefix, slashSeparator),
|
||||
StorageClass: conf.StorageClass,
|
||||
}, nil
|
||||
}
|
||||
New file: cmd/warm-backend.go (140 lines)
@@ -0,0 +1,140 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
)
|
||||
|
||||
// WarmBackendGetOpts is used to express byte ranges within an object. The zero
// value represents the entire byte range of an object.
type WarmBackendGetOpts struct {
	startOffset int64 // offset of the first byte to read
	length      int64 // number of bytes to read from startOffset
}
|
||||
|
||||
// WarmBackend provides interface to be implemented by remote tier backends
type WarmBackend interface {
	// Put uploads length bytes from r as the remote object named object.
	Put(ctx context.Context, object string, r io.Reader, length int64) error
	// Get returns a reader over object restricted to the byte range in opts;
	// the zero value of opts reads the whole object.
	Get(ctx context.Context, object string, opts WarmBackendGetOpts) (io.ReadCloser, error)
	// Remove deletes the remote object named object.
	Remove(ctx context.Context, object string) error
	// InUse reports whether the backend's bucket/prefix already holds objects.
	InUse(ctx context.Context) (bool, error)
}
|
||||
|
||||
const probeObject = "probeobject"
|
||||
|
||||
// checkWarmBackend checks if tier config credentials have sufficient privileges
|
||||
// to perform all operations defined in the WarmBackend interface.
|
||||
func checkWarmBackend(ctx context.Context, w WarmBackend) error {
|
||||
var empty bytes.Reader
|
||||
err := w.Put(ctx, probeObject, &empty, 0)
|
||||
if err != nil {
|
||||
return tierPermErr{
|
||||
Op: tierPut,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
_, err = w.Get(ctx, probeObject, WarmBackendGetOpts{})
|
||||
if err != nil {
|
||||
switch {
|
||||
case isErrBucketNotFound(err):
|
||||
return errTierBucketNotFound
|
||||
case isErrSignatureDoesNotMatch(err):
|
||||
return errTierInvalidCredentials
|
||||
default:
|
||||
return tierPermErr{
|
||||
Op: tierGet,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err = w.Remove(ctx, probeObject); err != nil {
|
||||
return tierPermErr{
|
||||
Op: tierDelete,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// tierOp enumerates the remote-tier operations exercised by checkWarmBackend.
type tierOp uint8

const (
	_ tierOp = iota
	tierGet
	tierPut
	tierDelete
)

// tierOpNames maps each tierOp to its display name.
var tierOpNames = map[tierOp]string{
	tierGet:    "GET",
	tierPut:    "PUT",
	tierDelete: "DELETE",
}

// String returns the display name of the tier operation.
func (op tierOp) String() string {
	if name, ok := tierOpNames[op]; ok {
		return name
	}
	return "UNKNOWN"
}

// tierPermErr wraps an error encountered while performing Op against a
// remote tier during the credentials check.
type tierPermErr struct {
	Op  tierOp
	Err error
}

// Error implements the error interface.
func (te tierPermErr) Error() string {
	return fmt.Sprintf("failed to perform %s %v", te.Op, te.Err)
}

// errIsTierPermError returns true if err is a tierPermErr.
func errIsTierPermError(err error) bool {
	var tpErr tierPermErr
	return errors.As(err, &tpErr)
}
|
||||
|
||||
// newWarmBackend instantiates the tier type specific WarmBackend, runs
|
||||
// checkWarmBackend on it.
|
||||
func newWarmBackend(ctx context.Context, tier madmin.TierConfig) (d WarmBackend, err error) {
|
||||
switch tier.Type {
|
||||
case madmin.S3:
|
||||
d, err = newWarmBackendS3(*tier.S3)
|
||||
case madmin.Azure:
|
||||
d, err = newWarmBackendAzure(*tier.Azure)
|
||||
case madmin.GCS:
|
||||
d, err = newWarmBackendGCS(*tier.GCS)
|
||||
default:
|
||||
return nil, errTierTypeUnsupported
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errTierTypeUnsupported
|
||||
}
|
||||
|
||||
err = checkWarmBackend(ctx, d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
@@ -47,7 +47,6 @@ import (
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
|
||||
"github.com/minio/minio/pkg/bucket/policy"
|
||||
"github.com/minio/minio/pkg/bucket/replication"
|
||||
@@ -766,8 +765,13 @@ next:
|
||||
if _, err := globalBucketMetadataSys.GetLifecycleConfig(args.BucketName); err == nil {
|
||||
hasLifecycleConfig = true
|
||||
}
|
||||
os := newObjSweeper(args.BucketName, objectName)
|
||||
opts = os.GetOpts()
|
||||
if hasReplicationRules(ctx, args.BucketName, []ObjectToDelete{{ObjectName: objectName}}) || hasLifecycleConfig {
|
||||
goi, gerr = getObjectInfoFn(ctx, args.BucketName, objectName, opts)
|
||||
if gerr == nil {
|
||||
os.SetTransitionState(goi)
|
||||
}
|
||||
if replicateDel, replicateSync = checkReplicateDelete(ctx, args.BucketName, ObjectToDelete{
|
||||
ObjectName: objectName,
|
||||
VersionID: goi.VersionID,
|
||||
@@ -823,18 +827,9 @@ next:
|
||||
}
|
||||
scheduleReplicationDelete(ctx, dobj, objectAPI, replicateSync)
|
||||
}
|
||||
if goi.TransitionStatus == lifecycle.TransitionComplete {
|
||||
deleteTransitionedObject(ctx, objectAPI, args.BucketName, objectName, lifecycle.ObjectOpts{
|
||||
Name: objectName,
|
||||
UserTags: goi.UserTags,
|
||||
VersionID: goi.VersionID,
|
||||
DeleteMarker: goi.DeleteMarker,
|
||||
TransitionStatus: goi.TransitionStatus,
|
||||
IsLatest: goi.IsLatest,
|
||||
}, false, true)
|
||||
}
|
||||
|
||||
logger.LogIf(ctx, err)
|
||||
logger.LogIf(ctx, os.Sweep())
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -1322,6 +1317,13 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
|
||||
opts.UserDefined[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = retentionDate.UTC().Format(iso8601TimeFormat)
|
||||
}
|
||||
|
||||
os := newObjSweeper(bucket, object)
|
||||
// Get appropriate object info to identify the remote object to delete
|
||||
goiOpts := os.GetOpts()
|
||||
if goi, gerr := getObjectInfo(ctx, bucket, object, goiOpts); gerr == nil {
|
||||
os.SetTransitionState(goi)
|
||||
}
|
||||
|
||||
objInfo, err := putObject(GlobalContext, bucket, object, pReader, opts)
|
||||
if err != nil {
|
||||
writeWebErrorResponse(w, err)
|
||||
@@ -1339,6 +1341,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
|
||||
if mustReplicate {
|
||||
scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync, replication.ObjectReplicationType)
|
||||
}
|
||||
logger.LogIf(ctx, os.Sweep())
|
||||
|
||||
reqParams := extractReqParams(r)
|
||||
reqParams["accessKey"] = claims.GetAccessKey()
|
||||
|
||||
@@ -24,16 +24,6 @@ import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// isSysErrNoSys returns true when err is, or wraps, syscall.ENOSYS
// ("function not implemented").
func isSysErrNoSys(err error) bool {
	return errors.Is(err, syscall.ENOSYS)
}
|
||||
|
||||
// isSysErrOpNotSupported returns true when err is, or wraps,
// syscall.EOPNOTSUPP ("operation not supported").
func isSysErrOpNotSupported(err error) bool {
	return errors.Is(err, syscall.EOPNOTSUPP)
}
|
||||
|
||||
// No space left on device error
|
||||
func isSysErrNoSpace(err error) bool {
|
||||
return errors.Is(err, syscall.ENOSPC)
|
||||
|
||||
@@ -103,11 +103,13 @@ func getFileInfo(xlMetaBuf []byte, volume, path, versionID string, data bool) (F
|
||||
if err := json.Unmarshal(xlMetaBuf, xlMeta); err != nil {
|
||||
return FileInfo{}, errFileCorrupt
|
||||
}
|
||||
|
||||
fi, err := xlMeta.ToFileInfo(volume, path)
|
||||
if err == errFileNotFound && versionID != "" {
|
||||
return fi, errFileVersionNotFound
|
||||
}
|
||||
fi.XLV1 = true // indicates older version
|
||||
fi.IsLatest = true // No versions so current version is latest.
|
||||
fi.XLV1 = true // indicates older version
|
||||
return fi, err
|
||||
}
|
||||
|
||||
|
||||
@@ -177,12 +177,15 @@ func (c *ChecksumInfo) UnmarshalJSON(data []byte) error {
|
||||
}
|
||||
|
||||
// constant and shouldn't be changed.
|
||||
const legacyDataDir = "legacy"
|
||||
const (
|
||||
legacyDataDir = "legacy"
|
||||
)
|
||||
|
||||
func (m *xlMetaV1Object) ToFileInfo(volume, path string) (FileInfo, error) {
|
||||
if !m.valid() {
|
||||
return FileInfo{}, errFileCorrupt
|
||||
}
|
||||
|
||||
fi := FileInfo{
|
||||
Volume: volume,
|
||||
Name: path,
|
||||
@@ -194,9 +197,6 @@ func (m *xlMetaV1Object) ToFileInfo(volume, path string) (FileInfo, error) {
|
||||
VersionID: m.VersionID,
|
||||
DataDir: m.DataDir,
|
||||
}
|
||||
if st, ok := m.Meta[ReservedMetadataPrefixLower+"transition-status"]; ok {
|
||||
fi.TransitionStatus = st
|
||||
}
|
||||
return fi, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
@@ -30,6 +30,7 @@ import (
|
||||
"github.com/google/uuid"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
)
|
||||
|
||||
@@ -841,6 +842,16 @@ func (z *xlMetaV2) AddVersion(fi FileInfo) error {
|
||||
if len(fi.Data) > 0 || fi.Size == 0 {
|
||||
z.data.replace(fi.VersionID, fi.Data)
|
||||
}
|
||||
|
||||
if fi.TransitionStatus != "" {
|
||||
ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionStatus] = []byte(fi.TransitionStatus)
|
||||
}
|
||||
if fi.TransitionedObjName != "" {
|
||||
ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionedObjectName] = []byte(fi.TransitionedObjName)
|
||||
}
|
||||
if fi.TransitionTier != "" {
|
||||
ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionTier] = []byte(fi.TransitionTier)
|
||||
}
|
||||
}
|
||||
|
||||
if !ventry.Valid() {
|
||||
@@ -910,6 +921,18 @@ func (j xlMetaV2DeleteMarker) ToFileInfo(volume, path string) (FileInfo, error)
|
||||
return fi, nil
|
||||
}
|
||||
|
||||
// UsesDataDir returns true if this object version uses its data directory for
// its contents and false otherwise.
//
// Only a version whose transition-status metadata marks it as transitioned to
// a remote tier — and which has not since been restored on disk — stores no
// content in its data directory.
func (j *xlMetaV2Object) UsesDataDir() bool {
	// Skip if this version is not transitioned, i.e it uses its data directory.
	if !bytes.Equal(j.MetaSys[ReservedMetadataPrefixLower+TransitionStatus], []byte(lifecycle.TransitionComplete)) {
		return true
	}

	// Check if this transitioned object has been restored on disk.
	return isRestoredObjectOnDisk(j.MetaUser)
}
|
||||
|
||||
func (j xlMetaV2Object) ToFileInfo(volume, path string) (FileInfo, error) {
|
||||
versionID := ""
|
||||
var uv uuid.UUID
|
||||
@@ -953,8 +976,6 @@ func (j xlMetaV2Object) ToFileInfo(volume, path string) (FileInfo, error) {
|
||||
}
|
||||
for k, v := range j.MetaSys {
|
||||
switch {
|
||||
case equals(k, ReservedMetadataPrefixLower+"transition-status"):
|
||||
fi.TransitionStatus = string(v)
|
||||
case equals(k, VersionPurgeStatusKey):
|
||||
fi.VersionPurgeStatus = VersionPurgeStatusType(string(v))
|
||||
case strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower):
|
||||
@@ -972,6 +993,15 @@ func (j xlMetaV2Object) ToFileInfo(volume, path string) (FileInfo, error) {
|
||||
}
|
||||
fi.DataDir = uuid.UUID(j.DataDir).String()
|
||||
|
||||
if st, ok := j.MetaSys[ReservedMetadataPrefixLower+TransitionStatus]; ok {
|
||||
fi.TransitionStatus = string(st)
|
||||
}
|
||||
if o, ok := j.MetaSys[ReservedMetadataPrefixLower+TransitionedObjectName]; ok {
|
||||
fi.TransitionedObjName = string(o)
|
||||
}
|
||||
if sc, ok := j.MetaSys[ReservedMetadataPrefixLower+TransitionTier]; ok {
|
||||
fi.TransitionTier = string(sc)
|
||||
}
|
||||
return fi, nil
|
||||
}
|
||||
|
||||
@@ -1009,7 +1039,10 @@ func (z *xlMetaV2) SharedDataDirCount(versionID [16]byte, dataDir [16]byte) int
|
||||
if version.ObjectV2.VersionID == versionID {
|
||||
continue
|
||||
}
|
||||
if version.ObjectV2.DataDir == dataDir {
|
||||
if version.ObjectV2.DataDir != dataDir {
|
||||
continue
|
||||
}
|
||||
if version.ObjectV2.UsesDataDir() {
|
||||
sameDataDirCount++
|
||||
}
|
||||
}
|
||||
@@ -1083,11 +1116,6 @@ func (z *xlMetaV2) DeleteVersion(fi FileInfo) (string, bool, error) {
|
||||
switch version.Type {
|
||||
case LegacyType:
|
||||
if version.ObjectV1.VersionID == fi.VersionID {
|
||||
if fi.TransitionStatus != "" {
|
||||
z.Versions[i].ObjectV1.Meta[ReservedMetadataPrefixLower+"transition-status"] = fi.TransitionStatus
|
||||
return uuid.UUID(version.ObjectV2.DataDir).String(), len(z.Versions) == 0, nil
|
||||
}
|
||||
|
||||
z.Versions = append(z.Versions[:i], z.Versions[i+1:]...)
|
||||
if fi.Deleted {
|
||||
z.Versions = append(z.Versions, ventry)
|
||||
@@ -1131,22 +1159,27 @@ func (z *xlMetaV2) DeleteVersion(fi FileInfo) (string, bool, error) {
|
||||
switch version.Type {
|
||||
case ObjectType:
|
||||
if version.ObjectV2.VersionID == uv {
|
||||
if fi.TransitionStatus != "" {
|
||||
z.Versions[i].ObjectV2.MetaSys[ReservedMetadataPrefixLower+"transition-status"] = []byte(fi.TransitionStatus)
|
||||
return uuid.UUID(version.ObjectV2.DataDir).String(), len(z.Versions) == 0, nil
|
||||
switch {
|
||||
case fi.ExpireRestored:
|
||||
delete(z.Versions[i].ObjectV2.MetaUser, xhttp.AmzRestore)
|
||||
delete(z.Versions[i].ObjectV2.MetaUser, xhttp.AmzRestoreExpiryDays)
|
||||
delete(z.Versions[i].ObjectV2.MetaUser, xhttp.AmzRestoreRequestDate)
|
||||
case fi.TransitionStatus == lifecycle.TransitionComplete:
|
||||
z.Versions[i].ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionStatus] = []byte(fi.TransitionStatus)
|
||||
z.Versions[i].ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionedObjectName] = []byte(fi.TransitionedObjName)
|
||||
z.Versions[i].ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionTier] = []byte(fi.TransitionTier)
|
||||
default:
|
||||
z.Versions = append(z.Versions[:i], z.Versions[i+1:]...)
|
||||
}
|
||||
|
||||
if fi.Deleted {
|
||||
z.Versions = append(z.Versions, ventry)
|
||||
}
|
||||
z.Versions = append(z.Versions[:i], z.Versions[i+1:]...)
|
||||
if z.SharedDataDirCount(version.ObjectV2.VersionID, version.ObjectV2.DataDir) > 0 {
|
||||
if fi.Deleted {
|
||||
z.Versions = append(z.Versions, ventry)
|
||||
}
|
||||
// Found that another version references the same dataDir
|
||||
// we shouldn't remove it, and only remove the version instead
|
||||
return "", len(z.Versions) == 0, nil
|
||||
}
|
||||
if fi.Deleted {
|
||||
z.Versions = append(z.Versions, ventry)
|
||||
}
|
||||
return uuid.UUID(version.ObjectV2.DataDir).String(), len(z.Versions) == 0, nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
@@ -1,20 +1,3 @@
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
@@ -21,6 +21,10 @@ import (
|
||||
"bytes"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||
)
|
||||
|
||||
func TestXLV2FormatData(t *testing.T) {
|
||||
@@ -142,6 +146,7 @@ func TestXLV2FormatData(t *testing.T) {
|
||||
failOnErr(xl2.Load(trimmed))
|
||||
if len(xl2.data) != 0 {
|
||||
t.Fatal("data, was not trimmed, bytes left:", len(xl2.data))
|
||||
|
||||
}
|
||||
// Corrupt metadata, last 5 bytes is the checksum, so go a bit further back.
|
||||
trimmed[len(trimmed)-10] += 10
|
||||
@@ -149,3 +154,219 @@ func TestXLV2FormatData(t *testing.T) {
|
||||
t.Fatal("metadata corruption not detected")
|
||||
}
|
||||
}
|
||||
|
||||
// TestUsesDataDir tests xlMetaV2.UsesDataDir across the four ILM states an
// object version can be in: transitioned, transition+restore-pending,
// transition+restored, transition+restore-expired — plus no ILM at all.
func TestUsesDataDir(t *testing.T) {
	vID := uuid.New()
	dataDir := uuid.New()
	// System metadata marking a version as transitioned to a remote tier.
	transitioned := make(map[string][]byte)
	transitioned[ReservedMetadataPrefixLower+TransitionStatus] = []byte(lifecycle.TransitionComplete)

	// User metadata for a restore-object request still in progress.
	toBeRestored := make(map[string]string)
	toBeRestored[xhttp.AmzRestore] = ongoingRestoreObj().String()

	// User metadata for a completed restore valid for another hour.
	restored := make(map[string]string)
	restored[xhttp.AmzRestore] = completedRestoreObj(time.Now().UTC().Add(time.Hour)).String()

	// User metadata for a completed restore that expired an hour ago.
	restoredExpired := make(map[string]string)
	restoredExpired[xhttp.AmzRestore] = completedRestoreObj(time.Now().UTC().Add(-time.Hour)).String()

	testCases := []struct {
		xlmeta xlMetaV2Object
		uses   bool
	}{
		{ // transitioned object version
			xlmeta: xlMetaV2Object{
				VersionID: vID,
				DataDir:   dataDir,
				MetaSys:   transitioned,
			},
			uses: false,
		},
		{ // to be restored (requires object version to be transitioned)
			xlmeta: xlMetaV2Object{
				VersionID: vID,
				DataDir:   dataDir,
				MetaSys:   transitioned,
				MetaUser:  toBeRestored,
			},
			uses: false,
		},
		{ // restored object version (requires object version to be transitioned)
			xlmeta: xlMetaV2Object{
				VersionID: vID,
				DataDir:   dataDir,
				MetaSys:   transitioned,
				MetaUser:  restored,
			},
			uses: true,
		},
		{ // restored object version expired an hour back (requires object version to be transitioned)
			xlmeta: xlMetaV2Object{
				VersionID: vID,
				DataDir:   dataDir,
				MetaSys:   transitioned,
				MetaUser:  restoredExpired,
			},
			uses: false,
		},
		{ // object version with no ILM applied
			xlmeta: xlMetaV2Object{
				VersionID: vID,
				DataDir:   dataDir,
			},
			uses: true,
		},
	}
	for i, tc := range testCases {
		if got := tc.xlmeta.UsesDataDir(); got != tc.uses {
			t.Fatalf("Test %d: Expected %v but got %v for %v", i+1, tc.uses, got, tc.xlmeta)
		}
	}
}
|
||||
|
||||
func TestDeleteVersionWithSharedDataDir(t *testing.T) {
|
||||
failOnErr := func(i int, err error) {
|
||||
t.Helper()
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: failed with %v", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
data := []byte("some object data")
|
||||
data2 := []byte("some other object data")
|
||||
|
||||
xl := xlMetaV2{}
|
||||
fi := FileInfo{
|
||||
Volume: "volume",
|
||||
Name: "object-name",
|
||||
VersionID: "756100c6-b393-4981-928a-d49bbc164741",
|
||||
IsLatest: true,
|
||||
Deleted: false,
|
||||
TransitionStatus: "",
|
||||
DataDir: "bffea160-ca7f-465f-98bc-9b4f1c3ba1ef",
|
||||
XLV1: false,
|
||||
ModTime: time.Now(),
|
||||
Size: 0,
|
||||
Mode: 0,
|
||||
Metadata: nil,
|
||||
Parts: nil,
|
||||
Erasure: ErasureInfo{
|
||||
Algorithm: ReedSolomon.String(),
|
||||
DataBlocks: 4,
|
||||
ParityBlocks: 2,
|
||||
BlockSize: 10000,
|
||||
Index: 1,
|
||||
Distribution: []int{1, 2, 3, 4, 5, 6, 7, 8},
|
||||
Checksums: []ChecksumInfo{{
|
||||
PartNumber: 1,
|
||||
Algorithm: HighwayHash256S,
|
||||
Hash: nil,
|
||||
}},
|
||||
},
|
||||
MarkDeleted: false,
|
||||
DeleteMarkerReplicationStatus: "",
|
||||
VersionPurgeStatus: "",
|
||||
Data: data,
|
||||
NumVersions: 1,
|
||||
SuccessorModTime: time.Time{},
|
||||
}
|
||||
|
||||
d0, d1, d2 := mustGetUUID(), mustGetUUID(), mustGetUUID()
|
||||
testCases := []struct {
|
||||
versionID string
|
||||
dataDir string
|
||||
data []byte
|
||||
shares int
|
||||
transitionStatus string
|
||||
restoreObjStatus string
|
||||
expireRestored bool
|
||||
expectedDataDir string
|
||||
}{
|
||||
{ // object versions with inlined data don't count towards shared data directory
|
||||
versionID: mustGetUUID(),
|
||||
dataDir: d0,
|
||||
data: data,
|
||||
shares: 0,
|
||||
},
|
||||
{ // object versions with inlined data don't count towards shared data directory
|
||||
versionID: mustGetUUID(),
|
||||
dataDir: d1,
|
||||
data: data2,
|
||||
shares: 0,
|
||||
},
|
||||
{ // transitioned object version don't count towards shared data directory
|
||||
versionID: mustGetUUID(),
|
||||
dataDir: d2,
|
||||
shares: 3,
|
||||
transitionStatus: lifecycle.TransitionComplete,
|
||||
},
|
||||
{ // transitioned object version with an ongoing restore-object request.
|
||||
versionID: mustGetUUID(),
|
||||
dataDir: d2,
|
||||
shares: 3,
|
||||
transitionStatus: lifecycle.TransitionComplete,
|
||||
restoreObjStatus: ongoingRestoreObj().String(),
|
||||
},
|
||||
// The following versions are on-disk.
|
||||
{ // restored object version expiring 10 hours from now.
|
||||
versionID: mustGetUUID(),
|
||||
dataDir: d2,
|
||||
shares: 2,
|
||||
transitionStatus: lifecycle.TransitionComplete,
|
||||
restoreObjStatus: completedRestoreObj(time.Now().Add(10 * time.Hour)).String(),
|
||||
expireRestored: true,
|
||||
},
|
||||
{
|
||||
versionID: mustGetUUID(),
|
||||
dataDir: d2,
|
||||
shares: 2,
|
||||
},
|
||||
{
|
||||
versionID: mustGetUUID(),
|
||||
dataDir: d2,
|
||||
shares: 2,
|
||||
expectedDataDir: d2,
|
||||
},
|
||||
}
|
||||
|
||||
var fileInfos []FileInfo
|
||||
for i, tc := range testCases {
|
||||
fi := fi
|
||||
fi.VersionID = tc.versionID
|
||||
fi.DataDir = tc.dataDir
|
||||
fi.Data = tc.data
|
||||
if tc.data == nil {
|
||||
fi.Size = 42 // to prevent inlining of data
|
||||
}
|
||||
if tc.restoreObjStatus != "" {
|
||||
fi.Metadata = map[string]string{
|
||||
xhttp.AmzRestore: tc.restoreObjStatus,
|
||||
}
|
||||
}
|
||||
fi.TransitionStatus = tc.transitionStatus
|
||||
failOnErr(i+1, xl.AddVersion(fi))
|
||||
fi.ExpireRestored = tc.expireRestored
|
||||
fileInfos = append(fileInfos, fi)
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
version := xl.Versions[i]
|
||||
if actual := xl.SharedDataDirCount(version.ObjectV2.VersionID, version.ObjectV2.DataDir); actual != tc.shares {
|
||||
t.Fatalf("Test %d: For %#v, expected sharers of data directory %d got %d", i+1, version.ObjectV2, tc.shares, actual)
|
||||
}
|
||||
}
|
||||
|
||||
// Deleting fileInfos[4].VersionID, fileInfos[5].VersionID should return empty data dir; there are other object version sharing the data dir.
|
||||
// Subsequently deleting fileInfos[6].versionID should return fileInfos[6].dataDir since there are no other object versions sharing this data dir.
|
||||
count := len(testCases)
|
||||
for i := 4; i < len(testCases); i++ {
|
||||
tc := testCases[i]
|
||||
dataDir, _, err := xl.DeleteVersion(fileInfos[i])
|
||||
failOnErr(count+1, err)
|
||||
if dataDir != tc.expectedDataDir {
|
||||
t.Fatalf("Expected %s but got %s", tc.expectedDataDir, dataDir)
|
||||
}
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
@@ -845,37 +845,28 @@ func (s *xlStorage) DeleteVersion(ctx context.Context, volume, path string, fi F
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// transitioned objects maintains metadata on the source cluster. When transition
|
||||
// status is set, update the metadata to disk.
|
||||
if !lastVersion || fi.TransitionStatus != "" {
|
||||
// when data-dir is specified. Transition leverages existing DeleteObject
|
||||
// api call to mark object as deleted. When object is pending transition,
|
||||
// just update the metadata and avoid deleting data dir.
|
||||
if dataDir != "" && fi.TransitionStatus != lifecycle.TransitionPending {
|
||||
versionID := fi.VersionID
|
||||
if versionID == "" {
|
||||
versionID = nullVersionID
|
||||
}
|
||||
xlMeta.data.remove(versionID)
|
||||
// PR #11758 used DataDir, preserve it
|
||||
// for users who might have used master
|
||||
// branch
|
||||
xlMeta.data.remove(dataDir)
|
||||
|
||||
filePath := pathJoin(volumeDir, path, dataDir)
|
||||
if err = checkPathLength(filePath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tmpuuid := mustGetUUID()
|
||||
if err = renameAll(filePath, pathutil.Join(s.diskPath, minioMetaTmpDeletedBucket, tmpuuid)); err != nil {
|
||||
if err != errFileNotFound {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if dataDir != "" {
|
||||
versionID := fi.VersionID
|
||||
if versionID == "" {
|
||||
versionID = nullVersionID
|
||||
}
|
||||
xlMeta.data.remove(versionID)
|
||||
// PR #11758 used DataDir, preserve it
|
||||
// for users who might have used master
|
||||
// branch
|
||||
xlMeta.data.remove(dataDir)
|
||||
filePath := pathJoin(volumeDir, path, dataDir)
|
||||
if err = checkPathLength(filePath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = renameAll(filePath, pathutil.Join(s.diskPath, minioMetaTmpDeletedBucket, mustGetUUID())); err != nil {
|
||||
if err != errFileNotFound {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if !lastVersion {
|
||||
buf, err = xlMeta.AppendTo(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1867,7 +1858,8 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
|
||||
var srcDataPath string
|
||||
var dstDataPath string
|
||||
dataDir := retainSlash(fi.DataDir)
|
||||
if dataDir != "" {
|
||||
// no need to rename dataDir paths for objects that are in transitionComplete state.
|
||||
if dataDir != "" && fi.TransitionStatus != lifecycle.TransitionComplete {
|
||||
srcDataPath = retainSlash(pathJoin(srcVolumeDir, srcPath, dataDir))
|
||||
// make sure to always use path.Join here, do not use pathJoin as
|
||||
// it would additionally add `/` at the end and it comes in the
|
||||
|
||||
Reference in New Issue
Block a user