reduce all major allocations in replication path (#18032)
- remove targetClient from being passed around via replicationObjectInfo{}
- remove unnecessary cloning of object info
- remove objectInfo from replicationObjectInfo{} (only require necessary fields)
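The common thread in the hunks below: replication used to carry a full `ObjectInfo` (which holds maps and slices) inside `ReplicateObjectInfo` and clone it per target; now `ReplicateObjectInfo` carries only the scalar fields the hot path reads, and the heavyweight struct is materialized lazily (via `ToObjectInfo()`) on cold paths. A minimal standalone sketch of that pattern, using illustrative stand-in types rather than minio's actual definitions:

```go
// Sketch: carry only the scalar fields replication needs, materialize the
// heavyweight struct lazily. Types here are illustrative, not minio's.
package main

import (
	"fmt"
	"time"
)

// heavyInfo stands in for ObjectInfo: maps make copies expensive.
type heavyInfo struct {
	Bucket, Name, VersionID string
	ModTime                 time.Time
	UserDefined             map[string]string // deep-copied on every clone
}

// slimInfo stands in for ReplicateObjectInfo after this commit: value-only
// fields, so passing it by value performs no heap allocation.
type slimInfo struct {
	Bucket, Name, VersionID string
	ModTime                 time.Time
}

// toHeavy rebuilds the full struct only on the rare paths (events, errors)
// that actually need it.
func (s slimInfo) toHeavy() heavyInfo {
	return heavyInfo{Bucket: s.Bucket, Name: s.Name, VersionID: s.VersionID, ModTime: s.ModTime}
}

func main() {
	ri := slimInfo{Bucket: "photos", Name: "a.jpg", VersionID: "v1", ModTime: time.Now()}
	// Hot path reads ri.Bucket/ri.Name directly; no clone, no map copy.
	fmt.Println(ri.Bucket, ri.Name)
	// Cold path (e.g. emitting a notification) pays the construction cost once.
	fmt.Printf("%+v\n", ri.toHeavy())
}
```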
@@ -224,21 +224,19 @@ func (o mustReplicateOptions) isMetadataReplication() bool {
 	return o.opType == replication.MetadataReplicationType
 }
 
-func getMustReplicateOptions(o ObjectInfo, op replication.Type, opts ObjectOptions) mustReplicateOptions {
-	if !op.Valid() {
-		op = replication.ObjectReplicationType
-		if o.metadataOnly {
-			op = replication.MetadataReplicationType
-		}
-	}
-	meta := cloneMSS(o.UserDefined)
-	if o.UserTags != "" {
-		meta[xhttp.AmzObjectTagging] = o.UserTags
+func (o ObjectInfo) getMustReplicateOptions(op replication.Type, opts ObjectOptions) mustReplicateOptions {
+	return getMustReplicateOptions(o.UserDefined, o.UserTags, o.ReplicationStatus, op, opts)
+}
+
+func getMustReplicateOptions(userDefined map[string]string, userTags string, status replication.StatusType, op replication.Type, opts ObjectOptions) mustReplicateOptions {
+	meta := cloneMSS(userDefined)
+	if userTags != "" {
+		meta[xhttp.AmzObjectTagging] = userTags
 	}
 
 	return mustReplicateOptions{
 		meta:               meta,
-		status:             o.ReplicationStatus,
+		status:             status,
 		opType:             op,
 		replicationRequest: opts.ReplicationRequest,
 	}
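This hunk splits `getMustReplicateOptions` into a thin `ObjectInfo` method that forwards to a plain-argument helper, so callers holding only the pieces (a metadata map, tags, a status) no longer need to assemble a full `ObjectInfo`. A self-contained analogue of the refactor, with illustrative names:

```go
// Sketch of the method-wrapper-over-plain-helper split. Names are
// illustrative, not minio's.
package main

import "fmt"

type object struct {
	UserDefined map[string]string
	UserTags    string
	Status      string
}

type options struct {
	meta   map[string]string
	status string
}

// Method form: convenient when you already have the whole object.
func (o object) getOptions() options {
	return getOptions(o.UserDefined, o.UserTags, o.Status)
}

// Plain-argument form: usable without constructing an object at all.
func getOptions(userDefined map[string]string, userTags, status string) options {
	meta := make(map[string]string, len(userDefined)+1)
	for k, v := range userDefined { // cloneMSS in the real code
		meta[k] = v
	}
	if userTags != "" {
		meta["X-Amz-Tagging"] = userTags
	}
	return options{meta: meta, status: status}
}

func main() {
	o := object{UserDefined: map[string]string{"k": "v"}, UserTags: "a=b", Status: "PENDING"}
	fmt.Println(o.getOptions())                                  // via the method
	fmt.Println(getOptions(map[string]string{"k": "v"}, "", "")) // no object needed
}
```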
@@ -356,40 +354,41 @@ func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelet
 		OpType: replication.DeleteReplicationType,
 	}
 	tgtArns := rcfg.FilterTargetArns(opts)
-	if len(tgtArns) > 0 {
-		dsc.targetsMap = make(map[string]replicateTargetDecision, len(tgtArns))
-		var sync, replicate bool
-		for _, tgtArn := range tgtArns {
-			opts.TargetArn = tgtArn
-			replicate = rcfg.Replicate(opts)
-			// when incoming delete is removal of a delete marker(a.k.a versioned delete),
-			// GetObjectInfo returns extra information even though it returns errFileNotFound
-			if gerr != nil {
-				validReplStatus := false
-				switch oi.TargetReplicationStatus(tgtArn) {
-				case replication.Pending, replication.Completed, replication.Failed:
-					validReplStatus = true
-				}
-				if oi.DeleteMarker && (validReplStatus || replicate) {
-					dsc.Set(newReplicateTargetDecision(tgtArn, replicate, sync))
-					continue
-				} else {
-					// can be the case that other cluster is down and duplicate `mc rm --vid`
-					// is issued - this still needs to be replicated back to the other target
-					replicate = oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed
-					dsc.Set(newReplicateTargetDecision(tgtArn, replicate, sync))
-					continue
-				}
-			}
-			tgt := globalBucketTargetSys.GetRemoteTargetClient(tgtArn)
-			// the target online status should not be used here while deciding
-			// whether to replicate deletes as the target could be temporarily down
-			tgtDsc := newReplicateTargetDecision(tgtArn, false, false)
-			if tgt != nil {
-				tgtDsc = newReplicateTargetDecision(tgtArn, replicate, tgt.replicateSync)
-			}
-			dsc.Set(tgtDsc)
-		}
+	dsc.targetsMap = make(map[string]replicateTargetDecision, len(tgtArns))
+	if len(tgtArns) == 0 {
+		return dsc
+	}
+	var sync, replicate bool
+	for _, tgtArn := range tgtArns {
+		opts.TargetArn = tgtArn
+		replicate = rcfg.Replicate(opts)
+		// when incoming delete is removal of a delete marker(a.k.a versioned delete),
+		// GetObjectInfo returns extra information even though it returns errFileNotFound
+		if gerr != nil {
+			validReplStatus := false
+			switch oi.TargetReplicationStatus(tgtArn) {
+			case replication.Pending, replication.Completed, replication.Failed:
+				validReplStatus = true
+			}
+			if oi.DeleteMarker && (validReplStatus || replicate) {
+				dsc.Set(newReplicateTargetDecision(tgtArn, replicate, sync))
+				continue
+			} else {
+				// can be the case that other cluster is down and duplicate `mc rm --vid`
+				// is issued - this still needs to be replicated back to the other target
+				replicate = oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed
+				dsc.Set(newReplicateTargetDecision(tgtArn, replicate, sync))
+				continue
+			}
+		}
+		tgt := globalBucketTargetSys.GetRemoteTargetClient(tgtArn)
+		// the target online status should not be used here while deciding
+		// whether to replicate deletes as the target could be temporarily down
+		tgtDsc := newReplicateTargetDecision(tgtArn, false, false)
+		if tgt != nil {
+			tgtDsc = newReplicateTargetDecision(tgtArn, replicate, tgt.replicateSync)
+		}
+		dsc.Set(tgtDsc)
 	}
 	return dsc
 }
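Two mechanical changes in the hunk above: the body is de-indented with a guard clause instead of wrapping everything in `if len(tgtArns) > 0`, and `targetsMap` is sized with a capacity hint up front so it never rehashes while being filled. A hedged micro-sketch of both, with illustrative types:

```go
// Sketch: capacity-hinted map plus guard clause. Types are illustrative.
package main

import "fmt"

type decision struct{ arn string }

func decide(arns []string) map[string]decision {
	// Capacity hint: one allocation for the map's buckets, no growth in the loop.
	m := make(map[string]decision, len(arns))
	if len(arns) == 0 {
		return m // early return keeps the loop at top-level indentation
	}
	for _, arn := range arns {
		m[arn] = decision{arn: arn}
	}
	return m
}

func main() { fmt.Println(decide([]string{"arn:a", "arn:b"})) }
```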
@@ -483,15 +482,10 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
 	ctx = lkctx.Context()
 	defer lk.Unlock(lkctx)
 
+	rinfos := replicatedInfos{Targets: make([]replicatedTargetInfo, 0, len(dsc.targetsMap))}
 	var wg sync.WaitGroup
-	var rinfos replicatedInfos
-	rinfos.Targets = make([]replicatedTargetInfo, len(dsc.targetsMap))
-	idx := -1
+	var mu sync.Mutex
 	for _, tgtEntry := range dsc.targetsMap {
-		idx++
-		if tgtEntry.Tgt == nil {
-			continue
-		}
 		if !tgtEntry.Replicate {
 			continue
 		}
@@ -499,11 +493,33 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
 		if dobj.TargetArn != "" && dobj.TargetArn != tgtEntry.Arn {
 			continue
 		}
+		tgtClnt := globalBucketTargetSys.GetRemoteTargetClient(tgtEntry.Arn)
+		if tgtClnt == nil {
+			// Skip stale targets if any and log them to be missing atleast once.
+			logger.LogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtEntry.Arn), tgtEntry.Arn)
+			sendEvent(eventArgs{
+				EventName:  event.ObjectReplicationNotTracked,
+				BucketName: bucket,
+				Object: ObjectInfo{
+					Bucket:       bucket,
+					Name:         dobj.ObjectName,
+					VersionID:    versionID,
+					DeleteMarker: dobj.DeleteMarker,
+				},
+				UserAgent: "Internal: [Replication]",
+				Host:      globalLocalNodeName,
+			})
+			continue
+		}
 		wg.Add(1)
-		go func(index int, tgt *TargetClient) {
+		go func(tgt *TargetClient) {
 			defer wg.Done()
-			rinfos.Targets[index] = replicateDeleteToTarget(ctx, dobj, tgt)
-		}(idx, tgtEntry.Tgt)
+			tgtInfo := replicateDeleteToTarget(ctx, dobj, tgt)
+
+			mu.Lock()
+			rinfos.Targets = append(rinfos.Targets, tgtInfo)
+			mu.Unlock()
+		}(tgtClnt)
 	}
 	wg.Wait()
 
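The fan-out pattern changes here (and again in `replicateObject` below): previously `rinfos.Targets` was preallocated to full length and each goroutine wrote into its index, leaving zero-valued slots for skipped targets; now goroutines append results under a mutex, so the slice holds only real results. A self-contained sketch of the new collection pattern:

```go
// Sketch: goroutines append results under a mutex instead of writing into a
// full-length preallocated slice, so skipped targets leave no zero-value holes.
package main

import (
	"fmt"
	"sync"
)

type result struct{ target string }

func fanOut(targets []string, skip func(string) bool) []result {
	results := make([]result, 0, len(targets)) // capacity only; length grows with real results
	var wg sync.WaitGroup
	var mu sync.Mutex
	for _, t := range targets {
		if skip(t) {
			continue // no placeholder slot is ever created for this target
		}
		wg.Add(1)
		go func(t string) {
			defer wg.Done()
			r := result{target: t} // stand-in for replicateDeleteToTarget
			mu.Lock()
			results = append(results, r)
			mu.Unlock()
		}(t)
	}
	wg.Wait()
	return results
}

func main() {
	out := fanOut([]string{"a", "b", "c"}, func(t string) bool { return t == "b" })
	fmt.Println(len(out), "results") // 2 results; no zero-valued entry for "b"
}
```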
@@ -963,9 +979,8 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
 		})
 	}()
 
-	objInfo := ri.ObjectInfo
-	bucket := objInfo.Bucket
-	object := objInfo.Name
+	bucket := ri.Bucket
+	object := ri.Name
 
 	cfg, err := getReplicationConfig(ctx, bucket)
 	if err != nil {
@@ -973,7 +988,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
 		sendEvent(eventArgs{
 			EventName:  event.ObjectReplicationNotTracked,
 			BucketName: bucket,
-			Object:     objInfo,
+			Object:     ri.ToObjectInfo(),
 			UserAgent:  "Internal: [Replication]",
 			Host:       globalLocalNodeName,
 		})
@@ -981,8 +996,8 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
 	}
 	tgtArns := cfg.FilterTargetArns(replication.ObjectOpts{
 		Name:     object,
-		SSEC:     crypto.SSEC.IsEncrypted(objInfo.UserDefined),
-		UserTags: objInfo.UserTags,
+		SSEC:     ri.SSEC,
+		UserTags: ri.UserTags,
 	})
 	// Lock the object name before starting replication.
 	// Use separate lock that doesn't collide with regular objects.
@@ -992,7 +1007,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
 		sendEvent(eventArgs{
 			EventName:  event.ObjectReplicationNotTracked,
 			BucketName: bucket,
-			Object:     objInfo,
+			Object:     ri.ToObjectInfo(),
 			UserAgent:  "Internal: [Replication]",
 			Host:       globalLocalNodeName,
 		})
@@ -1002,32 +1017,38 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
 	ctx = lkctx.Context()
 	defer lk.Unlock(lkctx)
 
+	rinfos := replicatedInfos{Targets: make([]replicatedTargetInfo, 0, len(tgtArns))}
 	var wg sync.WaitGroup
-	var rinfos replicatedInfos
-	rinfos.Targets = make([]replicatedTargetInfo, len(tgtArns))
-	for i, tgtArn := range tgtArns {
+	var mu sync.Mutex
+	for _, tgtArn := range tgtArns {
 		tgt := globalBucketTargetSys.GetRemoteTargetClient(tgtArn)
 		if tgt == nil {
 			logger.LogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtArn), tgtArn)
 			sendEvent(eventArgs{
 				EventName:  event.ObjectReplicationNotTracked,
 				BucketName: bucket,
-				Object:     objInfo,
+				Object:     ri.ToObjectInfo(),
 				UserAgent:  "Internal: [Replication]",
 				Host:       globalLocalNodeName,
 			})
 			continue
 		}
 		wg.Add(1)
-		go func(index int, tgt *TargetClient) {
+		go func(tgt *TargetClient) {
 			defer wg.Done()
+
+			var tgtInfo replicatedTargetInfo
 			if ri.OpType == replication.ObjectReplicationType {
 				// all incoming calls go through optimized path.
-				rinfos.Targets[index] = ri.replicateObject(ctx, objectAPI, tgt)
+				tgtInfo = ri.replicateObject(ctx, objectAPI, tgt)
 			} else {
-				rinfos.Targets[index] = ri.replicateAll(ctx, objectAPI, tgt)
+				tgtInfo = ri.replicateAll(ctx, objectAPI, tgt)
 			}
-		}(i, tgt)
+
+			mu.Lock()
+			rinfos.Targets = append(rinfos.Targets, tgtInfo)
+			mu.Unlock()
+		}(tgt)
 	}
 	wg.Wait()
 
@@ -1042,10 +1063,11 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
 	newReplStatusInternal := rinfos.ReplicationStatusInternal()
 	// Note that internal replication status(es) may match for previously replicated objects - in such cases
 	// metadata should be updated with last resync timestamp.
-	if objInfo.ReplicationStatusInternal != newReplStatusInternal || rinfos.ReplicationResynced() {
+	objInfo := ri.ToObjectInfo()
+	if ri.ReplicationStatusInternal != newReplStatusInternal || rinfos.ReplicationResynced() {
 		popts := ObjectOptions{
-			MTime:     objInfo.ModTime,
-			VersionID: objInfo.VersionID,
+			MTime:     ri.ModTime,
+			VersionID: ri.VersionID,
 			EvalMetadataFn: func(oi *ObjectInfo, gerr error) (dsc ReplicateDecision, err error) {
 				oi.UserDefined[ReservedMetadataPrefixLower+ReplicationStatus] = newReplStatusInternal
 				oi.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
@@ -1055,14 +1077,18 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
 						oi.UserDefined[targetResetHeader(rinfo.Arn)] = rinfo.ResyncTimestamp
 					}
 				}
-				if objInfo.UserTags != "" {
-					oi.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags
+				if ri.UserTags != "" {
+					oi.UserDefined[xhttp.AmzObjectTagging] = ri.UserTags
 				}
 				return dsc, nil
 			},
 		}
 
-		_, _ = objectAPI.PutObjectMetadata(ctx, bucket, object, popts)
+		uobjInfo, _ := objectAPI.PutObjectMetadata(ctx, bucket, object, popts)
+		if uobjInfo.Name != "" {
+			objInfo = uobjInfo
+		}
+
 		opType := replication.MetadataReplicationType
 		if rinfos.Action() == replicateAll {
 			opType = replication.ObjectReplicationType
@@ -1098,23 +1124,21 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
 // The source object is then updated to reflect the replication status.
 func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI ObjectLayer, tgt *TargetClient) (rinfo replicatedTargetInfo) {
 	startTime := time.Now()
-	objInfo := ri.ObjectInfo.Clone()
-	bucket := objInfo.Bucket
-	object := objInfo.Name
-	sz, _ := objInfo.GetActualSize()
+	bucket := ri.Bucket
+	object := ri.Name
 
 	rAction := replicateAll
 	rinfo = replicatedTargetInfo{
-		Size:                  sz,
+		Size:                  ri.ActualSize,
 		Arn:                   tgt.ARN,
-		PrevReplicationStatus: objInfo.TargetReplicationStatus(tgt.ARN),
+		PrevReplicationStatus: ri.TargetReplicationStatus(tgt.ARN),
 		ReplicationStatus:     replication.Failed,
 		OpType:                ri.OpType,
 		ReplicationAction:     rAction,
 		endpoint:              tgt.EndpointURL().Host,
 		secure:                tgt.EndpointURL().Scheme == "https",
 	}
-	if ri.ObjectInfo.TargetReplicationStatus(tgt.ARN) == replication.Completed && !ri.ExistingObjResync.Empty() && !ri.ExistingObjResync.mustResyncTarget(tgt.ARN) {
+	if ri.TargetReplicationStatus(tgt.ARN) == replication.Completed && !ri.ExistingObjResync.Empty() && !ri.ExistingObjResync.mustResyncTarget(tgt.ARN) {
 		rinfo.ReplicationStatus = replication.Completed
 		rinfo.ReplicationResynced = true
 		return
@@ -1125,7 +1149,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
 		sendEvent(eventArgs{
 			EventName:  event.ObjectReplicationNotTracked,
 			BucketName: bucket,
-			Object:     objInfo,
+			Object:     ri.ToObjectInfo(),
 			UserAgent:  "Internal: [Replication]",
 			Host:       globalLocalNodeName,
 		})
@@ -1136,12 +1160,13 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
 	versionSuspended := globalBucketVersioningSys.PrefixSuspended(bucket, object)
 
 	gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, ObjectOptions{
-		VersionID:        objInfo.VersionID,
+		VersionID:        ri.VersionID,
 		Versioned:        versioned,
 		VersionSuspended: versionSuspended,
 	})
 	if err != nil {
 		if !isErrVersionNotFound(err) && !isErrObjectNotFound(err) {
+			objInfo := ri.ToObjectInfo()
 			sendEvent(eventArgs{
 				EventName:  event.ObjectReplicationNotTracked,
 				BucketName: bucket,
@@ -1155,7 +1180,8 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
 	}
 	defer gr.Close()
 
-	objInfo = gr.ObjInfo
+	objInfo := gr.ObjInfo
+
 	// make sure we have the latest metadata for metrics calculation
 	rinfo.PrevReplicationStatus = objInfo.TargetReplicationStatus(tgt.ARN)
 
@@ -1217,7 +1243,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
 
 	opts := &bandwidth.MonitorReaderOptions{
 		BucketOptions: bandwidth.BucketOptions{
-			Name:           objInfo.Bucket,
+			Name:           ri.Bucket,
 			ReplicationARN: tgt.ARN,
 		},
 		HeaderSize: headerSize,
@@ -1256,10 +1282,8 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
 // The source object is then updated to reflect the replication status.
 func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI ObjectLayer, tgt *TargetClient) (rinfo replicatedTargetInfo) {
 	startTime := time.Now()
-	objInfo := ri.ObjectInfo.Clone()
-	bucket := objInfo.Bucket
-	object := objInfo.Name
-	sz, _ := objInfo.GetActualSize()
+	bucket := ri.Bucket
+	object := ri.Name
 
 	// set defaults for replication action based on operation being performed - actual
 	// replication action can only be determined after stat on remote. This default is
@@ -1267,9 +1291,9 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
 	rAction := replicateMetadata
 
 	rinfo = replicatedTargetInfo{
-		Size:                  sz,
+		Size:                  ri.ActualSize,
 		Arn:                   tgt.ARN,
-		PrevReplicationStatus: objInfo.TargetReplicationStatus(tgt.ARN),
+		PrevReplicationStatus: ri.TargetReplicationStatus(tgt.ARN),
 		ReplicationStatus:     replication.Failed,
 		OpType:                ri.OpType,
 		ReplicationAction:     rAction,
@@ -1282,7 +1306,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
 		sendEvent(eventArgs{
 			EventName:  event.ObjectReplicationNotTracked,
 			BucketName: bucket,
-			Object:     objInfo,
+			Object:     ri.ToObjectInfo(),
 			UserAgent:  "Internal: [Replication]",
 			Host:       globalLocalNodeName,
 		})
@@ -1293,12 +1317,13 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
 	versionSuspended := globalBucketVersioningSys.PrefixSuspended(bucket, object)
 
 	gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, ObjectOptions{
-		VersionID:        objInfo.VersionID,
+		VersionID:        ri.VersionID,
 		Versioned:        versioned,
 		VersionSuspended: versionSuspended,
 	})
 	if err != nil {
 		if !isErrVersionNotFound(err) && !isErrObjectNotFound(err) {
+			objInfo := ri.ToObjectInfo()
 			sendEvent(eventArgs{
 				EventName:  event.ObjectReplicationNotTracked,
 				BucketName: bucket,
|
||||
}
|
||||
defer gr.Close()
|
||||
|
||||
objInfo = gr.ObjInfo
|
||||
objInfo := gr.ObjInfo
|
||||
// make sure we have the latest metadata for metrics calculation
|
||||
rinfo.PrevReplicationStatus = objInfo.TargetReplicationStatus(tgt.ARN)
|
||||
|
||||
@@ -1379,7 +1404,9 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
 	}
 	// object with same VersionID already exists, replication kicked off by
 	// PutObject might have completed
-	if objInfo.TargetReplicationStatus(tgt.ARN) == replication.Pending || objInfo.TargetReplicationStatus(tgt.ARN) == replication.Failed || ri.OpType == replication.ExistingObjectReplicationType {
+	if objInfo.TargetReplicationStatus(tgt.ARN) == replication.Pending ||
+		objInfo.TargetReplicationStatus(tgt.ARN) == replication.Failed ||
+		ri.OpType == replication.ExistingObjectReplicationType {
 		// if metadata is not updated for some reason after replication, such as
 		// 503 encountered while updating metadata - make sure to set ReplicationStatus
 		// as Completed.
@@ -2233,8 +2260,35 @@ func proxyHeadToReplicationTarget(ctx context.Context, bucket, object string, rs
 	return oi, proxy
 }
 
-func scheduleReplication(ctx context.Context, objInfo ObjectInfo, o ObjectLayer, dsc ReplicateDecision, opType replication.Type) {
-	ri := ReplicateObjectInfo{ObjectInfo: objInfo, OpType: opType, Dsc: dsc, EventType: ReplicateIncoming}
+func scheduleReplication(ctx context.Context, oi ObjectInfo, o ObjectLayer, dsc ReplicateDecision, opType replication.Type) {
+	tgtStatuses := replicationStatusesMap(oi.ReplicationStatusInternal)
+	purgeStatuses := versionPurgeStatusesMap(oi.VersionPurgeStatusInternal)
+	tm, _ := time.Parse(time.RFC3339Nano, oi.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp])
+	rstate := oi.ReplicationState()
+	rstate.ReplicateDecisionStr = dsc.String()
+	asz, _ := oi.GetActualSize()
+
+	ri := ReplicateObjectInfo{
+		Name:                       oi.Name,
+		Size:                       oi.Size,
+		ActualSize:                 asz,
+		Bucket:                     oi.Bucket,
+		VersionID:                  oi.VersionID,
+		ModTime:                    oi.ModTime,
+		ReplicationStatus:          oi.ReplicationStatus,
+		ReplicationStatusInternal:  oi.ReplicationStatusInternal,
+		DeleteMarker:               oi.DeleteMarker,
+		VersionPurgeStatusInternal: oi.VersionPurgeStatusInternal,
+		VersionPurgeStatus:         oi.VersionPurgeStatus,
+
+		ReplicationState:     rstate,
+		OpType:               opType,
+		Dsc:                  dsc,
+		TargetStatuses:       tgtStatuses,
+		TargetPurgeStatuses:  purgeStatuses,
+		ReplicationTimestamp: tm,
+	}
+
 	if dsc.Synchronous() {
 		replicateObject(ctx, ri, o)
 	} else {
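`scheduleReplication` is where the flattening happens: derived values (status maps, actual size, the replication timestamp) are computed once at the scheduling boundary and stored as plain fields. Note the `tm, _ := time.Parse(...)` idiom above: a missing or unparsable metadata value deliberately degrades to `time.Time`'s zero value rather than an error path. A small sketch of that behavior (the metadata key spelling below is an illustrative stand-in for `ReservedMetadataPrefixLower+ReplicationTimestamp`):

```go
// Sketch: RFC3339Nano timestamps stored as metadata text; a failed parse
// yields the zero time instead of an error path.
package main

import (
	"fmt"
	"time"
)

func main() {
	meta := map[string]string{
		// assumed key spelling, for illustration only
		"x-minio-internal-replication-timestamp": "2023-09-18T10:00:00.123456789Z",
	}
	tm, _ := time.Parse(time.RFC3339Nano, meta["x-minio-internal-replication-timestamp"])
	fmt.Println(tm.IsZero(), tm) // false 2023-09-18 10:00:00.123456789 +0000 UTC

	missing, _ := time.Parse(time.RFC3339Nano, meta["no-such-key"])
	fmt.Println(missing.IsZero()) // true: absent metadata degrades to the zero time
}
```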
@@ -2263,7 +2317,7 @@ func (c replicationConfig) Replicate(opts replication.ObjectOpts) bool {
 }
 
 // Resync returns true if replication reset is requested
-func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo, dsc *ReplicateDecision, tgtStatuses map[string]replication.StatusType) (r ResyncDecision) {
+func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo, dsc ReplicateDecision, tgtStatuses map[string]replication.StatusType) (r ResyncDecision) {
 	if c.Empty() {
 		return
 	}
@@ -2272,8 +2326,6 @@ func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo, dsc *Repli
 	if oi.DeleteMarker {
 		opts := replication.ObjectOpts{
 			Name:         oi.Name,
-			SSEC:         crypto.SSEC.IsEncrypted(oi.UserDefined),
-			UserTags:     oi.UserTags,
 			DeleteMarker: oi.DeleteMarker,
 			VersionID:    oi.VersionID,
 			OpType:       replication.DeleteReplicationType,
@@ -2294,23 +2346,19 @@ func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo, dsc *Repli
 	}
 
 	// Ignore previous replication status when deciding if object can be re-replicated
-	objInfo := oi.Clone()
-	objInfo.ReplicationStatusInternal = ""
-	objInfo.VersionPurgeStatusInternal = ""
-	objInfo.ReplicationStatus = ""
-	objInfo.VersionPurgeStatus = ""
-	delete(objInfo.UserDefined, xhttp.AmzBucketReplicationStatus)
-	resyncdsc := mustReplicate(ctx, oi.Bucket, oi.Name, getMustReplicateOptions(objInfo, replication.ExistingObjectReplicationType, ObjectOptions{}))
-	dsc = &resyncdsc
-	return c.resync(oi, dsc, tgtStatuses)
+	userDefined := cloneMSS(oi.UserDefined)
+	delete(userDefined, xhttp.AmzBucketReplicationStatus)
+
+	rdsc := mustReplicate(ctx, oi.Bucket, oi.Name, getMustReplicateOptions(userDefined, oi.UserTags, "", replication.ExistingObjectReplicationType, ObjectOptions{}))
+	return c.resync(oi, rdsc, tgtStatuses)
 }
 
 // wrapper function for testability. Returns true if a new reset is requested on
 // already replicated objects OR object qualifies for existing object replication
 // and no reset requested.
-func (c replicationConfig) resync(oi ObjectInfo, dsc *ReplicateDecision, tgtStatuses map[string]replication.StatusType) (r ResyncDecision) {
+func (c replicationConfig) resync(oi ObjectInfo, dsc ReplicateDecision, tgtStatuses map[string]replication.StatusType) (r ResyncDecision) {
 	r = ResyncDecision{
-		targets: make(map[string]ResyncTargetDecision),
+		targets: make(map[string]ResyncTargetDecision, len(dsc.targetsMap)),
 	}
 	if c.remotes == nil {
 		return
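`Resync`/`resync` also switch from `*ReplicateDecision` to a plain `ReplicateDecision`. Passing the small struct by value removes the nil case and the pointer-rebinding dance (`dsc = &resyncdsc` in the old code), and since the old `getMustReplicateOptions` needed a full cloned `ObjectInfo` just to strip a few fields, the resync path now builds only the cloned metadata map. A minimal sketch of the pointer-versus-value trade-off, with illustrative types:

```go
// Sketch: a struct holding one map header copies in a few machine words, so
// taking it by value is cheap and removes the nil case. Types are illustrative.
package main

import "fmt"

type decision struct{ targets map[string]bool }

// Pointer form: callee must consider nil and may rebind or mutate the
// caller's view as a side effect.
func describePtr(dsc *decision) string {
	if dsc == nil {
		return "no decision"
	}
	return fmt.Sprintf("%d targets", len(dsc.targets))
}

// Value form: the input is just data; no nil check, no hidden aliasing.
func describeVal(dsc decision) string {
	return fmt.Sprintf("%d targets", len(dsc.targets))
}

func main() {
	fmt.Println(describePtr(nil))
	fmt.Println(describeVal(decision{targets: map[string]bool{"arn:a": true}}))
}
```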
@@ -2567,7 +2615,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
 				ObjectName:            roi.Name,
 				DeleteMarkerVersionID: dmVersionID,
 				VersionID:             versionID,
-				ReplicationState:      roi.getReplicationState(),
+				ReplicationState:      roi.ReplicationState,
 				DeleteMarkerMTime:     DeleteMarkerMTime{roi.ModTime},
 				DeleteMarker:          roi.DeleteMarker,
 			},
@@ -3013,7 +3061,7 @@ func queueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, rcf
 				ObjectName:            roi.Name,
 				DeleteMarkerVersionID: dmVersionID,
 				VersionID:             versionID,
-				ReplicationState:      roi.getReplicationState(),
+				ReplicationState:      roi.ReplicationState,
 				DeleteMarkerMTime:     DeleteMarkerMTime{roi.ModTime},
 				DeleteMarker:          roi.DeleteMarker,
 			},