fix: use the right channel to feed the data in (#18605)
This PR fixes a regression in batch replication: no data was being sent from the Walk() results because the wrong channel was being used.
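For context, here is a minimal, hypothetical Go sketch (not MinIO code; the walk helper and the string channel are made up for illustration) of the wiring this commit restores: the channel handed to the walker must be the same channel the replication loop drains, otherwise the consumer never receives any objects.

    package main

    import "fmt"

    // walk emulates a Walk()-style producer: it streams names into the
    // provided channel and closes it when done.
    func walk(out chan<- string) {
        defer close(out)
        for _, name := range []string{"a.txt", "b.txt", "c.txt"} {
            out <- name
        }
    }

    func main() {
        walkCh := make(chan string, 100)

        // Correct wiring: the producer writes to the same channel the
        // consumer ranges over. Passing a different, never-written channel
        // to one side is the shape of the regression fixed by this commit.
        go walk(walkCh)

        replicated := 0
        for range walkCh {
            replicated++
        }
        fmt.Println("objects seen by the replication loop:", replicated) // 3
    }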
@@ -1007,12 +1007,9 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
         // None of the provided metadata filters match skip the object.
         return false
     }
-    // if one of source or target is non MinIO, just replicate the top most version like `mc mirror`
-    if (r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3) && !info.IsLatest {
-        return false
-    }
-
-    return true
+    // if one of source or target is non MinIO, just replicate the top most version like `mc mirror`
+    return !((r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3) && !info.IsLatest)
 }

 u, err := url.Parse(r.Target.Endpoint)
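The hunk above is a pure simplification: an if/return-false/return-true tail collapses into a single negated expression. A standalone sketch of the equivalence, using placeholder names rather than the MinIO types:

    // Both forms return the same result for every combination of inputs:
    // replicate unless an S3 endpoint is involved and the version is not the latest.
    func shouldReplicateVerbose(s3Involved, isLatest bool) bool {
        if s3Involved && !isLatest {
            return false
        }
        return true
    }

    func shouldReplicateCompact(s3Involved, isLatest bool) bool {
        return !(s3Involved && !isLatest)
    }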
@@ -1123,8 +1120,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
 // one of source/target is s3, skip delete marker and all versions under the same object name.
 s3Type := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3

-results := make(chan ObjectInfo, 100)
-if err := api.Walk(ctx, r.Source.Bucket, r.Source.Prefix, results, WalkOptions{
+if err := api.Walk(ctx, r.Source.Bucket, r.Source.Prefix, walkCh, WalkOptions{
     Marker:   lastObject,
     Filter:   selectObj,
     AskDisks: walkQuorum,
@@ -337,13 +337,13 @@ type SMA struct
     filledBuf bool
 }

-func newSMA(len int) *SMA {
-    if len <= 0 {
-        len = defaultWindowSize
+func newSMA(ln int) *SMA {
+    if ln <= 0 {
+        ln = defaultWindowSize
     }
     return &SMA{
-        buf:    make([]float64, len),
-        window: len,
+        buf:    make([]float64, ln),
+        window: ln,
         idx:    0,
     }
 }
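The newSMA change looks cosmetic, but renaming the parameter avoids shadowing Go's predeclared identifier len inside the function body (presumably a linter-driven cleanup; that motivation is an inference, not stated in the commit). A small illustration with hypothetical names:

    // With a parameter named len, the builtin is shadowed: a call like
    // len(buf) would refer to the int parameter and fail to compile.
    // Renaming the parameter (here to ln) keeps the builtin usable.
    func windowSize(ln int) int {
        if ln <= 0 {
            ln = 8 // hypothetical default window size
        }
        buf := make([]float64, ln)
        return len(buf) // builtin len still resolves to the built-in function
    }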
@@ -373,15 +373,14 @@ func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelet
         if oi.DeleteMarker && (validReplStatus || replicate) {
             dsc.Set(newReplicateTargetDecision(tgtArn, replicate, sync))
             continue
-        } else {
-            // can be the case that other cluster is down and duplicate `mc rm --vid`
-            // is issued - this still needs to be replicated back to the other target
-            if !oi.VersionPurgeStatus.Empty() {
-                replicate = oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed
-                dsc.Set(newReplicateTargetDecision(tgtArn, replicate, sync))
-            }
-            continue
         }
+        // can be the case that other cluster is down and duplicate `mc rm --vid`
+        // is issued - this still needs to be replicated back to the other target
+        if !oi.VersionPurgeStatus.Empty() {
+            replicate = oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed
+            dsc.Set(newReplicateTargetDecision(tgtArn, replicate, sync))
+        }
+        continue
     }
     tgt := globalBucketTargetSys.GetRemoteTargetClient(bucket, tgtArn)
     // the target online status should not be used here while deciding
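The checkReplicateDelete hunk (and the writeDataBlocks and TierConfigMgr.Remove hunks further down) applies the same cleanup: when the first branch ends in continue or return, the else wrapper adds nesting without changing behavior. A generic before/after sketch, unrelated to the MinIO types:

    // Before: else after a terminating statement adds a level of nesting.
    func sumPositivesNested(nums []int) int {
        total := 0
        for _, n := range nums {
            if n <= 0 {
                continue
            } else {
                total += n
            }
        }
        return total
    }

    // After: the else is dropped; the result is identical for every input.
    func sumPositivesFlat(nums []int) int {
        total := 0
        for _, n := range nums {
            if n <= 0 {
                continue
            }
            total += n
        }
        return total
    }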
@@ -581,12 +581,12 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
             foundAny = true
             break
         }
-        if next := f.updateCache.searchParent(parent); next == nil {
+        next := f.updateCache.searchParent(parent)
+        if next == nil {
             foundAny = true
             break
-        } else {
-            parent = *next
         }
+        parent = *next
     }
     if !foundAny {
         // Add non-compacted empty entry.
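The scanFolder hunk is slightly different: in the original if next := ...; next == nil form, next is scoped to the if/else statement, so the assignment to parent has to live in the else branch. Declaring next on its own line widens its scope, which is what allows the else to be removed. A minimal sketch of the scoping rule, with placeholder names standing in for f.updateCache.searchParent:

    // lookup is a hypothetical stand-in for the cache search.
    func lookup(key string, m map[string]string) *string {
        if v, ok := m[key]; ok {
            return &v
        }
        return nil
    }

    func climb(key string, m map[string]string) string {
        // next is declared outside the if statement, so it remains in scope
        // after the early exit and no else branch is needed.
        next := lookup(key, m)
        if next == nil {
            return key // nothing found; stop here (stands in for break)
        }
        return *next
    }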
@@ -72,15 +72,15 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, data
         // Decrement offset.
         offset -= int64(len(block))
         continue
-    } else {
-        // Skip until offset.
-        block = block[offset:]
-
-        // Reset the offset for next iteration to read everything
-        // from subsequent blocks.
-        offset = 0
     }
+    // Skip until offset.
+    block = block[offset:]
+
+    // Reset the offset for next iteration to read everything
+    // from subsequent blocks.
+    offset = 0

     // We have written all the blocks, write the last remaining block.
     if write < int64(len(block)) {
         n, err := dst.Write(block[:write])
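The writeDataBlocks hunk keeps the offset handling intact while dropping the else. The underlying idea: skip whole blocks while the requested offset is past their end, then slice into the first block that contains the offset and zero the offset for all remaining blocks. A self-contained sketch under those assumptions (hypothetical helper, not the MinIO function):

    package main

    import "fmt"

    func readFromOffset(blocks [][]byte, offset int64) []byte {
        var out []byte
        for _, block := range blocks {
            if offset >= int64(len(block)) {
                // Skip this whole block and decrement the offset.
                offset -= int64(len(block))
                continue
            }
            // Skip until offset inside this block, then reset it so the
            // remaining blocks are read in full.
            block = block[offset:]
            offset = 0
            out = append(out, block...)
        }
        return out
    }

    func main() {
        blocks := [][]byte{[]byte("hello"), []byte("world")}
        fmt.Println(string(readFromOffset(blocks, 7))) // "rld"
    }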
@@ -148,12 +148,11 @@ func (config *TierConfigMgr) Remove(ctx context.Context, tier string) error {
         return err
     } else if inuse {
         return errTierBackendNotEmpty
-    } else {
-        config.Lock()
-        delete(config.Tiers, tier)
-        delete(config.drivercache, tier)
-        config.Unlock()
     }
+    config.Lock()
+    delete(config.Tiers, tier)
+    delete(config.drivercache, tier)
+    config.Unlock()
     return nil
 }