replication: heal proactively upon access (#15501)
Queue failed/pending replication for healing during listing and GET/HEAD API calls. This includes healing of existing objects that were never replicated, or that are still owed by an in-progress resync operation. This PR also fixes a bug in ListObjectVersions, which was not applying lifecycle filtering where it should.
parent a406bb0288 · commit 21bf5b4db7
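In short, every read path (GET, HEAD and listing) now hands the ObjectInfo it already holds to a heal queue, and the queue decides whether that version still needs to be replicated. The snippet below is a minimal, self-contained sketch of that triage only, not MinIO's actual implementation: the objectVersion type and the shouldQueueHeal helper are illustrative stand-ins, and the real queueReplicationHeal added in the first hunk additionally handles delete markers, per-target replication decisions, and the split between the heal queue and the existing-object resync queue.

package main

import "fmt"

// objectVersion is an illustrative model of the fields the heal triage looks at.
// Status names mirror the constants used in the diff (Pending/Failed/Completed).
type objectVersion struct {
	VersionID    string
	ReplStatus   string // "PENDING", "FAILED", "COMPLETED", or ""
	NeedsResync  bool   // an ongoing existing-object resync still owes this version
	DeleteMarker bool   // the real code routes these to the delete-replication queue instead
}

// shouldQueueHeal returns true when an accessed version should be queued for
// replication healing: it must be versioned, replication must be configured,
// and it is either not yet replicated or still owed by a resync.
func shouldQueueHeal(v objectVersion, replConfigured bool) bool {
	if v.VersionID == "" || !replConfigured {
		return false // un-versioned object or no replication config: nothing to do
	}
	if v.ReplStatus == "COMPLETED" && !v.NeedsResync {
		return false // already replicated and not part of a resync
	}
	return v.ReplStatus == "PENDING" || v.ReplStatus == "FAILED" || v.NeedsResync
}

func main() {
	fmt.Println(shouldQueueHeal(objectVersion{VersionID: "v1", ReplStatus: "FAILED"}, true))    // true
	fmt.Println(shouldQueueHeal(objectVersion{VersionID: "v1", ReplStatus: "COMPLETED"}, true)) // false
	fmt.Println(shouldQueueHeal(objectVersion{VersionID: "", ReplStatus: "FAILED"}, true))      // false: un-versioned
}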
@@ -2386,3 +2386,90 @@ func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string,
     }()
     return diffCh, nil
 }
+
+// QueueReplicationHeal is a wrapper for queueReplicationHeal
+func QueueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo) {
+    // un-versioned case
+    if oi.VersionID == "" {
+        return
+    }
+    rcfg, _, _ := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
+    tgts, _ := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
+    queueReplicationHeal(ctx, bucket, oi, replicationConfig{
+        Config:  rcfg,
+        remotes: tgts,
+    })
+}
+
+// queueReplicationHeal enqueues objects that failed replication OR eligible for resyncing through
+// an ongoing resync operation or via existing objects replication configuration setting.
+func queueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, rcfg replicationConfig) (roi ReplicateObjectInfo) {
+    // un-versioned case
+    if oi.VersionID == "" {
+        return roi
+    }
+
+    if rcfg.Config == nil || rcfg.remotes == nil {
+        return roi
+    }
+    roi = getHealReplicateObjectInfo(oi, rcfg)
+    if !roi.Dsc.ReplicateAny() {
+        return
+    }
+    // early return if replication already done, otherwise we need to determine if this
+    // version is an existing object that needs healing.
+    if oi.ReplicationStatus == replication.Completed && oi.VersionPurgeStatus.Empty() && !roi.ExistingObjResync.mustResync() {
+        return
+    }
+
+    if roi.DeleteMarker || !roi.VersionPurgeStatus.Empty() {
+        versionID := ""
+        dmVersionID := ""
+        if roi.VersionPurgeStatus.Empty() {
+            dmVersionID = roi.VersionID
+        } else {
+            versionID = roi.VersionID
+        }
+
+        dv := DeletedObjectReplicationInfo{
+            DeletedObject: DeletedObject{
+                ObjectName:            roi.Name,
+                DeleteMarkerVersionID: dmVersionID,
+                VersionID:             versionID,
+                ReplicationState:      roi.getReplicationState(roi.Dsc.String(), versionID, true),
+                DeleteMarkerMTime:     DeleteMarkerMTime{roi.ModTime},
+                DeleteMarker:          roi.DeleteMarker,
+            },
+            Bucket:    roi.Bucket,
+            OpType:    replication.HealReplicationType,
+            EventType: ReplicateHealDelete,
+        }
+        // heal delete marker replication failure or versioned delete replication failure
+        if roi.ReplicationStatus == replication.Pending ||
+            roi.ReplicationStatus == replication.Failed ||
+            roi.VersionPurgeStatus == Failed || roi.VersionPurgeStatus == Pending {
+            globalReplicationPool.queueReplicaDeleteTask(dv)
+            return
+        }
+        // if replication status is Complete on DeleteMarker and existing object resync required
+        if roi.ExistingObjResync.mustResync() && (roi.ReplicationStatus == replication.Completed || roi.ReplicationStatus.Empty()) {
+            queueReplicateDeletesWrapper(dv, roi.ExistingObjResync)
+            return
+        }
+        return
+    }
+    if roi.ExistingObjResync.mustResync() {
+        roi.OpType = replication.ExistingObjectReplicationType
+    }
+    switch roi.ReplicationStatus {
+    case replication.Pending, replication.Failed:
+        roi.EventType = ReplicateHeal
+        globalReplicationPool.queueReplicaTask(roi)
+        return
+    }
+    if roi.ExistingObjResync.mustResync() {
+        roi.EventType = ReplicateExisting
+        globalReplicationPool.queueReplicaTask(roi)
+    }
+    return
+}
@@ -1225,29 +1225,16 @@ func (i *scannerItem) objectPath() string {
 
 // healReplication will heal a scanned item that has failed replication.
 func (i *scannerItem) healReplication(ctx context.Context, o ObjectLayer, oi ObjectInfo, sizeS *sizeSummary) {
-    roi := getHealReplicateObjectInfo(oi, i.replication)
-    if !roi.Dsc.ReplicateAny() {
+    if oi.VersionID == "" {
         return
     }
-
+    if i.replication.Config == nil {
+        return
+    }
+    roi := queueReplicationHeal(ctx, oi.Bucket, oi, i.replication)
     if oi.DeleteMarker || !oi.VersionPurgeStatus.Empty() {
-        // heal delete marker replication failure or versioned delete replication failure
-        if oi.ReplicationStatus == replication.Pending ||
-            oi.ReplicationStatus == replication.Failed ||
-            oi.VersionPurgeStatus == Failed || oi.VersionPurgeStatus == Pending {
-            i.healReplicationDeletes(ctx, o, roi)
-            return
-        }
-        // if replication status is Complete on DeleteMarker and existing object resync required
-        if roi.ExistingObjResync.mustResync() && (oi.ReplicationStatus == replication.Completed || oi.ReplicationStatus.Empty()) {
-            i.healReplicationDeletes(ctx, o, roi)
-            return
-        }
         return
     }
-    if roi.ExistingObjResync.mustResync() {
-        roi.OpType = replication.ExistingObjectReplicationType
-    }
 
     if sizeS.replTargetStats == nil && len(roi.TargetStatuses) > 0 {
         sizeS.replTargetStats = make(map[string]replTargetSizeSummary)
@@ -1277,52 +1264,9 @@ func (i *scannerItem) healReplication(ctx context.Context, o ObjectLayer, oi Obj
     }
 
     switch oi.ReplicationStatus {
-    case replication.Pending, replication.Failed:
-        roi.EventType = ReplicateHeal
-        globalReplicationPool.queueReplicaTask(roi)
-        return
     case replication.Replica:
         sizeS.replicaSize += oi.Size
     }
-    if roi.ExistingObjResync.mustResync() {
-        roi.EventType = ReplicateExisting
-        globalReplicationPool.queueReplicaTask(roi)
-    }
-}
-
-// healReplicationDeletes will heal a scanned deleted item that failed to replicate deletes.
-func (i *scannerItem) healReplicationDeletes(ctx context.Context, o ObjectLayer, roi ReplicateObjectInfo) {
-    // handle soft delete and permanent delete failures here.
-    if roi.DeleteMarker || !roi.VersionPurgeStatus.Empty() {
-        versionID := ""
-        dmVersionID := ""
-        if roi.VersionPurgeStatus.Empty() {
-            dmVersionID = roi.VersionID
-        } else {
-            versionID = roi.VersionID
-        }
-
-        doi := DeletedObjectReplicationInfo{
-            DeletedObject: DeletedObject{
-                ObjectName:            roi.Name,
-                DeleteMarkerVersionID: dmVersionID,
-                VersionID:             versionID,
-                ReplicationState:      roi.getReplicationState(roi.Dsc.String(), versionID, true),
-                DeleteMarkerMTime:     DeleteMarkerMTime{roi.ModTime},
-                DeleteMarker:          roi.DeleteMarker,
-            },
-            Bucket:    roi.Bucket,
-            OpType:    replication.HealReplicationType,
-            EventType: ReplicateHealDelete,
-        }
-        if roi.ExistingObjResync.mustResync() {
-            doi.OpType = replication.ExistingObjectReplicationType
-            doi.EventType = ReplicateExistingDelete
-            queueReplicateDeletesWrapper(doi, roi.ExistingObjResync)
-            return
-        }
-        globalReplicationPool.queueReplicaDeleteTask(doi)
-    }
-}
 }
 
 type dynamicSleeper struct {
@@ -1178,7 +1178,6 @@ func (z *erasureServerPools) ListObjectVersions(ctx context.Context, bucket, pre
     if marker == "" && versionMarker != "" {
         return loi, NotImplemented{}
     }
-
     opts := listPathOptions{
         Bucket:      bucket,
         Prefix:      prefix,
@@ -1189,6 +1188,8 @@ func (z *erasureServerPools) ListObjectVersions(ctx context.Context, bucket, pre
         AskDisks:    globalAPIConfig.getListQuorum(),
         Versioned:   true,
     }
+    // set bucket metadata in opts
+    opts.setBucketMeta(ctx)
 
     merged, err := z.listPath(ctx, &opts)
     if err != nil && err != io.EOF {
@@ -1235,12 +1236,16 @@ func maxKeysPlusOne(maxKeys int, addOne bool) int {
 
 func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
     var loi ListObjectsInfo
-
-    // Automatically remove the object/version is an expiry lifecycle rule can be applied
-    lc, _ := globalLifecycleSys.Get(bucket)
-
-    // Check if bucket is object locked.
-    rcfg, _ := globalBucketObjectLockSys.Get(bucket)
+    opts := listPathOptions{
+        Bucket:      bucket,
+        Prefix:      prefix,
+        Separator:   delimiter,
+        Limit:       maxKeysPlusOne(maxKeys, marker != ""),
+        Marker:      marker,
+        InclDeleted: false,
+        AskDisks:    globalAPIConfig.getListQuorum(),
+    }
+    opts.setBucketMeta(ctx)
 
     if len(prefix) > 0 && maxKeys == 1 && delimiter == "" && marker == "" {
         // Optimization for certain applications like
|
|||||||
// to avoid the need for ListObjects().
|
// to avoid the need for ListObjects().
|
||||||
objInfo, err := z.GetObjectInfo(ctx, bucket, prefix, ObjectOptions{NoLock: true})
|
objInfo, err := z.GetObjectInfo(ctx, bucket, prefix, ObjectOptions{NoLock: true})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if lc != nil {
|
if opts.Lifecycle != nil {
|
||||||
action := evalActionFromLifecycle(ctx, *lc, rcfg, objInfo, false)
|
action := evalActionFromLifecycle(ctx, *opts.Lifecycle, opts.Retention, objInfo, false)
|
||||||
switch action {
|
switch action {
|
||||||
case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
|
case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
|
||||||
fallthrough
|
fallthrough
|
||||||
@@ -1266,18 +1271,6 @@ func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, ma
         }
     }
 
-    opts := listPathOptions{
-        Bucket:      bucket,
-        Prefix:      prefix,
-        Separator:   delimiter,
-        Limit:       maxKeysPlusOne(maxKeys, marker != ""),
-        Marker:      marker,
-        InclDeleted: false,
-        AskDisks:    globalAPIConfig.getListQuorum(),
-        Lifecycle:   lc,
-        Retention:   rcfg,
-    }
-
     merged, err := z.listPath(ctx, &opts)
     if err != nil && err != io.EOF {
         if !isErrBucketNotFound(err) {
@@ -2859,11 +2859,16 @@ func (es *erasureSingle) AbortMultipartUpload(ctx context.Context, bucket, objec
 func (es *erasureSingle) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
     var loi ListObjectsInfo
 
-    // Automatically remove the object/version is an expiry lifecycle rule can be applied
-    lc, _ := globalLifecycleSys.Get(bucket)
-
-    // Check if bucket is object locked.
-    rcfg, _ := globalBucketObjectLockSys.Get(bucket)
+    opts := listPathOptions{
+        Bucket:      bucket,
+        Prefix:      prefix,
+        Separator:   delimiter,
+        Limit:       maxKeysPlusOne(maxKeys, marker != ""),
+        Marker:      marker,
+        InclDeleted: false,
+        AskDisks:    globalAPIConfig.getListQuorum(),
+    }
+    opts.setBucketMeta(ctx)
 
     if len(prefix) > 0 && maxKeys == 1 && delimiter == "" && marker == "" {
         // Optimization for certain applications like
|
|||||||
// to avoid the need for ListObjects().
|
// to avoid the need for ListObjects().
|
||||||
objInfo, err := es.GetObjectInfo(ctx, bucket, prefix, ObjectOptions{NoLock: true})
|
objInfo, err := es.GetObjectInfo(ctx, bucket, prefix, ObjectOptions{NoLock: true})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if lc != nil {
|
if opts.Lifecycle != nil {
|
||||||
action := evalActionFromLifecycle(ctx, *lc, rcfg, objInfo, false)
|
action := evalActionFromLifecycle(ctx, *opts.Lifecycle, opts.Retention, objInfo, false)
|
||||||
switch action {
|
switch action {
|
||||||
case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
|
case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
|
||||||
fallthrough
|
fallthrough
|
||||||
@@ -2889,18 +2894,6 @@ func (es *erasureSingle) ListObjects(ctx context.Context, bucket, prefix, marker
         }
     }
 
-    opts := listPathOptions{
-        Bucket:      bucket,
-        Prefix:      prefix,
-        Separator:   delimiter,
-        Limit:       maxKeysPlusOne(maxKeys, marker != ""),
-        Marker:      marker,
-        InclDeleted: false,
-        AskDisks:    globalAPIConfig.getListQuorum(),
-        Lifecycle:   lc,
-        Retention:   rcfg,
-    }
-
     merged, err := es.listPath(ctx, &opts)
     if err != nil && err != io.EOF {
         if !isErrBucketNotFound(err) {
@@ -2959,7 +2952,6 @@ func (es *erasureSingle) ListObjectVersions(ctx context.Context, bucket, prefix,
     if marker == "" && versionMarker != "" {
         return loi, NotImplemented{}
     }
-
     opts := listPathOptions{
         Bucket:      bucket,
         Prefix:      prefix,
@@ -2970,6 +2962,7 @@ func (es *erasureSingle) ListObjectVersions(ctx context.Context, bucket, prefix,
         AskDisks:    "strict",
         Versioned:   true,
     }
+    opts.setBucketMeta(ctx)
 
     merged, err := es.listPath(ctx, &opts)
     if err != nil && err != io.EOF {
@@ -29,7 +29,6 @@ import (
     "time"
 
     "github.com/minio/minio/internal/bucket/lifecycle"
-    "github.com/minio/minio/internal/bucket/object/lock"
     "github.com/minio/minio/internal/logger"
 )
 
@@ -486,9 +485,9 @@ func (es *erasureSingle) listMerged(ctx context.Context, o listPathOptions, resu
     mu.Unlock()
 
     // Do lifecycle filtering.
-    if o.Lifecycle != nil {
+    if o.Lifecycle != nil || o.Replication.Config != nil {
         filterIn := make(chan metaCacheEntry, 10)
-        go filterLifeCycle(ctx, o.Bucket, *o.Lifecycle, o.Retention, filterIn, results)
+        go applyBucketActions(ctx, o, filterIn, results)
         // Replace results.
         results = filterIn
     }
@@ -572,9 +571,9 @@ func (z *erasureServerPools) listMerged(ctx context.Context, o listPathOptions,
     mu.Unlock()
 
     // Do lifecycle filtering.
-    if o.Lifecycle != nil {
+    if o.Lifecycle != nil || o.Replication.Config != nil {
         filterIn := make(chan metaCacheEntry, 10)
-        go filterLifeCycle(ctx, o.Bucket, *o.Lifecycle, o.Retention, filterIn, results)
+        go applyBucketActions(ctx, o, filterIn, results)
         // Replace results.
         results = filterIn
     }
@@ -635,15 +634,16 @@ func (z *erasureServerPools) listMerged(ctx context.Context, o listPathOptions,
     return nil
 }
 
-// filterLifeCycle will filter out objects if the most recent
-// version should be deleted by lifecycle.
+// applyBucketActions applies lifecycle and replication actions on the listing
+// It will filter out objects if the most recent version should be deleted by lifecycle.
+// Entries that failed replication will be queued if no lifecycle rules got applied.
 // out will be closed when there are no more results.
 // When 'in' is closed or the context is canceled the
 // function closes 'out' and exits.
-func filterLifeCycle(ctx context.Context, bucket string, lc lifecycle.Lifecycle, lr lock.Retention, in <-chan metaCacheEntry, out chan<- metaCacheEntry) {
+func applyBucketActions(ctx context.Context, o listPathOptions, in <-chan metaCacheEntry, out chan<- metaCacheEntry) {
     defer close(out)
 
-    vcfg, _ := globalBucketVersioningSys.Get(bucket)
+    vcfg, _ := globalBucketVersioningSys.Get(o.Bucket)
     for {
         var obj metaCacheEntry
         var ok bool
@@ -656,15 +656,16 @@ func filterLifeCycle(ctx context.Context, bucket string, lc lifecycle.Lifecycle,
             }
         }
 
-        fi, err := obj.fileInfo(bucket)
+        fi, err := obj.fileInfo(o.Bucket)
         if err != nil {
             continue
         }
 
         versioned := vcfg != nil && vcfg.Versioned(obj.name)
 
-        objInfo := fi.ToObjectInfo(bucket, obj.name, versioned)
-        action := evalActionFromLifecycle(ctx, lc, lr, objInfo, false)
-        switch action {
-        case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
-            globalExpiryState.enqueueByDays(objInfo, false, action == lifecycle.DeleteVersionAction)
+        objInfo := fi.ToObjectInfo(o.Bucket, obj.name, versioned)
+        if o.Lifecycle != nil {
+            action := evalActionFromLifecycle(ctx, *o.Lifecycle, o.Retention, objInfo, false)
+            switch action {
+            case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
+                globalExpiryState.enqueueByDays(objInfo, false, action == lifecycle.DeleteVersionAction)
@@ -675,10 +676,12 @@ func filterLifeCycle(ctx context.Context, bucket string, lc lifecycle.Lifecycle,
                 // Skip this entry.
                 continue
             }
+        }
         select {
         case <-ctx.Done():
             return
         case out <- obj:
+            queueReplicationHeal(ctx, o.Bucket, objInfo, o.Replication)
         }
     }
 }
@@ -102,6 +102,8 @@ type listPathOptions struct {
     // Retention configuration, needed to be passed along with lifecycle if set.
     Retention lock.Retention
 
+    // Replication configuration
+    Replication replicationConfig
     // pool and set of where the cache is located.
     pool, set int
 }
@@ -110,6 +112,21 @@ func init() {
     gob.Register(listPathOptions{})
 }
 
+func (o *listPathOptions) setBucketMeta(ctx context.Context) {
+    lc, _ := globalLifecycleSys.Get(o.Bucket)
+
+    // Check if bucket is object locked.
+    rcfg, _ := globalBucketObjectLockSys.Get(o.Bucket)
+    replCfg, _, _ := globalBucketMetadataSys.GetReplicationConfig(ctx, o.Bucket)
+    tgts, _ := globalBucketTargetSys.ListBucketTargets(ctx, o.Bucket)
+    o.Lifecycle = lc
+    o.Replication = replicationConfig{
+        Config:  replCfg,
+        remotes: tgts,
+    }
+    o.Retention = rcfg
+}
+
 // newMetacache constructs a new metacache from the options.
 func (o listPathOptions) newMetacache() metacache {
     return metacache{
@@ -417,12 +417,11 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
 
             return checkPreconditions(ctx, w, r, oi, opts)
         }
-
+    var proxy proxyResult
     gr, err := getObjectNInfo(ctx, bucket, object, rs, r.Header, readLock, opts)
     if err != nil {
         var (
             reader *GetObjectReader
-            proxy  proxyResult
             perr   error
         )
         proxytgts := getProxyTargets(ctx, bucket, object, opts)
@@ -491,6 +490,10 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
         }
     }
 
+    // Queue failed/pending replication automatically
+    if !proxy.Proxy {
+        QueueReplicationHeal(ctx, bucket, objInfo)
+    }
     // filter object lock metadata if permission does not permit
     getRetPerms := checkRequestAuthType(ctx, r, policy.GetObjectRetentionAction, bucket, object)
     legalHoldPerms := checkRequestAuthType(ctx, r, policy.GetObjectLegalHoldAction, bucket, object)
@@ -659,11 +662,9 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
     rangeHeader := r.Header.Get(xhttp.Range)
 
     objInfo, err := getObjectInfo(ctx, bucket, object, opts)
+    var proxy proxyResult
     if err != nil {
-        var (
-            proxy proxyResult
-            oi    ObjectInfo
-        )
+        var oi ObjectInfo
         // proxy HEAD to replication target if active-active replication configured on bucket
         proxytgts := getProxyTargets(ctx, bucket, object, opts)
         if !proxytgts.Empty() {
@@ -690,6 +691,7 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
                 w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(objInfo.DeleteMarker)}
             }
         }
+        QueueReplicationHeal(ctx, bucket, objInfo)
         writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
         return
     }
@@ -714,6 +716,11 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
         }
     }
 
+    // Queue failed/pending replication automatically
+    if !proxy.Proxy {
+        QueueReplicationHeal(ctx, bucket, objInfo)
+    }
+
     // filter object lock metadata if permission does not permit
     getRetPerms := checkRequestAuthType(ctx, r, policy.GetObjectRetentionAction, bucket, object)
     legalHoldPerms := checkRequestAuthType(ctx, r, policy.GetObjectLegalHoldAction, bucket, object)