From e909be6380c412955c006ddba7b6e1eeb956046e Mon Sep 17 00:00:00 2001
From: Poorna
Date: Sat, 19 Jul 2025 01:38:46 -0700
Subject: [PATCH] send replication requests to correct pool (#1162)

Fixes incorrect application of ILM expiry rules on versioned objects
when replication is enabled. This is a regression from
https://github.com/minio/minio/pull/20441, which sends DeleteObject
calls to all pools. That is a problem for the replication + ILM
scenario, since a replicated version can end up in a pool by itself
instead of the pool where the remaining object versions reside.

For example, if the delete marker is set on pool1 and the object
versions exist on pool2, the second rule below will cause the delete
marker to be expired by the ILM policy, since it is the only version
present in pool1:

```
{
  "Rules": [
    {
      "ID": "cs6il1ri2hp48g71mdjg",
      "NoncurrentVersionExpiration": {
        "NoncurrentDays": 14
      },
      "Status": "Enabled"
    },
    {
      "Expiration": {
        "ExpiredObjectDeleteMarker": true
      },
      "ID": "cs6inj3i2hp4po19cil0",
      "Status": "Enabled"
    }
  ]
}
```
---
 cmd/erasure-server-pool.go | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/cmd/erasure-server-pool.go b/cmd/erasure-server-pool.go
index 4a3f095d9..d13539e06 100644
--- a/cmd/erasure-server-pool.go
+++ b/cmd/erasure-server-pool.go
@@ -1185,6 +1185,13 @@ func (z *erasureServerPools) DeleteObject(ctx context.Context, bucket string, ob
 		return z.deleteObjectFromAllPools(ctx, bucket, object, opts, noReadQuorumPools)
 	}
 
+	// All replication requests need to go to the pool with the object.
+	if opts.ReplicationRequest {
+		objInfo, err = z.serverPools[pinfo.Index].DeleteObject(ctx, bucket, object, opts)
+		objInfo.Name = decodeDirObject(object)
+		return objInfo, err
+	}
+
 	for _, pool := range z.serverPools {
 		objInfo, err := pool.DeleteObject(ctx, bucket, object, opts)
 		if err != nil && !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
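
For readers outside the MinIO codebase, here is a minimal, self-contained Go sketch of the routing behavior the patch introduces. The `pool`, `serverPools`, and `poolWithObject` names are illustrative stand-ins, not the actual `cmd` package types; in the real code the owning pool comes from the existing `pinfo` lookup rather than a scan.

```
package main

import (
	"errors"
	"fmt"
)

var errObjectNotFound = errors.New("object not found")

// pool is a stand-in for a MinIO server pool: just enough state to
// show which pool owns an object's versions.
type pool struct {
	id       int
	versions map[string]int // object name -> number of versions held
}

func (p *pool) deleteObject(object string) error {
	if p.versions[object] == 0 {
		return errObjectNotFound
	}
	p.versions[object]--
	return nil
}

type serverPools struct{ pools []*pool }

// poolWithObject plays the role of the pinfo lookup in the patch:
// it finds the pool that already holds versions of the object.
func (z *serverPools) poolWithObject(object string) *pool {
	for _, p := range z.pools {
		if p.versions[object] > 0 {
			return p
		}
	}
	return z.pools[0]
}

// deleteObject mirrors the patched logic: a replication-driven delete
// goes only to the pool that owns the object, while other deletes
// still fan out to every pool as introduced by minio/minio#20441.
func (z *serverPools) deleteObject(object string, replicationRequest bool) (int, error) {
	if replicationRequest {
		p := z.poolWithObject(object)
		return p.id, p.deleteObject(object)
	}
	for _, p := range z.pools {
		if err := p.deleteObject(object); err != nil && !errors.Is(err, errObjectNotFound) {
			return p.id, err
		}
	}
	return 0, nil
}

func main() {
	z := &serverPools{pools: []*pool{
		{id: 1, versions: map[string]int{}},             // would otherwise receive a lone delete marker
		{id: 2, versions: map[string]int{"obj.txt": 3}}, // holds the object's real versions
	}}
	poolID, err := z.deleteObject("obj.txt", true)
	fmt.Println("replicated delete routed to pool", poolID, "err:", err) // routed to pool 2
}
```

Routing the replication request to the pool that already holds the object keeps the replicated delete marker co-located with the remaining versions, so the `ExpiredObjectDeleteMarker` rule never sees it as the sole version in a pool.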