Mirror of https://github.com/minio/minio.git, synced 2024-12-24 22:25:54 -05:00
fix: decommission delete markers for non-current objects (#15225)
Decommissioning of versioned buckets was not recreating the delete markers present in an object's version stack, which prevented decommissioning from completing successfully. This PR fixes the creation of such delete markers during the decommissioning process and adds tests.
parent 39b3941892
commit 9d80ff5a05
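The first hunk below is the heart of the fix: when the decommission walker hits a version that is itself a delete marker, that marker has to be replayed onto the remaining pools rather than skipped. A minimal sketch of the call site follows; the `version` entry (with `Deleted`, `Name`, `VersionID`, `ModTime`, and `ReplicationState` fields) and the surrounding loop are assumptions, and only the option fields visible in the diff are confirmed by this commit:

// Sketch only: replaying a delete marker during decommission.
// z, ctx, bucket, and version are assumed to come from the
// decommissionPool version walk shown in the hunk below.
if version.Deleted {
	_, err := z.DeleteObject(ctx, bucket, version.Name, ObjectOptions{
		VersionID:         version.VersionID,        // keep the original version ID
		MTime:             version.ModTime,          // preserve the original timestamp
		DeleteReplication: version.ReplicationState, // carry replication state along
		DeleteMarker:      true,                     // make sure we create a delete marker
	})
	if err != nil {
		// record the failure; see the per-object accounting in the second hunk
	}
}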
@@ -689,6 +689,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
 					VersionID:         version.VersionID,
 					MTime:             version.ModTime,
 					DeleteReplication: version.ReplicationState,
+					DeleteMarker:      true, // make sure we create a delete marker
 				})
 				var failure bool
 				if err != nil {
@@ -698,10 +699,10 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
 				z.poolMetaMutex.Lock()
 				z.poolMeta.CountItem(idx, 0, failure)
 				z.poolMetaMutex.Unlock()
-				if failure {
-					break // break out on first error
+				if !failure {
+					// Success keep a count.
+					decommissionedCount++
 				}
-				decommissionedCount++
 				continue
 			}
 
@@ -119,7 +119,7 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServ
 		}
 
 		if deploymentID == "" {
-			// all zones should have same deployment ID
+			// all pools should have same deployment ID
 			deploymentID = formats[i].ID
 		}
 
@@ -457,7 +457,7 @@ func (z *erasureServerPools) getPoolIdxExistingWithOpts(ctx context.Context, buc
 // If the object exists, but the latest version is a delete marker, the index with it is still returned.
 // If the object does not exist ObjectNotFound error is returned.
 // If any other error is found, it is returned.
-// The check is skipped if there is only one zone, and 0, nil is always returned in that case.
+// The check is skipped if there is only one pool, and 0, nil is always returned in that case.
 func (z *erasureServerPools) getPoolIdxExistingNoLock(ctx context.Context, bucket, object string) (idx int, err error) {
 	return z.getPoolIdxExistingWithOpts(ctx, bucket, object, ObjectOptions{
 		NoLock: true,
@@ -881,7 +881,7 @@ func (z *erasureServerPools) getLatestObjectInfoWithIdx(ctx context.Context, buc
 	sort.Slice(results, func(i, j int) bool {
 		a, b := results[i], results[j]
 		if a.oi.ModTime.Equal(b.oi.ModTime) {
-			// On tiebreak, select the lowest zone index.
+			// On tiebreak, select the lowest pool index.
 			return a.zIdx < b.zIdx
 		}
 		return a.oi.ModTime.After(b.oi.ModTime)
@@ -975,12 +975,12 @@ func (z *erasureServerPools) PutObject(ctx context.Context, bucket string, objec
 }
 
 func (z *erasureServerPools) deletePrefix(ctx context.Context, bucket string, prefix string) error {
-	for idx, zone := range z.serverPools {
+	for idx, pool := range z.serverPools {
 		if z.IsSuspended(idx) {
 			logger.LogIf(ctx, fmt.Errorf("pool %d is suspended, all writes are suspended", idx+1))
 			continue
 		}
-		_, err := zone.DeleteObject(ctx, bucket, prefix, ObjectOptions{DeletePrefix: true})
+		_, err := pool.DeleteObject(ctx, bucket, prefix, ObjectOptions{DeletePrefix: true})
 		if err != nil {
 			return err
 		}
 
@@ -28,13 +28,20 @@ export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9000/"
 ./mc admin policy set myminio/ lake,rw user=minio12345
 
 ./mc mb -l myminio/versioned
 
 ./mc mirror internal myminio/versioned/ --quiet >/dev/null
 
+## Soft delete (creates delete markers)
+./mc rm -r --force myminio/versioned >/dev/null
+
+## mirror again to create another set of version on top
+./mc mirror internal myminio/versioned/ --quiet >/dev/null
+
 user_count=$(./mc admin user list myminio/ | wc -l)
 policy_count=$(./mc admin policy list myminio/ | wc -l)
 
 kill $pid
-(minio server /tmp/xl/{1...10}/disk{0...1} /tmp/xl/{11...30}/disk{0...3} 2>&1 >/dev/null) &
+(minio server /tmp/xl/{1...10}/disk{0...1} /tmp/xl/{11...30}/disk{0...3} 2>&1 >/tmp/expanded.log) &
 pid=$!
 
 sleep 2
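The `mc rm -r --force` step added above performs the soft delete that creates the delete markers this commit must preserve. A hypothetical client-side equivalent using the minio-go v7 SDK, mirroring the script's test setup (the endpoint, credentials, and object name are illustrative):

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Hypothetical endpoint/credentials matching the test script.
	client, err := minio.New("localhost:9000", &minio.Options{
		Creds: credentials.NewStaticV4("minioadmin", "minioadmin", ""),
	})
	if err != nil {
		log.Fatal(err)
	}

	// On a versioned bucket, removing an object without specifying a
	// version ID performs a soft delete: it writes a delete marker on
	// top of the version stack rather than erasing any data.
	err = client.RemoveObject(context.Background(), "versioned", "some-object",
		minio.RemoveObjectOptions{})
	if err != nil {
		log.Fatal(err)
	}
}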
@@ -60,7 +67,9 @@ if [ $ret -ne 0 ]; then
 fi
 
 ./mc mirror cmd myminio/versioned/ --quiet >/dev/null
 
+./mc ls -r myminio/versioned/ > expanded_ns.txt
+./mc ls -r --versions myminio/versioned/ > expanded_ns_versions.txt
 
 ./mc admin decom start myminio/ /tmp/xl/{1...10}/disk{0...1}
 
@@ -98,6 +107,7 @@ if [ $ret -ne 0 ]; then
 fi
 
 ./mc ls -r myminio/versioned > decommissioned_ns.txt
+./mc ls -r --versions myminio/versioned > decommissioned_ns_versions.txt
 
 out=$(diff -qpruN expanded_ns.txt decommissioned_ns.txt)
 ret=$?
@@ -105,4 +115,10 @@ if [ $ret -ne 0 ]; then
 	echo "BUG: expected no missing entries after decommission: $out"
 fi
 
+out=$(diff -qpruN expanded_ns_versions.txt decommissioned_ns_versions.txt)
+ret=$?
+if [ $ret -ne 0 ]; then
+	echo "BUG: expected no missing entries after decommission: $out"
+fi
+
 kill $pid