fix: decommission delete markers for non-current objects (#15225)
Decommissioning a versioned bucket was not re-creating the delete markers present in an object's version stack, which effectively prevented decommission from succeeding. This PR creates such delete markers properly during the decommissioning process and adds tests.
commit 9d80ff5a05
parent 39b3941892

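The gist of the fix, shown as a minimal standalone Go sketch rather than the actual cmd package code: when decommission replays an object's version stack onto the remaining pools, a version that is a delete marker must be re-created as a versioned delete carrying the marker flag, instead of being dropped. The objectVersion type and replayVersion function below are illustrative stand-ins, not MinIO internals.

```go
package main

import (
	"fmt"
	"time"
)

// objectVersion is an illustrative stand-in for one entry in an object's
// version stack (not the real cmd package type).
type objectVersion struct {
	VersionID    string
	ModTime      time.Time
	DeleteMarker bool
}

// replayVersion sketches the per-version decision: delete markers are
// replayed as versioned deletes that carry the marker flag, everything else
// is copied as a regular versioned write.
func replayVersion(bucket, object string, v objectVersion) string {
	if v.DeleteMarker {
		// Before this fix the marker flag was not propagated, so the
		// marker was missing on the target and decommission of
		// versioned buckets could not complete.
		return fmt.Sprintf("DELETE %s/%s versionID=%s deleteMarker=true", bucket, object, v.VersionID)
	}
	return fmt.Sprintf("PUT %s/%s versionID=%s", bucket, object, v.VersionID)
}

func main() {
	now := time.Now()
	stack := []objectVersion{
		{VersionID: "v3", ModTime: now, DeleteMarker: true},
		{VersionID: "v2", ModTime: now.Add(-time.Hour)},
		{VersionID: "v1", ModTime: now.Add(-2 * time.Hour)},
	}
	for _, v := range stack {
		fmt.Println(replayVersion("versioned", "photo.jpg", v))
	}
}
```

In the diff below this corresponds to setting DeleteMarker: true alongside VersionID, MTime, and DeleteReplication when issuing the delete for a delete-marker version.
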
@@ -689,6 +689,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
 						VersionID:         version.VersionID,
 						MTime:             version.ModTime,
 						DeleteReplication: version.ReplicationState,
+						DeleteMarker:      true, // make sure we create a delete marker
 					})
 					var failure bool
 					if err != nil {

@@ -698,10 +699,10 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
 					z.poolMetaMutex.Lock()
 					z.poolMeta.CountItem(idx, 0, failure)
 					z.poolMetaMutex.Unlock()
-					if failure {
-						break // break out on first error
+					if !failure {
+						// Success keep a count.
+						decommissionedCount++
 					}
-					decommissionedCount++
 					continue
 				}
 

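The second hunk above also changes the bookkeeping in this branch: instead of breaking out on the first failed operation, the loop now counts only the successful ones and moves on to the next entry. A rough, self-contained sketch of that pattern, assuming a hypothetical replay callback (none of these names are the real decommissionPool internals):

```go
package main

import "fmt"

// decommissionVersions sketches the revised bookkeeping: a failure no longer
// aborts the loop; only successful operations are counted. replay is a
// hypothetical stand-in for the per-version copy/delete step.
func decommissionVersions(versions []string, replay func(string) error) (decommissioned int) {
	for _, v := range versions {
		failure := replay(v) != nil
		if !failure {
			// Success keep a count (previously: break on first error).
			decommissioned++
		}
	}
	return decommissioned
}

func main() {
	replay := func(v string) error {
		if v == "v2" {
			return fmt.Errorf("simulated error replaying %s", v)
		}
		return nil
	}
	// v2 fails, but v1 and v3 are still counted.
	fmt.Println("decommissioned:", decommissionVersions([]string{"v1", "v2", "v3"}, replay))
}
```
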
@@ -119,7 +119,7 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServ
 		}
 
 		if deploymentID == "" {
-			// all zones should have same deployment ID
+			// all pools should have same deployment ID
 			deploymentID = formats[i].ID
 		}
 

@@ -457,7 +457,7 @@ func (z *erasureServerPools) getPoolIdxExistingWithOpts(ctx context.Context, buc
 // If the object exists, but the latest version is a delete marker, the index with it is still returned.
 // If the object does not exist ObjectNotFound error is returned.
 // If any other error is found, it is returned.
-// The check is skipped if there is only one zone, and 0, nil is always returned in that case.
+// The check is skipped if there is only one pool, and 0, nil is always returned in that case.
 func (z *erasureServerPools) getPoolIdxExistingNoLock(ctx context.Context, bucket, object string) (idx int, err error) {
 	return z.getPoolIdxExistingWithOpts(ctx, bucket, object, ObjectOptions{
 		NoLock: true,

@@ -881,7 +881,7 @@ func (z *erasureServerPools) getLatestObjectInfoWithIdx(ctx context.Context, buc
 	sort.Slice(results, func(i, j int) bool {
 		a, b := results[i], results[j]
 		if a.oi.ModTime.Equal(b.oi.ModTime) {
-			// On tiebreak, select the lowest zone index.
+			// On tiebreak, select the lowest pool index.
 			return a.zIdx < b.zIdx
 		}
 		return a.oi.ModTime.After(b.oi.ModTime)

@@ -975,12 +975,12 @@ func (z *erasureServerPools) PutObject(ctx context.Context, bucket string, objec
 }
 
 func (z *erasureServerPools) deletePrefix(ctx context.Context, bucket string, prefix string) error {
-	for idx, zone := range z.serverPools {
+	for idx, pool := range z.serverPools {
 		if z.IsSuspended(idx) {
 			logger.LogIf(ctx, fmt.Errorf("pool %d is suspended, all writes are suspended", idx+1))
 			continue
 		}
-		_, err := zone.DeleteObject(ctx, bucket, prefix, ObjectOptions{DeletePrefix: true})
+		_, err := pool.DeleteObject(ctx, bucket, prefix, ObjectOptions{DeletePrefix: true})
 		if err != nil {
 			return err
 		}

@@ -28,13 +28,20 @@ export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9000/"
 ./mc admin policy set myminio/ lake,rw user=minio12345
 
 ./mc mb -l myminio/versioned
 
+./mc mirror internal myminio/versioned/ --quiet >/dev/null
+
+## Soft delete (creates delete markers)
+./mc rm -r --force myminio/versioned >/dev/null
+
+## mirror again to create another set of version on top
 ./mc mirror internal myminio/versioned/ --quiet >/dev/null
 
 user_count=$(./mc admin user list myminio/ | wc -l)
 policy_count=$(./mc admin policy list myminio/ | wc -l)
 
 kill $pid
-(minio server /tmp/xl/{1...10}/disk{0...1} /tmp/xl/{11...30}/disk{0...3} 2>&1 >/dev/null) &
+(minio server /tmp/xl/{1...10}/disk{0...1} /tmp/xl/{11...30}/disk{0...3} 2>&1 >/tmp/expanded.log) &
 pid=$!
 
 sleep 2

@@ -60,7 +67,9 @@ if [ $ret -ne 0 ]; then
 fi
 
 ./mc mirror cmd myminio/versioned/ --quiet >/dev/null
 
 ./mc ls -r myminio/versioned/ > expanded_ns.txt
+./mc ls -r --versions myminio/versioned/ > expanded_ns_versions.txt
 
 ./mc admin decom start myminio/ /tmp/xl/{1...10}/disk{0...1}
 

@@ -98,6 +107,7 @@ if [ $ret -ne 0 ]; then
 fi
 
 ./mc ls -r myminio/versioned > decommissioned_ns.txt
+./mc ls -r --versions myminio/versioned > decommissioned_ns_versions.txt
 
 out=$(diff -qpruN expanded_ns.txt decommissioned_ns.txt)
 ret=$?

@@ -105,4 +115,10 @@ if [ $ret -ne 0 ]; then
     echo "BUG: expected no missing entries after decommission: $out"
 fi
 
+out=$(diff -qpruN expanded_ns_versions.txt decommissioned_ns_versions.txt)
+ret=$?
+if [ $ret -ne 0 ]; then
+    echo "BUG: expected no missing entries after decommission: $out"
+fi
+
 kill $pid