mirror of https://github.com/minio/minio.git

fix: allow cancel of decom only when its in progress (#17607)

parent af3d99e35f
commit dfd7cca0d2
@@ -245,6 +245,7 @@ func (p *poolMeta) QueueBuckets(idx int, buckets []decomBucketInfo) {
 var (
 	errDecommissionAlreadyRunning = errors.New("decommission is already in progress")
 	errDecommissionComplete       = errors.New("decommission is complete, please remove the servers from command-line")
+	errDecommissionNotStarted     = errors.New("decommission is not in progress")
 )
 
 func (p *poolMeta) Decommission(idx int, pi poolSpaceInfo) error {
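The new errDecommissionNotStarted sentinel gives cancel a distinct "never started" failure mode that callers can branch on with errors.Is. A standalone sketch (not minio code; cancelDecommission is illustrative) of why that matters:

package main

import (
	"errors"
	"fmt"
)

// Same sentinel the commit introduces; everything else here is made up.
var errDecommissionNotStarted = errors.New("decommission is not in progress")

func cancelDecommission(started bool) error {
	if !started {
		return errDecommissionNotStarted
	}
	return nil
}

func main() {
	err := cancelDecommission(false)
	if errors.Is(err, errDecommissionNotStarted) {
		// Distinguishable from real failures, so it can be reported
		// as a client error rather than a server fault.
		fmt.Println("nothing to cancel:", err)
	}
}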
@@ -1183,7 +1184,11 @@ func (z *erasureServerPools) Status(ctx context.Context, idx int) (PoolStatus, e
 	poolInfo := z.poolMeta.Pools[idx]
 	if poolInfo.Decommission != nil {
 		poolInfo.Decommission.TotalSize = pi.Total
-		poolInfo.Decommission.CurrentSize = poolInfo.Decommission.StartSize + poolInfo.Decommission.BytesDone
+		if poolInfo.Decommission.Failed || poolInfo.Decommission.Canceled {
+			poolInfo.Decommission.CurrentSize = pi.Free
+		} else {
+			poolInfo.Decommission.CurrentSize = poolInfo.Decommission.StartSize + poolInfo.Decommission.BytesDone
+		}
 	} else {
 		poolInfo.Decommission = &PoolDecommissionInfo{
 			TotalSize: pi.Total,
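This hunk keeps progress reporting honest: once a decommission has failed or been canceled, StartSize + BytesDone is stale, so CurrentSize falls back to the pool's free space. A standalone sketch of the branch with made-up numbers (field names simplified):

package main

import "fmt"

// currentSize mirrors the branch added above.
func currentSize(failed, canceled bool, free, startSize, bytesDone int64) int64 {
	if failed || canceled {
		return free // drained bytes are no longer meaningful progress
	}
	return startSize + bytesDone
}

func main() {
	fmt.Println(currentSize(false, false, 900, 100, 250)) // in progress: 350
	fmt.Println(currentSize(false, true, 900, 100, 250))  // canceled: 900
}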
@@ -1219,15 +1224,21 @@ func (z *erasureServerPools) DecommissionCancel(ctx context.Context, idx int) (e
 	z.poolMetaMutex.Lock()
 	defer z.poolMetaMutex.Unlock()
 
+	fn := z.decommissionCancelers[idx]
+	if fn == nil {
+		// canceling a decommission before it started return an error.
+		return errDecommissionNotStarted
+	}
+
+	defer fn() // cancel any active thread.
+
 	if z.poolMeta.DecommissionCancel(idx) {
-		if fn := z.decommissionCancelers[idx]; fn != nil {
-			defer fn() // cancel any active thread.
-		}
 		if err = z.poolMeta.save(ctx, z.serverPools); err != nil {
 			return err
 		}
 		globalNotificationSys.ReloadPoolMeta(ctx)
 	}
 
 	return nil
 }
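This is the behavioral core of the commit: a nil entry in decommissionCancelers means no decommission was ever started on that pool, so cancel now fails fast with errDecommissionNotStarted instead of silently succeeding; and once a canceler is known to exist, it always runs via defer. A standalone sketch of the guard (types heavily simplified, not the real pool state):

package main

import (
	"errors"
	"fmt"
)

var errDecommissionNotStarted = errors.New("decommission is not in progress")

type serverPools struct {
	cancelers []func() // nil entry: decommission never started on this pool
}

func (z *serverPools) decommissionCancel(idx int) error {
	fn := z.cancelers[idx]
	if fn == nil {
		return errDecommissionNotStarted // fail fast, nothing to stop
	}
	defer fn() // always stop the active decommission routine on the way out
	// ... persist the canceled state here ...
	return nil
}

func main() {
	z := &serverPools{cancelers: make([]func(), 4)}
	fmt.Println(z.decommissionCancel(2)) // decommission is not in progress
}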
@@ -1245,8 +1256,9 @@ func (z *erasureServerPools) DecommissionFailed(ctx context.Context, idx int) (e
 
 	if z.poolMeta.DecommissionFailed(idx) {
 		if fn := z.decommissionCancelers[idx]; fn != nil {
-			defer fn() // cancel any active thread.
-		}
+			defer fn()
+		} // cancel any active thread.
+
 		if err = z.poolMeta.save(ctx, z.serverPools); err != nil {
 			return err
 		}
@@ -1269,8 +1281,9 @@ func (z *erasureServerPools) CompleteDecommission(ctx context.Context, idx int)
 
 	if z.poolMeta.DecommissionComplete(idx) {
 		if fn := z.decommissionCancelers[idx]; fn != nil {
-			defer fn() // cancel any active thread.
-		}
+			defer fn()
+		} // cancel any active thread.
+
 		if err = z.poolMeta.save(ctx, z.serverPools); err != nil {
 			return err
 		}
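The two hunks above only move the comment onto the closing brace; behavior is unchanged. One point worth spelling out about this pattern: a defer registered inside an if block fires when the surrounding function returns, not when the block ends. A standalone snippet demonstrating that:

package main

import "fmt"

func demo() {
	if fn := func() { fmt.Println("canceler ran") }; fn != nil {
		defer fn()
	} // the defer fires at demo's return, not at this brace
	fmt.Println("saving pool metadata")
}

func main() {
	demo()
	// Output:
	// saving pool metadata
	// canceler ran
}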
@@ -84,7 +84,7 @@ remote_arn=$(./mc replicate ls sitea/bucket --json | jq -r .rule.Destination.Buc
 sleep 1
 
 ./mc replicate resync start sitea/bucket/ --remote-bucket "${remote_arn}"
-sleep 20s ## sleep for 20s idea is that we give 200ms per object.
+sleep 30s ## sleep for 30s idea is that we give 300ms per object.
 
 count=$(./mc replicate resync status sitea/bucket --remote-bucket "${remote_arn}" --json | jq .resyncInfo.target[].replicationCount)
@@ -99,7 +99,7 @@ if [ $ret -ne 0 ]; then
 fi
 
 if [ $count -ne 12 ]; then
-	echo "resync not complete after 10s unexpected failure"
+	echo "resync not complete after 30s - unexpected failure"
 	./mc diff sitea/bucket siteb/bucket
 	exit 1
 fi
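Bumping the fixed sleep from 20s to 30s (budgeting 300ms per object) makes the resync check less flaky, but any fixed sleep can still race a slow runner. A sketch of a poll loop using the same mc and jq invocations as the script, with an arbitrary 60-attempt ceiling (the loop itself is a suggestion, not part of this commit):

# Poll resync status once per second instead of sleeping a fixed 30s.
# Assumes remote_arn is set as earlier in the script; 12 is the count
# the check below already expects.
for _ in $(seq 1 60); do
	count=$(./mc replicate resync status sitea/bucket --remote-bucket "${remote_arn}" --json | jq .resyncInfo.target[].replicationCount)
	if [ "${count}" = "12" ]; then
		break
	fi
	sleep 1
done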
@@ -33,7 +33,7 @@ sleep 2
 ./mc admin policy create myminio/ lake ./docs/distributed/rw.json
 
 ./mc admin policy attach myminio/ rw --user=minio123
-./mc admin policy attach myminio/ lake,rw --user=minio12345
+./mc admin policy attach myminio/ lake --user=minio12345
 
 ./mc mb -l myminio/versioned
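This one-line script fix (attach only lake, not lake,rw, to minio12345) repeats in the three hunks below. After attach calls like these, the resulting policy set can be checked with mc admin user info; the verification step is this editor's suggestion, not part of the scripts:

# Verify which policies ended up attached to each test user.
./mc admin user info myminio/ minio123
./mc admin user info myminio/ minio12345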
@@ -28,7 +28,7 @@ sleep 2
 ./mc admin policy create myminio/ lake ./docs/distributed/rw.json
 
 ./mc admin policy attach myminio/ rw --user=minio123
-./mc admin policy attach myminio/ lake,rw --user=minio12345
+./mc admin policy attach myminio/ lake --user=minio12345
 
 ./mc mb -l myminio/versioned
@@ -30,7 +30,7 @@ export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9000/"
 ./mc admin policy create myminio/ lake ./docs/distributed/rw.json
 
 ./mc admin policy attach myminio/ rw --user=minio123
-./mc admin policy attach myminio/ lake,rw --user=minio12345
+./mc admin policy attach myminio/ lake --user=minio12345
 
 ./mc mb -l myminio/versioned
@@ -15,7 +15,7 @@ fi
 
 export CI=true
 
-(minio server /tmp/xl/{1...10}/disk{0...1} 2>&1 >/dev/null) &
+(minio server /tmp/xl/{1...10}/disk{0...1} 2>&1 >/tmp/decom.log) &
 pid=$!
 
 sleep 2
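Redirecting server output to /tmp/decom.log instead of /dev/null preserves evidence when the decommission test fails in CI. A sketch of an exit trap that surfaces the log on failure (the trap is a suggestion, not part of this script):

# Dump the server log whenever the script exits non-zero.
on_exit() {
	if [ $? -ne 0 ]; then
		echo "=== /tmp/decom.log ==="
		cat /tmp/decom.log
	fi
}
trap on_exit EXIT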
@@ -29,7 +29,7 @@ export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9000/"
 ./mc admin policy create myminio/ lake ./docs/distributed/rw.json
 
 ./mc admin policy attach myminio/ rw --user=minio123
-./mc admin policy attach myminio/ lake,rw --user=minio12345
+./mc admin policy attach myminio/ lake --user=minio12345
 
 ./mc mb -l myminio/versioned