diff --git a/cmd/erasure-server-pool-decom.go b/cmd/erasure-server-pool-decom.go
index 3ca8baf4f..3dba71ed5 100644
--- a/cmd/erasure-server-pool-decom.go
+++ b/cmd/erasure-server-pool-decom.go
@@ -245,6 +245,7 @@ func (p *poolMeta) QueueBuckets(idx int, buckets []decomBucketInfo) {
 var (
 	errDecommissionAlreadyRunning = errors.New("decommission is already in progress")
 	errDecommissionComplete       = errors.New("decommission is complete, please remove the servers from command-line")
+	errDecommissionNotStarted     = errors.New("decommission is not in progress")
 )
 
 func (p *poolMeta) Decommission(idx int, pi poolSpaceInfo) error {
@@ -1183,7 +1184,11 @@ func (z *erasureServerPools) Status(ctx context.Context, idx int) (PoolStatus, e
 	poolInfo := z.poolMeta.Pools[idx]
 	if poolInfo.Decommission != nil {
 		poolInfo.Decommission.TotalSize = pi.Total
-		poolInfo.Decommission.CurrentSize = poolInfo.Decommission.StartSize + poolInfo.Decommission.BytesDone
+		if poolInfo.Decommission.Failed || poolInfo.Decommission.Canceled {
+			poolInfo.Decommission.CurrentSize = pi.Free
+		} else {
+			poolInfo.Decommission.CurrentSize = poolInfo.Decommission.StartSize + poolInfo.Decommission.BytesDone
+		}
 	} else {
 		poolInfo.Decommission = &PoolDecommissionInfo{
 			TotalSize: pi.Total,
@@ -1219,15 +1224,21 @@ func (z *erasureServerPools) DecommissionCancel(ctx context.Context, idx int) (e
 	z.poolMetaMutex.Lock()
 	defer z.poolMetaMutex.Unlock()
 
+	fn := z.decommissionCancelers[idx]
+	if fn == nil {
+		// Canceling a decommission before it has started returns an error.
+		return errDecommissionNotStarted
+	}
+
+	defer fn() // cancel any active thread.
+
 	if z.poolMeta.DecommissionCancel(idx) {
-		if fn := z.decommissionCancelers[idx]; fn != nil {
-			defer fn() // cancel any active thread.
-		}
 		if err = z.poolMeta.save(ctx, z.serverPools); err != nil {
 			return err
 		}
 		globalNotificationSys.ReloadPoolMeta(ctx)
 	}
+
 	return nil
 }
@@ -1245,8 +1256,9 @@ func (z *erasureServerPools) DecommissionFailed(ctx context.Context, idx int) (e
 
 	if z.poolMeta.DecommissionFailed(idx) {
 		if fn := z.decommissionCancelers[idx]; fn != nil {
 			defer fn() // cancel any active thread.
 		}
+
 		if err = z.poolMeta.save(ctx, z.serverPools); err != nil {
 			return err
 		}
@@ -1269,8 +1281,9 @@ func (z *erasureServerPools) CompleteDecommission(ctx context.Context, idx int)
 
 	if z.poolMeta.DecommissionComplete(idx) {
 		if fn := z.decommissionCancelers[idx]; fn != nil {
 			defer fn() // cancel any active thread.
 		}
+
 		if err = z.poolMeta.save(ctx, z.serverPools); err != nil {
 			return err
 		}
diff --git a/docs/bucket/replication/setup_2site_existing_replication.sh b/docs/bucket/replication/setup_2site_existing_replication.sh
index 15f0f3be9..d720439d6 100755
--- a/docs/bucket/replication/setup_2site_existing_replication.sh
+++ b/docs/bucket/replication/setup_2site_existing_replication.sh
@@ -84,7 +84,7 @@ remote_arn=$(./mc replicate ls sitea/bucket --json | jq -r .rule.Destination.Buc
 sleep 1
 
 ./mc replicate resync start sitea/bucket/ --remote-bucket "${remote_arn}"
-sleep 20s ## sleep for 20s idea is that we give 200ms per object.
+sleep 30s ## sleep for 30s: the idea is to give 300ms per object.
 
 count=$(./mc replicate resync status sitea/bucket --remote-bucket "${remote_arn}" --json | jq .resyncInfo.target[].replicationCount)
@@ -99,7 +99,7 @@ if [ $ret -ne 0 ]; then
 fi
 
 if [ $count -ne 12 ]; then
-	echo "resync not complete after 10s unexpected failure"
+	echo "resync not complete after 30s - unexpected failure"
 	./mc diff sitea/bucket siteb/bucket
 	exit 1
 fi
diff --git a/docs/distributed/decom-compressed-sse-s3.sh b/docs/distributed/decom-compressed-sse-s3.sh
index 64bb63a30..dd1b0ea01 100644
--- a/docs/distributed/decom-compressed-sse-s3.sh
+++ b/docs/distributed/decom-compressed-sse-s3.sh
@@ -33,7 +33,7 @@ sleep 2
 
 ./mc admin policy create myminio/ lake ./docs/distributed/rw.json
 
 ./mc admin policy attach myminio/ rw --user=minio123
-./mc admin policy attach myminio/ lake,rw --user=minio12345
+./mc admin policy attach myminio/ lake --user=minio12345
 
 ./mc mb -l myminio/versioned
diff --git a/docs/distributed/decom-encrypted-sse-s3.sh b/docs/distributed/decom-encrypted-sse-s3.sh
index 76223cadf..6d451b94f 100644
--- a/docs/distributed/decom-encrypted-sse-s3.sh
+++ b/docs/distributed/decom-encrypted-sse-s3.sh
@@ -28,7 +28,7 @@ sleep 2
 
 ./mc admin policy create myminio/ lake ./docs/distributed/rw.json
 
 ./mc admin policy attach myminio/ rw --user=minio123
-./mc admin policy attach myminio/ lake,rw --user=minio12345
+./mc admin policy attach myminio/ lake --user=minio12345
 
 ./mc mb -l myminio/versioned
diff --git a/docs/distributed/decom-encrypted.sh b/docs/distributed/decom-encrypted.sh
index 5e1a2167f..173e1653d 100644
--- a/docs/distributed/decom-encrypted.sh
+++ b/docs/distributed/decom-encrypted.sh
@@ -30,7 +30,7 @@ export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9000/"
 
 ./mc admin policy create myminio/ lake ./docs/distributed/rw.json
 
 ./mc admin policy attach myminio/ rw --user=minio123
-./mc admin policy attach myminio/ lake,rw --user=minio12345
+./mc admin policy attach myminio/ lake --user=minio12345
 
 ./mc mb -l myminio/versioned
diff --git a/docs/distributed/decom.sh b/docs/distributed/decom.sh
index 21a6cd64a..53c373623 100755
--- a/docs/distributed/decom.sh
+++ b/docs/distributed/decom.sh
@@ -15,7 +15,7 @@ fi
 
 export CI=true
 
-(minio server /tmp/xl/{1...10}/disk{0...1} 2>&1 >/dev/null) &
+(minio server /tmp/xl/{1...10}/disk{0...1} 2>&1 >/tmp/decom.log) &
 pid=$!
 
 sleep 2
@@ -29,7 +29,7 @@ export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9000/"
 
 ./mc admin policy create myminio/ lake ./docs/distributed/rw.json
 
 ./mc admin policy attach myminio/ rw --user=minio123
-./mc admin policy attach myminio/ lake,rw --user=minio12345
+./mc admin policy attach myminio/ lake --user=minio12345
 
 ./mc mb -l myminio/versioned
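Note: the behavioral change in `DecommissionCancel` reduces to the control flow sketched below. This is a minimal sketch with stand-in types, not the actual `erasureServerPools` implementation; only `errDecommissionNotStarted` and the nil-canceler check mirror the patch, while the `pools` struct, its `cancelers` slice, and `main` are illustrative.

```go
// Sketch of the new cancel semantics: a nil canceler means the
// decommission was never started, so canceling it is an error.
package main

import (
	"errors"
	"fmt"
)

var errDecommissionNotStarted = errors.New("decommission is not in progress")

// pools is a stand-in for erasureServerPools; one cancel func per pool.
type pools struct {
	cancelers []func() // nil entry => no decommission running for that pool
}

func (p *pools) DecommissionCancel(idx int) error {
	fn := p.cancelers[idx]
	if fn == nil {
		// Canceling a decommission before it has started returns an error.
		return errDecommissionNotStarted
	}
	defer fn() // cancel any active decommission routine.
	// ... persist pool metadata and notify peers here ...
	return nil
}

func main() {
	p := &pools{cancelers: make([]func(), 2)}
	p.cancelers[1] = func() { fmt.Println("pool 1: decommission canceled") }

	fmt.Println(p.DecommissionCancel(0)) // decommission is not in progress
	fmt.Println(p.DecommissionCancel(1)) // canceler runs, then <nil>
}
```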