feat: decommission feature for pools (#14012)
```
λ mc admin decommission start alias/ http://minio{1...2}/data{1...4}
```

```
λ mc admin decommission status alias/
┌─────┬─────────────────────────────────┬──────────────────────────────────┬────────┐
│ ID  │ Pools                           │ Capacity                         │ Status │
│ 1st │ http://minio{1...2}/data{1...4} │ 439 GiB (used) / 561 GiB (total) │ Active │
│ 2nd │ http://minio{3...4}/data{1...4} │ 329 GiB (used) / 421 GiB (total) │ Active │
└─────┴─────────────────────────────────┴──────────────────────────────────┴────────┘
```

```
λ mc admin decommission status alias/ http://minio{1...2}/data{1...4}
Progress: ===================> [1GiB/sec] [15%] [4TiB/50TiB]
Time Remaining: 4 hours (started 3 hours ago)
```

```
λ mc admin decommission status alias/ http://minio{1...2}/data{1...4}
ERROR: This pool is not scheduled for decommissioning currently.
```

```
λ mc admin decommission cancel alias/
┌─────┬─────────────────────────────────┬──────────────────────────────────┬──────────┐
│ ID  │ Pools                           │ Capacity                         │ Status   │
│ 1st │ http://minio{1...2}/data{1...4} │ 439 GiB (used) / 561 GiB (total) │ Draining │
└─────┴─────────────────────────────────┴──────────────────────────────────┴──────────┘
```

> NOTE: A canceled decommission will not make the pool active again, since the other
> pools may already hold partially duplicated content. To avoid this scenario, be
> very sure to start decommissioning only as a planned activity.

```
λ mc admin decommission cancel alias/ http://minio{1...2}/data{1...4}
┌─────┬─────────────────────────────────┬──────────────────────────────────┬────────────────────┐
│ ID  │ Pools                           │ Capacity                         │ Status             │
│ 1st │ http://minio{1...2}/data{1...4} │ 439 GiB (used) / 561 GiB (total) │ Draining(Canceled) │
└─────┴─────────────────────────────────┴──────────────────────────────────┴────────────────────┘
```
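The NOTE above implies that decommission state is sticky per pool: once a drain starts, even a cancel leaves the pool in a terminal `Draining(Canceled)` state rather than returning it to `Active`. A minimal sketch of that bookkeeping follows; the type and field names (`PoolStatus`, `PoolDecommissionInfo`, `status`) are hypothetical illustrations of the status transitions shown in the tables above, not MinIO's actual structures:

```
package main

import (
	"fmt"
	"time"
)

// PoolDecommissionInfo tracks one pool's drain. Hypothetical type,
// assumed for illustration only.
type PoolDecommissionInfo struct {
	StartTime time.Time
	Complete  bool
	Canceled  bool
}

// PoolStatus is roughly one row of `mc admin decommission status`.
type PoolStatus struct {
	ID           int
	CmdLine      string // e.g. http://minio{1...2}/data{1...4}
	Decommission *PoolDecommissionInfo
}

// status renders the Status column. A canceled drain never goes
// back to Active, matching the NOTE above.
func (p PoolStatus) status() string {
	d := p.Decommission
	switch {
	case d == nil:
		return "Active"
	case d.Canceled:
		return "Draining(Canceled)"
	case d.Complete:
		return "Complete"
	default:
		return "Draining"
	}
}

func main() {
	p := PoolStatus{
		ID:           1,
		CmdLine:      "http://minio{1...2}/data{1...4}",
		Decommission: &PoolDecommissionInfo{StartTime: time.Now(), Canceled: true},
	}
	fmt.Println(p.CmdLine, "=>", p.status()) // => Draining(Canceled)
}
```

Because this state must survive restarts and be visible to every node, it is persisted as pool metadata, which is what the `ReloadPoolMeta` notification in the hunk below propagates to peers.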
```
@@ -616,6 +616,26 @@ func (sys *NotificationSys) GetClusterBucketStats(ctx context.Context, bucketNam
 	return bucketStats
 }
 
+// ReloadPoolMeta reloads on disk updates on pool metadata
+func (sys *NotificationSys) ReloadPoolMeta(ctx context.Context) {
+	ng := WithNPeers(len(sys.peerClients))
+	for idx, client := range sys.peerClients {
+		if client == nil {
+			continue
+		}
+		client := client
+		ng.Go(ctx, func() error {
+			return client.ReloadPoolMeta(ctx)
+		}, idx, *client.host)
+	}
+	for _, nErr := range ng.Wait() {
+		reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String())
+		if nErr.Err != nil {
+			logger.LogIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err)
+		}
+	}
+}
+
 // LoadTransitionTierConfig notifies remote peers to load their remote tier
 // configs from config store.
 func (sys *NotificationSys) LoadTransitionTierConfig(ctx context.Context) {
```
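The hunk follows MinIO's usual peer fan-out pattern: `WithNPeers` sizes a notification group, `ng.Go` issues one `ReloadPoolMeta` RPC per peer, and `ng.Wait()` yields one error per host for logging. The `client := client` line re-binds the loop variable so each goroutine captures its own client rather than the shared iterator (necessary before Go 1.22). A minimal, self-contained sketch of the same pattern using only the standard library — `fanOut`, `peerErr`, and the peer addresses are hypothetical stand-ins, not MinIO APIs:

```
package main

import (
	"context"
	"fmt"
	"sync"
)

// peerErr pairs a peer address with the error its call returned,
// mirroring how NotificationSys logs failures per host.
type peerErr struct {
	host string
	err  error
}

// fanOut invokes fn once per peer concurrently and returns one
// result per peer, analogous to WithNPeers + ng.Go + ng.Wait.
func fanOut(ctx context.Context, peers []string, fn func(context.Context, string) error) []peerErr {
	errs := make([]peerErr, len(peers))
	var wg sync.WaitGroup
	for i, p := range peers {
		i, p := i, p // capture loop variables, as `client := client` does above
		wg.Add(1)
		go func() {
			defer wg.Done()
			errs[i] = peerErr{host: p, err: fn(ctx, p)}
		}()
	}
	wg.Wait()
	return errs
}

func main() {
	peers := []string{"minio1:9000", "minio2:9000"} // hypothetical peer addresses
	results := fanOut(context.Background(), peers, func(ctx context.Context, host string) error {
		// stand-in for client.ReloadPoolMeta(ctx)
		fmt.Println("reloading pool metadata on", host)
		return nil
	})
	for _, pe := range results {
		if pe.err != nil {
			fmt.Println("peer", pe.host, "failed:", pe.err)
		}
	}
}
```

Collecting one result slot per peer instead of aborting on the first failure matches the diff's behavior: every peer is attempted, and errors are logged per host afterwards.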