add support for configurable replication MRF workers (#12125)

Just like regular replication workers, allow the number
of failed-replication (MRF) workers to be configurable,
so that in situations such as DR failures, replication
can catch up sooner once the DR target is back online.

Signed-off-by: Harshavardhana <harsha@minio.io>
Harshavardhana authored on 2021-04-23 21:58:45 -07:00, committed by GitHub
parent 014e419151
commit 82dc6aff1c
5 changed files with 122 additions and 63 deletions
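
For orientation before the diff: a minimal sketch of how a resizable pool of
failed-replication (MRF) workers might be structured in Go. Only the names
globalReplicationPool, ResizeWorkers and ResizeFailedWorkers come from the
diff below; the struct fields, retry channel and worker logic here are
illustrative assumptions, not MinIO's actual implementation.

package main

import (
	"context"
	"fmt"
	"sync"
)

// replicationOp stands in for a queued replication task (hypothetical type).
type replicationOp struct {
	bucket, object string
}

// ReplicationPool sketches a pool that drains a queue of previously failed
// replication operations using a resizable set of worker goroutines.
type ReplicationPool struct {
	ctx           context.Context
	mrfReplicaCh  chan replicationOp   // queue of failed operations to retry
	mu            sync.Mutex
	failedCancels []context.CancelFunc // one cancel func per failed-op worker
}

func NewReplicationPool(ctx context.Context, failedWorkers int) *ReplicationPool {
	p := &ReplicationPool{
		ctx:          ctx,
		mrfReplicaCh: make(chan replicationOp, 1000),
	}
	p.ResizeFailedWorkers(failedWorkers)
	return p
}

// ResizeFailedWorkers grows or shrinks the set of goroutines retrying failed
// replication operations without dropping anything already queued.
func (p *ReplicationPool) ResizeFailedWorkers(n int) {
	p.mu.Lock()
	defer p.mu.Unlock()

	// Grow: start additional workers draining the same retry queue.
	for len(p.failedCancels) < n {
		ctx, cancel := context.WithCancel(p.ctx)
		p.failedCancels = append(p.failedCancels, cancel)
		go p.failedWorker(ctx)
	}
	// Shrink: cancel surplus workers; queued operations stay for the rest.
	for len(p.failedCancels) > n {
		last := len(p.failedCancels) - 1
		p.failedCancels[last]()
		p.failedCancels = p.failedCancels[:last]
	}
}

// failedWorker retries queued failed operations until its context is canceled.
func (p *ReplicationPool) failedWorker(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case op := <-p.mrfReplicaCh:
			// Placeholder for actually re-replicating the object.
			fmt.Printf("retrying replication of %s/%s\n", op.bucket, op.object)
		}
	}
}

func main() {
	pool := NewReplicationPool(context.Background(), 4)
	// On a dynamic config change, as apiConfig.init does in the diff below:
	pool.ResizeFailedWorkers(8)
}

Raising the failed-worker count simply puts more goroutines on the same retry
queue, which is why a larger value helps a backlogged DR target catch up
faster once it is reachable again.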


@@ -37,8 +37,9 @@ type apiConfig struct {
 	extendListLife   time.Duration
 	corsAllowOrigins []string
 	// total drives per erasure set across pools.
-	totalDriveCount    int
-	replicationWorkers int
+	totalDriveCount          int
+	replicationWorkers       int
+	replicationFailedWorkers int
 }
 
 func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {
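
The cfg.ReplicationFailedWorkers value consumed in the next hunk comes from
the api config subsystem, one of the five changed files not shown in this
excerpt. By analogy with the existing ReplicationWorkers knob it presumably
looks something like the following; only the two field names are confirmed by
this diff, the tags and key names are assumptions.

// Assumed sketch of the relevant part of api.Config; not taken from this diff.
type Config struct {
	ReplicationWorkers       int `json:"replication_workers"`
	ReplicationFailedWorkers int `json:"replication_failed_workers"`
}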
@@ -83,8 +84,10 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {
 	t.extendListLife = cfg.ExtendListLife
 	if globalReplicationPool != nil &&
 		cfg.ReplicationWorkers != t.replicationWorkers {
-		globalReplicationPool.Resize(cfg.ReplicationWorkers)
+		globalReplicationPool.ResizeFailedWorkers(cfg.ReplicationFailedWorkers)
+		globalReplicationPool.ResizeWorkers(cfg.ReplicationWorkers)
 	}
+	t.replicationFailedWorkers = cfg.ReplicationFailedWorkers
 	t.replicationWorkers = cfg.ReplicationWorkers
 }
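
Since apiConfig.init is applied when the API configuration is loaded or
dynamically updated, both worker counts can be changed on a running cluster:
after a DR outage an operator could temporarily raise the failed-replication
worker count (for example with something like
"mc admin config set ALIAS api replication_failed_workers=8", assuming the key
follows the existing replication_workers naming; the key definition lives in
one of the changed files not shown here) and the pool resizes without a
restart.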
@@ -166,6 +169,13 @@ func maxClients(f http.HandlerFunc) http.HandlerFunc {
 	}
 }
 
+func (t *apiConfig) getReplicationFailedWorkers() int {
+	t.mu.RLock()
+	defer t.mu.RUnlock()
+	return t.replicationFailedWorkers
+}
+
 func (t *apiConfig) getReplicationWorkers() int {
 	t.mu.RLock()
 	defer t.mu.RUnlock()