Mirror of https://github.com/minio/minio.git
initialize the disk healer early on (#19143)
This PR fixes a bug that has likely been present for a long time and has no visible workaround: in any deployment, if an entire erasure set is deleted, the cluster has no way to recover. The fix is to initialize the disk healer early on, while the erasure server pools are being constructed, so the background healing needed to rebuild the wiped set is already running.
@@ -175,8 +175,29 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServ
    z.poolMeta = newPoolMeta(z, poolMeta{})
    z.poolMeta.dontSave = true

    bootstrapTrace("newSharedLock", func() {
        globalLeaderLock = newSharedLock(GlobalContext, z, "leader.lock")
    })

    // Enable background operations on
    //
    // - Disk auto healing
    // - MRF (most recently failed) healing
    // - Background expiration routine for lifecycle policies
    bootstrapTrace("initAutoHeal", func() {
        initAutoHeal(GlobalContext, z)
    })

    bootstrapTrace("initHealMRF", func() {
        go globalMRFState.healRoutine(z)
    })

    bootstrapTrace("initBackgroundExpiry", func() {
        initBackgroundExpiry(GlobalContext, z)
    })

    // initialize the object layer.
    setObjectLayer(z)
    defer setObjectLayer(z)

    r := rand.New(rand.NewSource(time.Now().UnixNano()))
    attempt := 1
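For context, each startup step in the hunk above is wrapped in a bootstrapTrace(...) call, which runs a named closure and records that step in the server's bootstrap trace. The sketch below is a minimal, self-contained illustration of that pattern and of the ordering this commit introduces (background healing first, pool metadata initialization after); the helper body, the printed timings, and the stand-in steps are assumptions for illustration, not MinIO's actual implementation.

package main

import (
    "fmt"
    "time"
)

// bootstrapTrace runs one named startup step and reports how long it took.
// MinIO's real helper records these steps for its bootstrap trace instead.
func bootstrapTrace(name string, step func()) {
    start := time.Now()
    step()
    fmt.Printf("bootstrap: %-22s %v\n", name, time.Since(start))
}

func main() {
    // Background services start first, before the potentially slow pool
    // initialization, mirroring the ordering introduced by this commit.
    bootstrapTrace("initAutoHeal", func() {
        go func() { /* disk auto-healing loop would run here */ }()
    })
    bootstrapTrace("initBackgroundExpiry", func() {
        go func() { /* lifecycle expiration loop would run here */ }()
    })
    bootstrapTrace("poolMeta.Init", func() {
        time.Sleep(50 * time.Millisecond) // stand-in for loading pool metadata
    })
}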