feat: Single drive XL implementation (#14970)
Main motivation is to move towards a common backend format for all the different modes in MinIO, allowing for simpler code and predictable behavior across all features. This PR also brings features such as versioning, replication, and transitioning to single-drive setups.
@@ -222,6 +222,7 @@ func serverHandleCmdArgs(ctx *cli.Context) {
 	if globalIsDistErasure {
 		globalIsErasure = true
 	}
+	globalIsErasureSD = (setupType == ErasureSDSetupType)
 }
 
 func serverHandleEnvVars() {
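
For orientation, here is a minimal, self-contained sketch of the kind of dispatch this new flag enables. The SetupType constants mirror the ones referenced in the hunk, but pickSetupType and its inputs are hypothetical stand-ins, not MinIO's actual detection logic:

package main

import "fmt"

// SetupType mirrors the constants referenced in the hunk above; the
// concrete values and the ordering here are illustrative only.
type SetupType int

const (
	FSSetupType SetupType = iota
	ErasureSDSetupType // single-drive erasure, i.e. "single drive XL"
	ErasureSetupType
	DistErasureSetupType
)

// pickSetupType is a hypothetical helper: with this change, one local
// drive maps to the single-drive erasure mode rather than legacy FS.
func pickSetupType(nDrives int, distributed bool) SetupType {
	switch {
	case distributed:
		return DistErasureSetupType
	case nDrives == 1:
		return ErasureSDSetupType
	default:
		return ErasureSetupType
	}
}

func main() {
	fmt.Println(pickSetupType(1, false) == ErasureSDSetupType) // true
}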
@@ -232,13 +233,11 @@ func serverHandleEnvVars() {
 var globalHealStateLK sync.RWMutex
 
 func initAllSubsystems() {
-	if globalIsErasure {
-		globalHealStateLK.Lock()
-		// New global heal state
-		globalAllHealState = newHealState(true)
-		globalBackgroundHealState = newHealState(false)
-		globalHealStateLK.Unlock()
-	}
+	globalHealStateLK.Lock()
+	// New global heal state
+	globalAllHealState = newHealState(true)
+	globalBackgroundHealState = newHealState(false)
+	globalHealStateLK.Unlock()
 
 	// Create new notification system and initialize notification peer targets
 	globalNotificationSys = NewNotificationSys(globalEndpoints)
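
Dropping the guard means heal state is now initialized for every mode, not just erasure. As a generic illustration of the write-lock pattern the hunk keeps (all names below are stand-ins, not MinIO's):

package main

import (
	"fmt"
	"sync"
)

type healState struct{ global bool }

func newHealState(global bool) *healState { return &healState{global: global} }

var (
	healStateLK  sync.RWMutex
	allHealState *healState
	bgHealState  *healState
)

// initHealState mirrors the now-unconditional initialization: the
// writer takes the exclusive lock; readers elsewhere would use RLock.
func initHealState() {
	healStateLK.Lock()
	defer healStateLK.Unlock()
	allHealState = newHealState(true)
	bgHealState = newHealState(false)
}

func main() {
	initHealState()
	healStateLK.RLock()
	fmt.Println(allHealState != nil, bgHealState != nil) // true true
	healStateLK.RUnlock()
}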
@@ -527,11 +526,8 @@ func serverMain(ctx *cli.Context) {
 	xhttp.SetMinIOVersion(Version)
 
-	// Enable background operations for erasure coding
-	if globalIsErasure {
-		initAutoHeal(GlobalContext, newObject)
-		initHealMRF(GlobalContext, newObject)
-	}
+	initAutoHeal(GlobalContext, newObject)
+	initHealMRF(GlobalContext, newObject)
 
 	initBackgroundExpiry(GlobalContext, newObject)
 
 	if globalActiveCred.Equal(auth.DefaultCredentials) {
@@ -579,21 +575,19 @@ func serverMain(ctx *cli.Context) {
 	// Background all other operations such as initializing bucket metadata etc.
 	go func() {
 		// Initialize transition tier configuration manager
-		if globalIsErasure {
-			initBackgroundReplication(GlobalContext, newObject)
-			initBackgroundTransition(GlobalContext, newObject)
+		initBackgroundReplication(GlobalContext, newObject)
+		initBackgroundTransition(GlobalContext, newObject)
 
-			go func() {
-				if err := globalTierConfigMgr.Init(GlobalContext, newObject); err != nil {
-					logger.LogIf(GlobalContext, err)
-				}
+		go func() {
+			if err := globalTierConfigMgr.Init(GlobalContext, newObject); err != nil {
+				logger.LogIf(GlobalContext, err)
+			}
 
-				globalTierJournal, err = initTierDeletionJournal(GlobalContext)
-				if err != nil {
-					logger.FatalIf(err, "Unable to initialize remote tier pending deletes journal")
-				}
-			}()
-		}
+			globalTierJournal, err = initTierDeletionJournal(GlobalContext)
+			if err != nil {
+				logger.FatalIf(err, "Unable to initialize remote tier pending deletes journal")
+			}
+		}()
 
 		// Initialize site replication manager.
 		globalSiteReplicationSys.Init(GlobalContext, newObject)
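
Taken together, the hunks in initAllSubsystems and serverMain remove the globalIsErasure gating, so heal state, auto-heal, heal MRF, replication, transition, and tier configuration are all initialized unconditionally. That is what brings features such as replication and transitioning to single-drive setups, as the commit message notes.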
@@ -664,7 +658,13 @@ func newObjectLayer(ctx context.Context, endpointServerPools EndpointServerPools
 	// For FS only, directly use the disk.
 	if endpointServerPools.NEndpoints() == 1 {
 		// Initialize new FS object layer.
-		return NewFSObjectLayer(endpointServerPools[0].Endpoints[0].Path)
+		newObject, err = NewFSObjectLayer(endpointServerPools[0].Endpoints[0].Path)
+		if err == nil {
+			return newObject, nil
+		}
+		if err != nil && err != errFreshDisk {
+			return newObject, err
+		}
 	}
 
 	return newErasureServerPools(ctx, endpointServerPools)
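
The final hunk changes single-endpoint startup from "always FS" to "FS only when a legacy FS-format volume already exists, otherwise fall through to the erasure path." A minimal, self-contained sketch of that fallback shape follows; errFreshDisk here merely stands in for MinIO's sentinel, and both constructors are hypothetical:

package main

import (
	"errors"
	"fmt"
)

// errFreshDisk stands in for MinIO's sentinel meaning "no legacy FS
// format found on this disk"; the constructors below are hypothetical.
var errFreshDisk = errors.New("fresh disk")

type objectLayer string

func newFSLayer(path string, fresh bool) (objectLayer, error) {
	if fresh {
		return "", errFreshDisk // nothing to preserve: let erasure take over
	}
	return objectLayer("fs:" + path), nil
}

func newErasureLayer(path string) (objectLayer, error) {
	return objectLayer("erasure-sd:" + path), nil
}

// newLayer mirrors the control flow of the hunk: keep backward
// compatibility for existing FS deployments, and use single-drive
// erasure for fresh disks.
func newLayer(path string, fresh bool) (objectLayer, error) {
	obj, err := newFSLayer(path, fresh)
	if err == nil {
		return obj, nil
	}
	if !errors.Is(err, errFreshDisk) {
		return obj, err
	}
	return newErasureLayer(path)
}

func main() {
	fmt.Println(newLayer("/data", false)) // fs:/data <nil>
	fmt.Println(newLayer("/data", true))  // erasure-sd:/data <nil>
}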