extend server config.yaml to support per pool set drive count (#19663)
This is to support deployments migrating from a multi-pool, wider-stripe
layout to a lower stripe. MINIO_STORAGE_CLASS_STANDARD is still expected
to be the same across all pools, so you can accommodate pools with custom
drive counts by adjusting the storage class value.
```
version: v2
address: ':9000'
rootUser: 'minioadmin'
rootPassword: 'minioadmin'
console-address: ':9001'
pools: # Specify the nodes and drives with pools
  -
    args:
      - 'node{11...14}.example.net/data{1...4}'
  -
    args:
      - 'node{15...18}.example.net/data{1...4}'
  -
    args:
      - 'node{19...22}.example.net/data{1...4}'
  -
    args:
      - 'node{23...34}.example.net/data{1...10}'
    set-drive-count: 6
```
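The per-pool `set-drive-count` key only exists in the new `v2` schema. As a rough sketch of how such a document maps onto Go structs when decoded with `gopkg.in/yaml.v2` (the struct and field names below are illustrative assumptions, not the actual definitions in MinIO's `internal/config` package):

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// Illustrative types only: names and yaml tags are assumptions that mirror
// the example config.yaml, not MinIO's real internal/config definitions.
type poolV2 struct {
	Args          []string `yaml:"args"`
	SetDriveCount uint64   `yaml:"set-drive-count"`
}

type serverConfigV2 struct {
	Version string   `yaml:"version"`
	Address string   `yaml:"address"`
	Pools   []poolV2 `yaml:"pools"`
}

const data = `
version: v2
address: ':9000'
pools:
  -
    args:
      - 'node{11...14}.example.net/data{1...4}'
  -
    args:
      - 'node{23...34}.example.net/data{1...10}'
    set-drive-count: 6
`

func main() {
	var cfg serverConfigV2
	if err := yaml.Unmarshal([]byte(data), &cfg); err != nil {
		log.Fatal(err)
	}
	for i, p := range cfg.Pools {
		// A zero set-drive-count means the pool falls back to the server default stripe.
		fmt.Printf("pool %d: %v (set-drive-count=%d)\n", i, p.Args, p.SetDriveCount)
	}
}
```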
```diff
@@ -18,6 +18,7 @@
 package cmd
 
 import (
+	"bytes"
 	"context"
 	"encoding/hex"
 	"errors"
@@ -287,23 +288,7 @@ func serverCmdArgs(ctx *cli.Context) []string {
 	return strings.Fields(v)
 }
 
-func mergeServerCtxtFromConfigFile(configFile string, ctxt *serverCtxt) error {
-	rd, err := Open(configFile)
-	if err != nil {
-		return err
-	}
-	defer rd.Close()
-
-	cf := &config.ServerConfig{}
-	dec := yaml.NewDecoder(rd)
-	dec.SetStrict(true)
-	if err = dec.Decode(cf); err != nil {
-		return err
-	}
-	if cf.Version != "v1" {
-		return fmt.Errorf("unexpected version: %s", cf.Version)
-	}
-
+func configCommonToSrvCtx(cf config.ServerConfigCommon, ctxt *serverCtxt) {
 	ctxt.RootUser = cf.RootUser
 	ctxt.RootPwd = cf.RootPwd
 
@@ -331,8 +316,75 @@ func mergeServerCtxtFromConfigFile(configFile string, ctxt *serverCtxt) error {
 	if cf.Options.SFTP.SSHPrivateKey != "" {
 		ctxt.SFTP = append(ctxt.SFTP, fmt.Sprintf("ssh-private-key=%s", cf.Options.SFTP.SSHPrivateKey))
 	}
 }
 
-	ctxt.Layout, err = buildDisksLayoutFromConfFile(cf.Pools)
+func mergeServerCtxtFromConfigFile(configFile string, ctxt *serverCtxt) error {
+	rd, err := xioutil.ReadFile(configFile)
+	if err != nil {
+		return err
+	}
+
+	cfReader := bytes.NewReader(rd)
+
+	cv := config.ServerConfigVersion{}
+	if err = yaml.Unmarshal(rd, &cv); err != nil {
+		return err
+	}
+
+	switch cv.Version {
+	case "v1", "v2":
+	default:
+		return fmt.Errorf("unexpected version: %s", cv.Version)
+	}
+
+	cfCommon := config.ServerConfigCommon{}
+	if err = yaml.Unmarshal(rd, &cfCommon); err != nil {
+		return err
+	}
+
+	configCommonToSrvCtx(cfCommon, ctxt)
+
+	v, err := env.GetInt(EnvErasureSetDriveCount, 0)
+	if err != nil {
+		return err
+	}
+	setDriveCount := uint64(v)
+
+	var pools []poolArgs
+	switch cv.Version {
+	case "v1":
+		cfV1 := config.ServerConfigV1{}
+		if err = yaml.Unmarshal(rd, &cfV1); err != nil {
+			return err
+		}
+
+		pools = make([]poolArgs, 0, len(cfV1.Pools))
+		for _, list := range cfV1.Pools {
+			pools = append(pools, poolArgs{
+				args:          list,
+				setDriveCount: setDriveCount,
+			})
+		}
+	case "v2":
+		cf := config.ServerConfig{}
+		cfReader.Seek(0, io.SeekStart)
+		if err = yaml.Unmarshal(rd, &cf); err != nil {
+			return err
+		}
+
+		pools = make([]poolArgs, 0, len(cf.Pools))
+		for _, list := range cf.Pools {
+			driveCount := list.SetDriveCount
+			if setDriveCount > 0 {
+				driveCount = setDriveCount
+			}
+			pools = append(pools, poolArgs{
+				args:          list.Args,
+				setDriveCount: driveCount,
+			})
+		}
+	}
+	ctxt.Layout, err = buildDisksLayoutFromConfFile(pools)
 	return err
 }
```
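Note that in the v2 path the pool-level value does not always win: the global count read through `env.GetInt(EnvErasureSetDriveCount, 0)` overrides the per-pool `set-drive-count` whenever it is non-zero. A minimal standalone sketch of that precedence follows; the helper name `resolveDriveCount` is invented for illustration and is not part of the MinIO tree.

```go
package main

import "fmt"

// resolveDriveCount mirrors the precedence visible in the patch above:
// a non-zero global value (from EnvErasureSetDriveCount) wins over the
// per-pool set-drive-count; a pool without an explicit value falls back
// to that global, and zero means "let the server pick its default stripe".
func resolveDriveCount(envSetDriveCount, poolSetDriveCount uint64) uint64 {
	if envSetDriveCount > 0 {
		return envSetDriveCount
	}
	return poolSetDriveCount
}

func main() {
	fmt.Println(resolveDriveCount(0, 6)) // 6: pool-level value from config.yaml
	fmt.Println(resolveDriveCount(8, 6)) // 8: environment override wins
	fmt.Println(resolveDriveCount(0, 0)) // 0: server default stripe size applies
}
```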