Add support of conf file to pass arguments and options (#18592)
@@ -22,6 +22,7 @@ import (
 	"errors"
 	"fmt"
 	"net/http"
+	"strconv"
 	"strings"

 	"github.com/minio/minio/internal/logger"
@@ -66,16 +67,28 @@ func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Reque
 	vars := mux.Vars(r)
 	v := vars["pool"]
+	byID := vars["by-id"] == "true"

 	pools := strings.Split(v, ",")
 	poolIndices := make([]int, 0, len(pools))

 	for _, pool := range pools {
-		idx := globalEndpoints.GetPoolIdx(pool)
-		if idx == -1 {
-			// We didn't find any matching pools, invalid input
-			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
-			return
+		var idx int
+		if byID {
+			var err error
+			idx, err = strconv.Atoi(pool)
+			if err != nil {
+				// We didn't find any matching pools, invalid input
+				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
+				return
+			}
+		} else {
+			idx = globalEndpoints.GetPoolIdx(pool)
+			if idx == -1 {
+				// We didn't find any matching pools, invalid input
+				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
+				return
+			}
 		}
 	var pool *erasureSets
 	for pidx := range z.serverPools {
@@ -132,8 +145,17 @@ func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Requ
 	vars := mux.Vars(r)
 	v := vars["pool"]
+	byID := vars["by-id"] == "true"
+	idx := -1
+
+	if byID {
+		if i, err := strconv.Atoi(v); err == nil && i >= 0 && i < len(globalEndpoints) {
+			idx = i
+		}
+	} else {
+		idx = globalEndpoints.GetPoolIdx(v)
+	}

-	idx := globalEndpoints.GetPoolIdx(v)
 	if idx == -1 {
 		// We didn't find any matching pools, invalid input
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
@@ -178,8 +200,17 @@ func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {
 	vars := mux.Vars(r)
 	v := vars["pool"]
+	byID := vars["by-id"] == "true"
+	idx := -1
+
+	if byID {
+		if i, err := strconv.Atoi(v); err == nil && i >= 0 && i < len(globalEndpoints) {
+			idx = i
+		}
+	} else {
+		idx = globalEndpoints.GetPoolIdx(v)
+	}

-	idx := globalEndpoints.GetPoolIdx(v)
 	if idx == -1 {
 		apiErr := toAdminAPIErr(ctx, errInvalidArgument)
 		apiErr.Description = fmt.Sprintf("specified pool '%s' not found, please specify a valid pool", v)
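The `by-id` branch added in the three handlers above lets callers address a pool either by its positional index or by its command line. A minimal, self-contained sketch of that resolution logic; `resolvePoolIndex` and the `byNameLookup` callback are hypothetical stand-ins for `globalEndpoints.GetPoolIdx`:

```go
package main

import (
	"fmt"
	"strconv"
)

// resolvePoolIndex mirrors the handlers' branching: numeric when byID is
// set, otherwise a by-name lookup that returns -1 on no match.
func resolvePoolIndex(arg string, byID bool, byNameLookup func(string) int) (int, error) {
	if byID {
		idx, err := strconv.Atoi(arg)
		if err != nil || idx < 0 {
			return -1, fmt.Errorf("invalid pool id %q", arg)
		}
		return idx, nil
	}
	if idx := byNameLookup(arg); idx != -1 {
		return idx, nil
	}
	return -1, fmt.Errorf("no pool matches %q", arg)
}

func main() {
	lookup := func(cmdline string) int {
		if cmdline == "https://server{1...4}/disk{1...4}" {
			return 0
		}
		return -1
	}
	fmt.Println(resolvePoolIndex("1", true, lookup))                                  // by index
	fmt.Println(resolvePoolIndex("https://server{1...4}/disk{1...4}", false, lookup)) // by name
}
```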
@@ -310,79 +310,113 @@ func checkUpdate(mode string) {
 	logger.Info(prepareUpdateMessage("Run `mc admin update`", lrTime.Sub(crTime)))
 }

-func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() string) (*ConfigDir, bool) {
-	var dir string
-	var dirSet bool
-
-	switch {
-	case ctx.IsSet(option):
-		dir = ctx.String(option)
-		dirSet = true
-	case ctx.GlobalIsSet(option):
-		dir = ctx.GlobalString(option)
-		dirSet = true
-		// cli package does not expose parent's option option. Below code is workaround.
-		if dir == "" || dir == getDefaultDir() {
-			dirSet = false // Unset to false since GlobalIsSet() true is a false positive.
-			if ctx.Parent().GlobalIsSet(option) {
-				dir = ctx.Parent().GlobalString(option)
-				dirSet = true
-			}
-		}
-	default:
-		// Neither local nor global option is provided. In this case, try to use
-		// default directory.
+func newConfigDir(dir string, dirSet bool, getDefaultDir func() string) (*ConfigDir, error) {
 	if dir == "" {
 		dir = getDefaultDir()
-		if dir == "" {
-			logger.FatalIf(errInvalidArgument, "%s option must be provided", option)
-		}
 	}

 	if dir == "" {
-		logger.FatalIf(errors.New("empty directory"), "%s directory cannot be empty", option)
+		if !dirSet {
+			return nil, fmt.Errorf("missing option must be provided")
+		}
+		return nil, fmt.Errorf("provided option cannot be empty")
 	}

 	// Disallow relative paths, figure out absolute paths.
 	dirAbs, err := filepath.Abs(dir)
-	logger.FatalIf(err, "Unable to fetch absolute path for %s=%s", option, dir)
+	if err != nil {
+		return nil, err
+	}
+	err = mkdirAllIgnorePerm(dirAbs)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create the directory `%s`: %w", dirAbs, err)
+	}

-	logger.FatalIf(mkdirAllIgnorePerm(dirAbs), "Unable to create directory specified %s=%s", option, dir)
-
-	return &ConfigDir{path: dirAbs}, dirSet
+	return &ConfigDir{path: dirAbs}, nil
 }

-func handleCommonCmdArgs(ctx *cli.Context) {
+func buildServerCtxt(ctx *cli.Context, ctxt *serverCtxt) (err error) {
 	// Get "json" flag from command line argument and
 	// enable json and quite modes if json flag is turned on.
-	globalCLIContext.JSON = ctx.IsSet("json") || ctx.GlobalIsSet("json")
-	if globalCLIContext.JSON {
-		logger.EnableJSON()
-	}
-
+	ctxt.JSON = ctx.IsSet("json") || ctx.GlobalIsSet("json")
 	// Get quiet flag from command line argument.
-	globalCLIContext.Quiet = ctx.IsSet("quiet") || ctx.GlobalIsSet("quiet")
-	if globalCLIContext.Quiet {
-		logger.EnableQuiet()
-	}
-
+	ctxt.Quiet = ctx.IsSet("quiet") || ctx.GlobalIsSet("quiet")
 	// Get anonymous flag from command line argument.
-	globalCLIContext.Anonymous = ctx.IsSet("anonymous") || ctx.GlobalIsSet("anonymous")
-	if globalCLIContext.Anonymous {
-		logger.EnableAnonymous()
-	}
-
+	ctxt.Anonymous = ctx.IsSet("anonymous") || ctx.GlobalIsSet("anonymous")
 	// Fetch address option
-	addr := ctx.GlobalString("address")
-	if addr == "" || addr == ":"+GlobalMinioDefaultPort {
-		addr = ctx.String("address")
+	ctxt.Addr = ctx.GlobalString("address")
+	if ctxt.Addr == "" || ctxt.Addr == ":"+GlobalMinioDefaultPort {
+		ctxt.Addr = ctx.String("address")
 	}

 	// Fetch console address option
-	consoleAddr := ctx.GlobalString("console-address")
-	if consoleAddr == "" {
-		consoleAddr = ctx.String("console-address")
+	ctxt.ConsoleAddr = ctx.GlobalString("console-address")
+	if ctxt.ConsoleAddr == "" {
+		ctxt.ConsoleAddr = ctx.String("console-address")
 	}

+	// Check "no-compat" flag from command line argument.
+	ctxt.StrictS3Compat = true
+	if ctx.IsSet("no-compat") || ctx.GlobalIsSet("no-compat") {
+		ctxt.StrictS3Compat = false
+	}
+
+	switch {
+	case ctx.IsSet("config-dir"):
+		ctxt.ConfigDir = ctx.String("config-dir")
+		ctxt.configDirSet = true
+	case ctx.GlobalIsSet("config-dir"):
+		ctxt.ConfigDir = ctx.GlobalString("config-dir")
+		ctxt.configDirSet = true
+	}
+
+	switch {
+	case ctx.IsSet("certs-dir"):
+		ctxt.CertsDir = ctx.String("certs-dir")
+		ctxt.certsDirSet = true
+	case ctx.GlobalIsSet("certs-dir"):
+		ctxt.CertsDir = ctx.GlobalString("certs-dir")
+		ctxt.certsDirSet = true
+	}
+
+	ctxt.FTP = ctx.StringSlice("ftp")
+	ctxt.SFTP = ctx.StringSlice("sftp")
+
+	ctxt.Interface = ctx.String("interface")
+	ctxt.UserTimeout = ctx.Duration("conn-user-timeout")
+	ctxt.ConnReadDeadline = ctx.Duration("conn-read-deadline")
+	ctxt.ConnWriteDeadline = ctx.Duration("conn-write-deadline")
+
+	ctxt.ShutdownTimeout = ctx.Duration("shutdown-timeout")
+	ctxt.IdleTimeout = ctx.Duration("idle-timeout")
+	ctxt.ReadHeaderTimeout = ctx.Duration("read-header-timeout")
+
+	if conf := ctx.String("config"); len(conf) > 0 {
+		err = mergeServerCtxtFromConfigFile(conf, ctxt)
+	} else {
+		err = mergeDisksLayoutFromArgs(serverCmdArgs(ctx), ctxt)
+	}
+
+	return err
+}
+
+func handleCommonArgs(ctxt serverCtxt) {
+	if ctxt.JSON {
+		logger.EnableJSON()
+	}
+	if ctxt.Quiet {
+		logger.EnableQuiet()
+	}
+	if ctxt.Anonymous {
+		logger.EnableAnonymous()
+	}
+
+	consoleAddr := ctxt.ConsoleAddr
+	addr := ctxt.Addr
+	configDir := ctxt.ConfigDir
+	configSet := ctxt.configDirSet
+	certsDir := ctxt.CertsDir
+	certsSet := ctxt.certsDirSet
+
 	if consoleAddr == "" {
 		p, err := xnet.GetFreePort()
 		if err != nil {
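The refactor above splits argument handling into two phases: buildServerCtxt only collects CLI flags (or a config file) into a serverCtxt value, and handleCommonArgs is the single place that applies the collected values to process state. A toy model of that flow, assuming illustrative names (options, buildOptions, apply) rather than the MinIO identifiers:

```go
package main

import "fmt"

type options struct {
	JSON, Quiet bool
	Addr        string
}

// buildOptions only collects values and has no side effects, so CLI flags
// and a config file can be merged before anything is applied.
func buildOptions(cliAddr string, json, quiet bool) options {
	return options{JSON: json, Quiet: quiet, Addr: cliAddr}
}

// apply is the single place where the collected options mutate process
// state (logger modes, listeners, globals in the real server).
func apply(o options) {
	if o.JSON {
		fmt.Println("enable JSON logging")
	}
	if o.Quiet {
		fmt.Println("enable quiet mode")
	}
	fmt.Println("listen on", o.Addr)
}

func main() {
	apply(buildOptions(":9000", true, false))
}
```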
@@ -422,16 +456,12 @@ func handleCommonCmdArgs(ctx *cli.Context) {
 	globalMinioAddr = addr

-	// Check "no-compat" flag from command line argument.
-	globalCLIContext.StrictS3Compat = true
-	if ctx.IsSet("no-compat") || ctx.GlobalIsSet("no-compat") {
-		globalCLIContext.StrictS3Compat = false
-	}
-
 	// Set all config, certs and CAs directories.
-	var configSet, certsSet bool
-	globalConfigDir, configSet = newConfigDirFromCtx(ctx, "config-dir", defaultConfigDir.Get)
-	globalCertsDir, certsSet = newConfigDirFromCtx(ctx, "certs-dir", defaultCertsDir.Get)
+	var err error
+	globalConfigDir, err = newConfigDir(configDir, configSet, defaultConfigDir.Get)
+	logger.FatalIf(err, "Unable to initialize the (deprecated) config directory")
+	globalCertsDir, err = newConfigDir(certsDir, certsSet, defaultCertsDir.Get)
+	logger.FatalIf(err, "Unable to initialize the certs directory")

 	// Remove this code when we deprecate and remove config-dir.
 	// This code is to make sure we inherit from the config-dir
@@ -443,9 +473,11 @@ func handleCommonCmdArgs(ctx *cli.Context) {
 	globalCertsCADir = &ConfigDir{path: filepath.Join(globalCertsDir.Get(), certsCADir)}

 	logger.FatalIf(mkdirAllIgnorePerm(globalCertsCADir.Get()), "Unable to create certs CA directory at %s", globalCertsCADir.Get())
 }

-// Check if we have configured a custom DNS cache TTL.
 func runDNSCache(ctx *cli.Context) {
 	dnsTTL := ctx.Duration("dns-cache-ttl")
+	// Check if we have configured a custom DNS cache TTL.
 	if dnsTTL <= 0 {
 		dnsTTL = 10 * time.Minute
 	}

@@ -18,12 +18,15 @@
 package cmd

 import (
+	"errors"
 	"fmt"
+	"net/url"
 	"runtime"
 	"sort"
 	"strconv"
 	"strings"

+	"github.com/cespare/xxhash/v2"
 	"github.com/minio/minio-go/v7/pkg/set"
 	"github.com/minio/minio/internal/config"
 	"github.com/minio/pkg/v2/ellipses"
@@ -335,13 +338,128 @@ const (
 var globalCustomErasureDriveCount = false

-// CreateServerEndpoints - validates and creates new endpoints from input args, supports
-// both ellipses and without ellipses transparently.
-func createServerEndpoints(serverAddr string, args ...string) (
-	endpointServerPools EndpointServerPools, setupType SetupType, err error,
-) {
+type node struct {
+	nodeName string
+	disks    []string
+}
+
+type endpointsList []node
+
+func (el *endpointsList) add(arg string) error {
+	u, err := url.Parse(arg)
+	if err != nil {
+		return err
+	}
+	found := false
+	list := *el
+	for i := range list {
+		if list[i].nodeName == u.Host {
+			list[i].disks = append(list[i].disks, u.String())
+			found = true
+			break
+		}
+	}
+	if !found {
+		list = append(list, node{nodeName: u.Host, disks: []string{u.String()}})
+	}
+	*el = list
+	return nil
+}
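The add method groups endpoint URLs by their host so that each node's disks stay together, preserving first-seen host order. A standalone demo of the same grouping with net/url; the argument URLs are made up:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	args := []string{
		"https://server1:9000/mnt/disk1",
		"https://server2:9000/mnt/disk1",
		"https://server1:9000/mnt/disk2",
	}
	byHost := map[string][]string{}
	var order []string // preserve first-seen host order, as the method above does
	for _, arg := range args {
		u, err := url.Parse(arg)
		if err != nil {
			panic(err)
		}
		if _, seen := byHost[u.Host]; !seen {
			order = append(order, u.Host)
		}
		byHost[u.Host] = append(byHost[u.Host], u.String())
	}
	for _, host := range order {
		fmt.Println(host, "->", byHost[host])
	}
	// server1:9000 -> [https://server1:9000/mnt/disk1 https://server1:9000/mnt/disk2]
	// server2:9000 -> [https://server2:9000/mnt/disk1]
}
```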

+// buildDisksLayoutFromConfFile supports with and without ellipses transparently.
+func buildDisksLayoutFromConfFile(pools [][]string) (layout disksLayout, err error) {
+	if len(pools) == 0 {
+		return layout, errInvalidArgument
+	}
+
+	for _, list := range pools {
+		var endpointsList endpointsList
+
+		for _, arg := range list {
+			switch {
+			case ellipses.HasList(arg):
+				patterns, err := ellipses.FindListPatterns(arg)
+				if err != nil {
+					return layout, err
+				}
+				for _, exp := range patterns.Expand() {
+					for _, ep := range exp {
+						if err := endpointsList.add(ep); err != nil {
+							return layout, err
+						}
+					}
+				}
+			case ellipses.HasEllipses(arg):
+				patterns, err := ellipses.FindEllipsesPatterns(arg)
+				if err != nil {
+					return layout, err
+				}
+				for _, exp := range patterns.Expand() {
+					if err := endpointsList.add(strings.Join(exp, "")); err != nil {
+						return layout, err
+					}
+				}
+			default:
+				if err := endpointsList.add(arg); err != nil {
+					return layout, err
+				}
+			}
+		}
+
+		var stopping bool
+		var singleNode bool
+		var eps []string
+
+		for i := 0; ; i++ {
+			for _, node := range endpointsList {
+				if node.nodeName == "" {
+					singleNode = true
+				}
+
+				if len(node.disks) <= i {
+					stopping = true
+					continue
+				}
+				if stopping {
+					return layout, errors.New("number of disks per node does not match")
+				}
+				eps = append(eps, node.disks[i])
+			}
+			if stopping {
+				break
+			}
+		}
+
+		for _, node := range endpointsList {
+			if node.nodeName != "" && singleNode {
+				return layout, errors.New("all arguments must but either single node or distributed")
+			}
+		}
+
+		setArgs, err := GetAllSets(eps...)
+		if err != nil {
+			return layout, err
+		}
+
+		h := xxhash.New()
+		for _, s := range setArgs {
+			for _, d := range s {
+				h.WriteString(d)
+			}
+		}
+
+		layout.pools = append(layout.pools, poolDisksLayout{
+			cmdline: fmt.Sprintf("hash:%x", h.Sum(nil)),
+			layout:  setArgs,
+		})
+	}
+	return
+}
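The i-indexed loop above interleaves one disk per node per round, and the pool's cmdline is recorded as an xxhash fingerprint. A sketch of both steps: interleave is an illustrative extraction of that loop, and, as an assumption for brevity, the sketch hashes the flattened endpoints, whereas the real code hashes the set layout returned by GetAllSets; xxhash.New is the real API from github.com/cespare/xxhash/v2:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cespare/xxhash/v2"
)

// interleave emits disks[0] of every node, then disks[1], and so on.
// As in the loop above, an error fires only when an earlier node in the
// list runs out of disks while a later one still has some.
func interleave(nodes [][]string) ([]string, error) {
	var out []string
	var stopping bool
	for i := 0; ; i++ {
		for _, disks := range nodes {
			if len(disks) <= i {
				stopping = true
				continue
			}
			if stopping {
				return nil, errors.New("number of disks per node does not match")
			}
			out = append(out, disks[i])
		}
		if stopping {
			return out, nil
		}
	}
}

func main() {
	eps, err := interleave([][]string{
		{"https://a/d1", "https://a/d2"},
		{"https://b/d1", "https://b/d2"},
	})
	fmt.Println(eps, err) // [https://a/d1 https://b/d1 https://a/d2 https://b/d2] <nil>

	// The per-pool cmdline becomes an xxhash fingerprint in "hash:%x" form.
	h := xxhash.New()
	for _, ep := range eps {
		h.WriteString(ep)
	}
	fmt.Printf("hash:%x\n", h.Sum(nil))
}
```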

+// mergeDisksLayoutFromArgs supports with and without ellipses transparently.
+func mergeDisksLayoutFromArgs(args []string, ctxt *serverCtxt) (err error) {
 	if len(args) == 0 {
-		return nil, -1, errInvalidArgument
+		return errInvalidArgument
 	}

 	ok := true
@@ -349,44 +467,42 @@ func createServerEndpoints(serverAddr string, args ...string) (
 	for _, arg := range args {
 		ok = ok && !ellipses.HasEllipses(arg)
 	}

+	var setArgs [][]string
+
 	// None of the args have ellipses use the old style.
 	if ok {
-		setArgs, err := GetAllSets(args...)
+		setArgs, err = GetAllSets(args...)
 		if err != nil {
-			return nil, -1, err
+			return err
 		}
-		endpointList, newSetupType, err := CreateEndpoints(serverAddr, setArgs...)
-		if err != nil {
-			return nil, -1, err
-		}
-		for i := range endpointList {
-			endpointList[i].SetPoolIndex(0)
-		}
-		endpointServerPools = append(endpointServerPools, PoolEndpoints{
-			Legacy:       true,
-			SetCount:     len(setArgs),
-			DrivesPerSet: len(setArgs[0]),
-			Endpoints:    endpointList,
-			CmdLine:      strings.Join(args, " "),
-			Platform:     fmt.Sprintf("OS: %s | Arch: %s", runtime.GOOS, runtime.GOARCH),
-		})
-		setupType = newSetupType
-		return endpointServerPools, setupType, nil
+		ctxt.Layout = disksLayout{
+			legacy: true,
+			pools:  []poolDisksLayout{{layout: setArgs}},
+		}
+		return
 	}

-	var poolArgs [][][]string
 	for _, arg := range args {
 		if !ellipses.HasEllipses(arg) && len(args) > 1 {
 			// TODO: support SNSD deployments to be decommissioned in future
-			return nil, -1, fmt.Errorf("all args must have ellipses for pool expansion (%w) args: %s", errInvalidArgument, args)
+			return fmt.Errorf("all args must have ellipses for pool expansion (%w) args: %s", errInvalidArgument, args)
 		}

-		setArgs, err := GetAllSets(arg)
+		setArgs, err = GetAllSets(arg)
 		if err != nil {
-			return nil, -1, err
+			return err
 		}
+		ctxt.Layout.pools = append(ctxt.Layout.pools, poolDisksLayout{cmdline: arg, layout: setArgs})
 	}
+	return
 }

-		poolArgs = append(poolArgs, setArgs)
+// CreateServerEndpoints - validates and creates new endpoints from input args, supports
+// both ellipses and without ellipses transparently.
+func createServerEndpoints(serverAddr string, poolArgs []poolDisksLayout, legacy bool) (
 	endpointServerPools EndpointServerPools, setupType SetupType, err error,
 ) {
+	if len(poolArgs) == 0 {
+		return nil, -1, errInvalidArgument
+	}
+
 	poolEndpoints, setupType, err := CreatePoolEndpoints(serverAddr, poolArgs...)
@@ -396,11 +512,12 @@ func createServerEndpoints(serverAddr string, args ...string) (
 	for i, endpointList := range poolEndpoints {
 		if err = endpointServerPools.Add(PoolEndpoints{
-			SetCount:     len(poolArgs[i]),
-			DrivesPerSet: len(poolArgs[i][0]),
+			Legacy:       legacy,
+			SetCount:     len(poolArgs[i].layout),
+			DrivesPerSet: len(poolArgs[i].layout[0]),
 			Endpoints:    endpointList,
 			Platform:     fmt.Sprintf("OS: %s | Arch: %s", runtime.GOOS, runtime.GOARCH),
-			CmdLine:      args[i],
+			CmdLine:      poolArgs[i].cmdline,
 		}); err != nil {
 			return nil, -1, err
 		}
@@ -54,15 +54,20 @@ func TestCreateServerEndpoints(t *testing.T) {
 		{":9001", []string{"http://localhost:9001/export{01...64}"}, true},
 	}

-	for _, testCase := range testCases {
+	for i, testCase := range testCases {
 		testCase := testCase
 		t.Run("", func(t *testing.T) {
-			_, _, err := createServerEndpoints(testCase.serverAddr, testCase.args...)
+			srvCtxt := serverCtxt{}
+			err := mergeDisksLayoutFromArgs(testCase.args, &srvCtxt)
 			if err != nil && testCase.success {
-				t.Errorf("Expected success but failed instead %s", err)
+				t.Fatalf("Test %d: unexpected error: %v", i+1, err)
 			}
+			_, _, err = createServerEndpoints(testCase.serverAddr, srvCtxt.Layout.pools, srvCtxt.Layout.legacy)
+			if err != nil && testCase.success {
+				t.Errorf("Test %d: Expected success but failed instead %s", i+1, err)
+			}
 			if err == nil && !testCase.success {
-				t.Errorf("Expected failure but passed instead")
+				t.Errorf("Test %d: Expected failure but passed instead", i+1)
 			}
 		})
 	}
cmd/endpoint.go
@@ -803,11 +803,23 @@ func (p PoolEndpointList) UpdateIsLocal() error {
 	return nil
 }

+func isEmptyLayout(poolsLayout ...poolDisksLayout) bool {
+	return len(poolsLayout) == 0 || len(poolsLayout[0].layout) == 0 || len(poolsLayout[0].layout[0]) == 0 || len(poolsLayout[0].layout[0][0]) == 0
+}
+
+func isSingleDriveLayout(poolsLayout ...poolDisksLayout) bool {
+	return len(poolsLayout) == 1 && len(poolsLayout[0].layout) == 1 && len(poolsLayout[0].layout[0]) == 1
+}
+
 // CreatePoolEndpoints creates a list of endpoints per pool, resolves their relevant hostnames and
 // discovers those are local or remote.
-func CreatePoolEndpoints(serverAddr string, poolArgs ...[][]string) ([]Endpoints, SetupType, error) {
+func CreatePoolEndpoints(serverAddr string, poolsLayout ...poolDisksLayout) ([]Endpoints, SetupType, error) {
 	var setupType SetupType

+	if isEmptyLayout(poolsLayout...) {
+		return nil, setupType, config.ErrInvalidErasureEndpoints(nil).Msg("invalid number of endpoints")
+	}
+
 	// Check whether serverAddr is valid for this host.
 	if err := CheckLocalServerAddr(serverAddr); err != nil {
 		return nil, setupType, err
@@ -815,11 +827,11 @@ func CreatePoolEndpoints(serverAddr string, poolArgs ...[][]string) ([]Endpoints
 	_, serverAddrPort := mustSplitHostPort(serverAddr)

-	poolEndpoints := make(PoolEndpointList, len(poolArgs))
+	poolEndpoints := make(PoolEndpointList, len(poolsLayout))

 	// For single arg, return single drive EC setup.
-	if len(poolArgs) == 1 && len(poolArgs[0]) == 1 && len(poolArgs[0][0]) == 1 && len(poolArgs[0][0][0]) == 1 {
-		endpoint, err := NewEndpoint(poolArgs[0][0][0])
+	if isSingleDriveLayout(poolsLayout...) {
+		endpoint, err := NewEndpoint(poolsLayout[0].layout[0][0])
 		if err != nil {
 			return nil, setupType, err
 		}
@@ -847,11 +859,11 @@ func CreatePoolEndpoints(serverAddr string, poolArgs ...[][]string) ([]Endpoints
 		return poolEndpoints, setupType, nil
 	}

-	for poolIdx, args := range poolArgs {
+	for poolIdx, pool := range poolsLayout {
 		var endpoints Endpoints
-		for setIdx, iargs := range args {
+		for setIdx, setLayout := range pool.layout {
 			// Convert args to endpoints
-			eps, err := NewEndpoints(iargs...)
+			eps, err := NewEndpoints(setLayout...)
 			if err != nil {
 				return nil, setupType, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
 			}
@@ -1003,6 +1015,8 @@ func CreatePoolEndpoints(serverAddr string, poolArgs ...[][]string) ([]Endpoints
 		poolEndpoints[i] = endpoints
 	}

+	// TODO: ensure that each pool has at least two nodes in a distributed setup
+
 	publicIPs := env.Get(config.EnvPublicIPs, "")
 	if len(publicIPs) == 0 {
 		updateDomainIPs(uniqueArgs)
@@ -1021,199 +1035,6 @@ func CreatePoolEndpoints(serverAddr string, poolArgs ...[][]string) ([]Endpoints
 	return poolEndpoints, setupType, nil
 }

-// CreateEndpoints - validates and creates new endpoints for given args.
-func CreateEndpoints(serverAddr string, args ...[]string) (Endpoints, SetupType, error) {
-	var endpoints Endpoints
-	var setupType SetupType
-	var err error
-
-	// Check whether serverAddr is valid for this host.
-	if err = CheckLocalServerAddr(serverAddr); err != nil {
-		return endpoints, setupType, err
-	}
-
-	_, serverAddrPort := mustSplitHostPort(serverAddr)
-
-	// For single arg, return single drive setup.
-	if len(args) == 1 && len(args[0]) == 1 {
-		var endpoint Endpoint
-		endpoint, err = NewEndpoint(args[0][0])
-		if err != nil {
-			return endpoints, setupType, err
-		}
-		if err := endpoint.UpdateIsLocal(); err != nil {
-			return endpoints, setupType, err
-		}
-		if endpoint.Type() != PathEndpointType {
-			return endpoints, setupType, config.ErrInvalidEndpoint(nil).Msg("use path style endpoint for single node setup")
-		}
-
-		endpoint.SetPoolIndex(0)
-		endpoint.SetSetIndex(0)
-		endpoint.SetDiskIndex(0)
-
-		endpoints = append(endpoints, endpoint)
-		setupType = ErasureSDSetupType
-
-		// Check for cross device mounts if any.
-		if err = checkCrossDeviceMounts(endpoints); err != nil {
-			return endpoints, setupType, config.ErrInvalidEndpoint(nil).Msg(err.Error())
-		}
-
-		return endpoints, setupType, nil
-	}
-
-	for setIdx, iargs := range args {
-		// Convert args to endpoints
-		eps, err := NewEndpoints(iargs...)
-		if err != nil {
-			return endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
-		}
-
-		// Check for cross device mounts if any.
-		if err = checkCrossDeviceMounts(eps); err != nil {
-			return endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
-		}
-
-		for diskIdx := range eps {
-			eps[diskIdx].SetSetIndex(setIdx)
-			eps[diskIdx].SetDiskIndex(diskIdx)
-		}
-
-		endpoints = append(endpoints, eps...)
-	}
-
-	if len(endpoints) == 0 {
-		return endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg("invalid number of endpoints")
-	}
-
-	// Return Erasure setup when all endpoints are path style.
-	if endpoints[0].Type() == PathEndpointType {
-		setupType = ErasureSetupType
-		return endpoints, setupType, nil
-	}
-
-	if err = endpoints.UpdateIsLocal(); err != nil {
-		return endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
-	}
-
-	// Here all endpoints are URL style.
-	endpointPathSet := set.NewStringSet()
-	localEndpointCount := 0
-	localServerHostSet := set.NewStringSet()
-	localPortSet := set.NewStringSet()
-
-	for _, endpoint := range endpoints {
-		endpointPathSet.Add(endpoint.Path)
-		if endpoint.IsLocal {
-			localServerHostSet.Add(endpoint.Hostname())
-
-			var port string
-			_, port, err = net.SplitHostPort(endpoint.Host)
-			if err != nil {
-				port = serverAddrPort
-			}
-			localPortSet.Add(port)
-
-			localEndpointCount++
-		}
-	}
-
-	orchestrated := IsKubernetes() || IsDocker()
-	reverseProxy := (env.Get("_MINIO_REVERSE_PROXY", "") != "") && ((env.Get("MINIO_CI_CD", "") != "") || (env.Get("CI", "") != ""))
-	// If not orchestrated
-	if !orchestrated &&
-		// and not setup in reverse proxy
-		!reverseProxy {
-		// Check whether same path is not used in endpoints of a host on different port.
-		// Only verify this on baremetal setups, DNS is not available in orchestrated
-		// environments so we can't do much here.
-		pathIPMap := make(map[string]set.StringSet)
-		hostIPCache := make(map[string]set.StringSet)
-		for _, endpoint := range endpoints {
-			host := endpoint.Hostname()
-			hostIPSet, ok := hostIPCache[host]
-			if !ok {
-				hostIPSet, err = getHostIP(host)
-				if err != nil {
-					return endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg(fmt.Sprintf("host '%s' cannot resolve: %s", host, err))
-				}
-				hostIPCache[host] = hostIPSet
-			}
-			if IPSet, ok := pathIPMap[endpoint.Path]; ok {
-				if !IPSet.Intersection(hostIPSet).IsEmpty() {
-					return endpoints, setupType,
-						config.ErrInvalidErasureEndpoints(nil).Msg(fmt.Sprintf("same path '%s' can not be served by different port on same address", endpoint.Path))
-				}
-				pathIPMap[endpoint.Path] = IPSet.Union(hostIPSet)
-			} else {
-				pathIPMap[endpoint.Path] = hostIPSet
-			}
-		}
-	}
-
-	// Check whether same path is used for more than 1 local endpoints.
-	{
-		localPathSet := set.CreateStringSet()
-		for _, endpoint := range endpoints {
-			if !endpoint.IsLocal {
-				continue
-			}
-			if localPathSet.Contains(endpoint.Path) {
-				return endpoints, setupType,
-					config.ErrInvalidErasureEndpoints(nil).Msg(fmt.Sprintf("path '%s' cannot be served by different address on same server", endpoint.Path))
-			}
-			localPathSet.Add(endpoint.Path)
-		}
-	}
-
-	// Add missing port in all endpoints.
-	for i := range endpoints {
-		_, port, err := net.SplitHostPort(endpoints[i].Host)
-		if err != nil {
-			endpoints[i].Host = net.JoinHostPort(endpoints[i].Host, serverAddrPort)
-		} else if endpoints[i].IsLocal && serverAddrPort != port {
-			// If endpoint is local, but port is different than serverAddrPort, then make it as remote.
-			endpoints[i].IsLocal = false
-		}
-	}
-
-	// All endpoints are pointing to local host
-	if len(endpoints) == localEndpointCount {
-		// If all endpoints have same port number, Just treat it as local erasure setup
-		// using URL style endpoints.
-		if len(localPortSet) == 1 {
-			if len(localServerHostSet) > 1 {
-				return endpoints, setupType,
-					config.ErrInvalidErasureEndpoints(nil).Msg("all local endpoints should not have different hostnames/ips")
-			}
-			return endpoints, ErasureSetupType, nil
-		}
-
-		// Even though all endpoints are local, but those endpoints use different ports.
-		// This means it is DistErasure setup.
-	}
-
-	uniqueArgs := set.NewStringSet()
-	for _, endpoint := range endpoints {
-		uniqueArgs.Add(endpoint.Host)
-	}
-
-	// Error out if we have less than 2 unique servers.
-	if len(uniqueArgs.ToSlice()) < 2 && setupType == DistErasureSetupType {
-		err := fmt.Errorf("Unsupported number of endpoints (%s), minimum number of servers cannot be less than 2 in distributed setup", endpoints)
-		return endpoints, setupType, err
-	}
-
-	publicIPs := env.Get(config.EnvPublicIPs, "")
-	if len(publicIPs) == 0 {
-		updateDomainIPs(uniqueArgs)
-	}
-
-	setupType = DistErasureSetupType
-	return endpoints, setupType, nil
-}
-
 // GetLocalPeer - returns local peer value, returns globalMinioAddr
 // for FS and Erasure mode. In case of distributed server return
 // the first element from the set of peers which indicate that
@@ -225,24 +225,24 @@ func TestCreateEndpoints(t *testing.T) {
 	testCases := []struct {
 		serverAddr         string
-		args               [][]string
+		args               []string
 		expectedServerAddr string
 		expectedEndpoints  Endpoints
 		expectedSetupType  SetupType
 		expectedErr        error
 	}{
-		{"localhost", [][]string{}, "", Endpoints{}, -1, fmt.Errorf("address localhost: missing port in address")},
+		{"localhost", []string{}, "", Endpoints{}, -1, fmt.Errorf("address localhost: missing port in address")},

 		// Erasure Single Drive
-		{"localhost:9000", [][]string{{"http://localhost/d1"}}, "", Endpoints{}, -1, fmt.Errorf("use path style endpoint for FS setup")},
-		{":443", [][]string{{"/d1"}}, ":443", Endpoints{Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true}}, ErasureSDSetupType, nil},
-		{"localhost:10000", [][]string{{"/d1"}}, "localhost:10000", Endpoints{Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true}}, ErasureSDSetupType, nil},
-		{"localhost:9000", [][]string{{"https://127.0.0.1:9000/d1", "https://localhost:9001/d1", "https://example.com/d1", "https://example.com/d2"}}, "", Endpoints{}, -1, fmt.Errorf("path '/d1' can not be served by different port on same address")},
+		{"localhost:9000", []string{"http://localhost/d1"}, "", Endpoints{}, -1, fmt.Errorf("use path style endpoint for SD setup")},
+		{":443", []string{"/d1"}, ":443", Endpoints{Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true}}, ErasureSDSetupType, nil},
+		{"localhost:10000", []string{"/d1"}, "localhost:10000", Endpoints{Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true}}, ErasureSDSetupType, nil},
+		{"localhost:9000", []string{"https://127.0.0.1:9000/d1", "https://localhost:9001/d1", "https://example.com/d1", "https://example.com/d2"}, "", Endpoints{}, -1, fmt.Errorf("path '/d1' can not be served by different port on same address")},

 		// Erasure Setup with PathEndpointType
 		{
 			":1234",
-			[][]string{{"/d1", "/d2", "/d3", "/d4"}},
+			[]string{"/d1", "/d2", "/d3", "/d4"},
 			":1234",
 			Endpoints{
 				Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true},
@@ -253,49 +253,49 @@ func TestCreateEndpoints(t *testing.T) {
 			ErasureSetupType, nil,
 		},
 		// DistErasure Setup with URLEndpointType
-		{":9000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://localhost/d3", "http://localhost/d4"}}, ":9000", Endpoints{
+		{":9000", []string{"http://localhost/d1", "http://localhost/d2", "http://localhost/d3", "http://localhost/d4"}, ":9000", Endpoints{
 			Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost:9000", Path: "/d1"}, IsLocal: true},
 			Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost:9000", Path: "/d2"}, IsLocal: true},
 			Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost:9000", Path: "/d3"}, IsLocal: true},
 			Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost:9000", Path: "/d4"}, IsLocal: true},
-		}, ErasureSetupType, nil},
+		}, DistErasureSetupType, nil},
 		// DistErasure Setup with URLEndpointType having mixed naming to local host.
-		{"127.0.0.1:10000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://127.0.0.1/d3", "http://127.0.0.1/d4"}}, "", Endpoints{}, -1, fmt.Errorf("all local endpoints should not have different hostnames/ips")},
+		{"127.0.0.1:10000", []string{"http://localhost/d1", "http://localhost/d2", "http://127.0.0.1/d3", "http://127.0.0.1/d4"}, "", Endpoints{}, -1, fmt.Errorf("all local endpoints should not have different hostnames/ips")},

-		{":9001", [][]string{{"http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export", "http://" + nonLoopBackIP + ":9001/export", "http://10.0.0.2:9001/export"}}, "", Endpoints{}, -1, fmt.Errorf("path '/export' can not be served by different port on same address")},
+		{":9001", []string{"http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export", "http://" + nonLoopBackIP + ":9001/export", "http://10.0.0.2:9001/export"}, "", Endpoints{}, -1, fmt.Errorf("path '/export' can not be served by different port on same address")},

-		{":9000", [][]string{{"http://127.0.0.1:9000/export", "http://" + nonLoopBackIP + ":9000/export", "http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export"}}, "", Endpoints{}, -1, fmt.Errorf("path '/export' cannot be served by different address on same server")},
+		{":9000", []string{"http://127.0.0.1:9000/export", "http://" + nonLoopBackIP + ":9000/export", "http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export"}, "", Endpoints{}, -1, fmt.Errorf("path '/export' cannot be served by different address on same server")},

 		// DistErasure type
-		{"127.0.0.1:10000", [][]string{{case1Endpoint1, case1Endpoint2, "http://example.org/d3", "http://example.com/d4"}}, "127.0.0.1:10000", Endpoints{
+		{"127.0.0.1:10000", []string{case1Endpoint1, case1Endpoint2, "http://example.org/d3", "http://example.com/d4"}, "127.0.0.1:10000", Endpoints{
 			Endpoint{URL: case1URLs[0], IsLocal: case1LocalFlags[0]},
 			Endpoint{URL: case1URLs[1], IsLocal: case1LocalFlags[1]},
 			Endpoint{URL: case1URLs[2], IsLocal: case1LocalFlags[2]},
 			Endpoint{URL: case1URLs[3], IsLocal: case1LocalFlags[3]},
 		}, DistErasureSetupType, nil},

-		{"127.0.0.1:10000", [][]string{{case2Endpoint1, case2Endpoint2, "http://example.org/d3", "http://example.com/d4"}}, "127.0.0.1:10000", Endpoints{
+		{"127.0.0.1:10000", []string{case2Endpoint1, case2Endpoint2, "http://example.org/d3", "http://example.com/d4"}, "127.0.0.1:10000", Endpoints{
 			Endpoint{URL: case2URLs[0], IsLocal: case2LocalFlags[0]},
 			Endpoint{URL: case2URLs[1], IsLocal: case2LocalFlags[1]},
 			Endpoint{URL: case2URLs[2], IsLocal: case2LocalFlags[2]},
 			Endpoint{URL: case2URLs[3], IsLocal: case2LocalFlags[3]},
 		}, DistErasureSetupType, nil},

-		{":80", [][]string{{case3Endpoint1, "http://example.org:9000/d2", "http://example.com/d3", "http://example.net/d4"}}, ":80", Endpoints{
+		{":80", []string{case3Endpoint1, "http://example.org:9000/d2", "http://example.com/d3", "http://example.net/d4"}, ":80", Endpoints{
 			Endpoint{URL: case3URLs[0], IsLocal: case3LocalFlags[0]},
 			Endpoint{URL: case3URLs[1], IsLocal: case3LocalFlags[1]},
 			Endpoint{URL: case3URLs[2], IsLocal: case3LocalFlags[2]},
 			Endpoint{URL: case3URLs[3], IsLocal: case3LocalFlags[3]},
 		}, DistErasureSetupType, nil},

-		{":9000", [][]string{{case4Endpoint1, "http://example.org/d2", "http://example.com/d3", "http://example.net/d4"}}, ":9000", Endpoints{
+		{":9000", []string{case4Endpoint1, "http://example.org/d2", "http://example.com/d3", "http://example.net/d4"}, ":9000", Endpoints{
 			Endpoint{URL: case4URLs[0], IsLocal: case4LocalFlags[0]},
 			Endpoint{URL: case4URLs[1], IsLocal: case4LocalFlags[1]},
 			Endpoint{URL: case4URLs[2], IsLocal: case4LocalFlags[2]},
 			Endpoint{URL: case4URLs[3], IsLocal: case4LocalFlags[3]},
 		}, DistErasureSetupType, nil},

-		{":9000", [][]string{{case5Endpoint1, case5Endpoint2, case5Endpoint3, case5Endpoint4}}, ":9000", Endpoints{
+		{":9000", []string{case5Endpoint1, case5Endpoint2, case5Endpoint3, case5Endpoint4}, ":9000", Endpoints{
 			Endpoint{URL: case5URLs[0], IsLocal: case5LocalFlags[0]},
 			Endpoint{URL: case5URLs[1], IsLocal: case5LocalFlags[1]},
 			Endpoint{URL: case5URLs[2], IsLocal: case5LocalFlags[2]},
@@ -303,7 +303,7 @@ func TestCreateEndpoints(t *testing.T) {
 		}, DistErasureSetupType, nil},

 		// DistErasure Setup using only local host.
-		{":9003", [][]string{{"http://localhost:9000/d1", "http://localhost:9001/d2", "http://127.0.0.1:9002/d3", case6Endpoint}}, ":9003", Endpoints{
+		{":9003", []string{"http://localhost:9000/d1", "http://localhost:9001/d2", "http://127.0.0.1:9002/d3", case6Endpoint}, ":9003", Endpoints{
 			Endpoint{URL: case6URLs[0], IsLocal: case6LocalFlags[0]},
 			Endpoint{URL: case6URLs[1], IsLocal: case6LocalFlags[1]},
 			Endpoint{URL: case6URLs[2], IsLocal: case6LocalFlags[2]},
@@ -311,24 +311,32 @@ func TestCreateEndpoints(t *testing.T) {
 		}, DistErasureSetupType, nil},
 	}

-	for _, testCase := range testCases {
+	for i, testCase := range testCases {
+		i := i
 		testCase := testCase
 		t.Run("", func(t *testing.T) {
-			endpoints, setupType, err := CreateEndpoints(testCase.serverAddr, testCase.args...)
+			var srvCtxt serverCtxt
+			err := mergeDisksLayoutFromArgs(testCase.args, &srvCtxt)
+			if err != nil && testCase.expectedErr == nil {
+				t.Errorf("Test %d: unexpected error: %v", i+1, err)
+			}
+			pools, setupType, err := CreatePoolEndpoints(testCase.serverAddr, srvCtxt.Layout.pools...)
 			if err == nil && testCase.expectedErr != nil {
-				t.Errorf("error: expected = %v, got = <nil>", testCase.expectedErr)
+				t.Errorf("Test %d: expected = %v, got = <nil>", i+1, testCase.expectedErr)
 			}
 			if err == nil {
 				if setupType != testCase.expectedSetupType {
-					t.Errorf("setupType: expected = %v, got = %v", testCase.expectedSetupType, setupType)
+					t.Errorf("Test %d: setupType: expected = %v, got = %v", i+1, testCase.expectedSetupType, setupType)
 				}
+				endpoints := pools[0]
 				if len(endpoints) != len(testCase.expectedEndpoints) {
-					t.Errorf("endpoints: expected = %d, got = %d", len(testCase.expectedEndpoints),
+					t.Errorf("Test %d: endpoints: expected = %d, got = %d", i+1, len(testCase.expectedEndpoints),
 						len(endpoints))
 				} else {
 					for i, endpoint := range endpoints {
 						if testCase.expectedEndpoints[i].String() != endpoint.String() {
-							t.Errorf("endpoints: expected = %s, got = %s",
+							t.Errorf("Test %d: endpoints: expected = %s, got = %s",
+								i+1,
 								testCase.expectedEndpoints[i],
 								endpoint)
 						}
@@ -336,7 +344,7 @@ func TestCreateEndpoints(t *testing.T) {
 				}
 			}
 			if err != nil && testCase.expectedErr == nil {
-				t.Errorf("error: expected = <nil>, got = %v, testCase: %v", err, testCase)
+				t.Errorf("Test %d: error: expected = <nil>, got = %v, testCase: %v", i+1, err, testCase)
 			}
 		})
 	}
@@ -23,7 +23,6 @@ import (
 	"strconv"
 	"strings"

-	"github.com/minio/cli"
 	"github.com/minio/minio/internal/logger"
 	ftp "goftp.io/server/v2"
 )
@@ -69,9 +68,7 @@ func (log *minioLogger) PrintResponse(sessionID string, code int, message string
 	}
 }

-func startFTPServer(c *cli.Context) {
-	args := c.StringSlice("ftp")
-
+func startFTPServer(args []string) {
 	var (
 		port     int
 		publicIP string
@@ -127,13 +127,44 @@ const (
 	tlsClientSessionCacheSize = 100
 )

-var globalCLIContext = struct {
-	JSON, Quiet    bool
-	Anonymous      bool
-	StrictS3Compat bool
-}{}
+type poolDisksLayout struct {
+	cmdline string
+	layout  [][]string
+}
+
+type disksLayout struct {
+	legacy bool
+	pools  []poolDisksLayout
+}
+
+type serverCtxt struct {
+	JSON, Quiet               bool
+	Anonymous                 bool
+	StrictS3Compat            bool
+	Addr, ConsoleAddr         string
+	ConfigDir, CertsDir       string
+	configDirSet, certsDirSet bool
+	Interface                 string
+
+	FTP  []string
+	SFTP []string
+
+	UserTimeout       time.Duration
+	ConnReadDeadline  time.Duration
+	ConnWriteDeadline time.Duration
+
+	ShutdownTimeout   time.Duration
+	IdleTimeout       time.Duration
+	ReadHeaderTimeout time.Duration
+
+	// The layout of disks as interpreted
+	Layout disksLayout
+}

 var (
+	// Global user opts context
+	globalServerCtxt serverCtxt
+
 	// Indicates if the running minio server is distributed setup.
 	globalIsDistErasure = false

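To make the new types concrete, here is a rough, illustrative serverCtxt value as a two-pool YAML config (see cmd/testdata/config/1.yaml further down) might populate it. The types are trimmed copies of the definitions above, the cmdline hashes are placeholders, and the drive lists are shortened:

```go
package main

import "fmt"

// Trimmed copies of the diff's types; time.Duration fields omitted for brevity.
type poolDisksLayout struct {
	cmdline string
	layout  [][]string
}

type disksLayout struct {
	legacy bool
	pools  []poolDisksLayout
}

type serverCtxt struct {
	Addr, ConsoleAddr string
	CertsDir          string
	certsDirSet       bool
	FTP, SFTP         []string
	Layout            disksLayout
}

func main() {
	// Roughly what a two-pool config file merges into the context; the real
	// cmdline values are xxhash fingerprints over the expanded drive layout.
	ctxt := serverCtxt{
		Addr:        ":9000",
		ConsoleAddr: ":9001",
		CertsDir:    "/home/user/.minio/certs/",
		certsDirSet: true,
		FTP:         []string{"address=:8021", "passive-port-range=30000-40000"},
		SFTP:        []string{"address=:8022", "ssh-private-key=/home/user/.ssh/id_rsa"},
		Layout: disksLayout{pools: []poolDisksLayout{
			{cmdline: "hash:...", layout: [][]string{{"https://server1-pool1:9000/mnt/disk1/"}}},
			{cmdline: "hash:...", layout: [][]string{{"https://server1-pool2:9000/mnt/disk1/"}}},
		}},
	}
	fmt.Printf("%d pools, first pool cmdline %q\n", len(ctxt.Layout.pools), ctxt.Layout.pools[0].cmdline)
}
```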
@@ -51,10 +51,16 @@ import (
 	"github.com/minio/pkg/v2/certs"
 	"github.com/minio/pkg/v2/env"
 	"golang.org/x/exp/slices"
+	"gopkg.in/yaml.v2"
 )

 // ServerFlags - server command specific flags
 var ServerFlags = []cli.Flag{
+	cli.StringFlag{
+		Name:   "config",
+		Usage:  "specify server configuration via YAML configuration",
+		EnvVar: "MINIO_CONFIG",
+	},
 	cli.StringFlag{
 		Name:  "address",
 		Value: ":" + GlobalMinioDefaultPort,
@@ -226,9 +232,53 @@ func serverCmdArgs(ctx *cli.Context) []string {
 	return strings.Fields(v)
 }

-func serverHandleCmdArgs(ctx *cli.Context) {
-	// Handle common command args.
-	handleCommonCmdArgs(ctx)
+func mergeServerCtxtFromConfigFile(configFile string, ctxt *serverCtxt) error {
+	rd, err := Open(configFile)
+	if err != nil {
+		return err
+	}
+	defer rd.Close()
+
+	cf := &config.ServerConfig{}
+	dec := yaml.NewDecoder(rd)
+	dec.SetStrict(true)
+	if err = dec.Decode(cf); err != nil {
+		return err
+	}
+	if cf.Version != "v1" {
+		return fmt.Errorf("unexpected version: %s", cf.Version)
+	}
+	if cf.Addr != "" {
+		ctxt.Addr = cf.Addr
+	}
+	if cf.ConsoleAddr != "" {
+		ctxt.ConsoleAddr = cf.ConsoleAddr
+	}
+	if cf.CertsDir != "" {
+		ctxt.CertsDir = cf.CertsDir
+		ctxt.certsDirSet = true
+	}
+
+	if cf.Options.FTP.Address != "" {
+		ctxt.FTP = append(ctxt.FTP, fmt.Sprintf("address=%s", cf.Options.FTP.Address))
+	}
+	if cf.Options.FTP.PassivePortRange != "" {
+		ctxt.FTP = append(ctxt.FTP, fmt.Sprintf("passive-port-range=%s", cf.Options.FTP.PassivePortRange))
+	}
+
+	if cf.Options.SFTP.Address != "" {
+		ctxt.SFTP = append(ctxt.SFTP, fmt.Sprintf("address=%s", cf.Options.SFTP.Address))
+	}
+	if cf.Options.SFTP.SSHPrivateKey != "" {
+		ctxt.SFTP = append(ctxt.SFTP, fmt.Sprintf("ssh-private-key=%s", cf.Options.SFTP.SSHPrivateKey))
+	}
+
+	ctxt.Layout, err = buildDisksLayoutFromConfFile(cf.Pools)
+	return err
+}

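mergeServerCtxtFromConfigFile leans on yaml.v2's strict decoding so unknown or mistyped keys fail fast, and rejects any version other than v1. A self-contained sketch of that decode step; ServerConfig here is a cut-down stand-in for config.ServerConfig, whose real fields live in internal/config:

```go
package main

import (
	"fmt"
	"strings"

	"gopkg.in/yaml.v2"
)

// ServerConfig: an assumed, minimal subset of the real config.ServerConfig.
type ServerConfig struct {
	Version string     `yaml:"version"`
	Addr    string     `yaml:"address"`
	Pools   [][]string `yaml:"pools"`
}

func main() {
	doc := `
version: v1
address: ':9000'
pools:
  -
    - 'https://server{1...4}-pool1:9000/mnt/disk{1...4}/'
`
	var cf ServerConfig
	dec := yaml.NewDecoder(strings.NewReader(doc))
	dec.SetStrict(true) // unknown fields become decode errors
	if err := dec.Decode(&cf); err != nil {
		panic(err)
	}
	if cf.Version != "v1" {
		panic(fmt.Sprintf("unexpected version: %s", cf.Version))
	}
	fmt.Println(cf.Addr, len(cf.Pools), "pool(s)")
}
```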
+func serverHandleCmdArgs(ctxt serverCtxt) {
+	handleCommonArgs(ctxt)

 	logger.FatalIf(CheckLocalServerAddr(globalMinioAddr), "Unable to validate passed arguments")

@@ -251,7 +301,7 @@ func serverHandleCmdArgs(ctx *cli.Context) {
 	// Register root CAs for remote ENVs
 	env.RegisterGlobalCAs(globalRootCAs)

-	globalEndpoints, setupType, err = createServerEndpoints(globalMinioAddr, serverCmdArgs(ctx)...)
+	globalEndpoints, setupType, err = createServerEndpoints(globalMinioAddr, ctxt.Layout.pools, ctxt.Layout.legacy)
 	logger.FatalIf(err, "Invalid command line arguments")
 	globalNodes = globalEndpoints.GetNodes()

@@ -262,7 +312,7 @@ func serverHandleCmdArgs(ctx *cli.Context) {
 	}
 	globalIsErasureSD = (setupType == ErasureSDSetupType)
 	if globalDynamicAPIPort && globalIsDistErasure {
-		logger.FatalIf(errInvalidArgument, "Invalid --address=\"%s\", port '0' is not allowed in a distributed erasure coded setup", ctx.String("address"))
+		logger.FatalIf(errInvalidArgument, "Invalid --address=\"%s\", port '0' is not allowed in a distributed erasure coded setup", ctxt.Addr)
 	}

 	globalLocalNodeName = GetLocalPeer(globalEndpoints, globalMinioHost, globalMinioPort)
@@ -270,7 +320,7 @@ func serverHandleCmdArgs(ctx *cli.Context) {
 	globalLocalNodeNameHex = hex.EncodeToString(nodeNameSum[:])

 	// Initialize, see which NIC the service is running on, and save it as global value
-	setGlobalInternodeInterface(ctx.String("interface"))
+	setGlobalInternodeInterface(ctxt.Interface)

 	// allow transport to be HTTP/1.1 for proxying.
 	globalProxyTransport = NewCustomHTTPProxyTransport()()
@@ -289,8 +339,8 @@ func serverHandleCmdArgs(ctx *cli.Context) {
 	})

 	globalTCPOptions = xhttp.TCPOptions{
-		UserTimeout: int(ctx.Duration("conn-user-timeout").Milliseconds()),
-		Interface:   ctx.String("interface"),
+		UserTimeout: int(ctxt.UserTimeout.Milliseconds()),
+		Interface:   ctxt.Interface,
 	}

 	// On macOS, if a process already listens on LOCALIPADDR:PORT, net.Listen() falls back
@@ -299,8 +349,8 @@ func serverHandleCmdArgs(ctx *cli.Context) {
 	// To avoid this error situation we check for port availability.
 	logger.FatalIf(xhttp.CheckPortAvailability(globalMinioHost, globalMinioPort, globalTCPOptions), "Unable to start the server")

-	globalConnReadDeadline = ctx.Duration("conn-read-deadline")
-	globalConnWriteDeadline = ctx.Duration("conn-write-deadline")
+	globalConnReadDeadline = ctxt.ConnReadDeadline
+	globalConnWriteDeadline = ctxt.ConnWriteDeadline
 }

 func serverHandleEnvVars() {
@@ -590,11 +640,17 @@ func serverMain(ctx *cli.Context) {
 	// Always load ENV variables from files first.
 	loadEnvVarsFromFiles()

-	// Handle all server command args.
+	// Handle all server command args and build the disks layout
 	bootstrapTrace("serverHandleCmdArgs", func() {
-		serverHandleCmdArgs(ctx)
+		err := buildServerCtxt(ctx, &globalServerCtxt)
+		logger.FatalIf(err, "Unable to prepare the list of endpoints")
+
+		serverHandleCmdArgs(globalServerCtxt)
 	})

 	// DNS cache subsystem to reduce outgoing DNS requests
 	runDNSCache(ctx)

 	// Handle all server environment vars.
 	serverHandleEnvVars()

@@ -637,7 +693,7 @@ func serverMain(ctx *cli.Context) {
 	// Check for updates in non-blocking manner.
 	go func() {
-		if !globalCLIContext.Quiet && !globalInplaceUpdateDisabled {
+		if !globalServerCtxt.Quiet && !globalInplaceUpdateDisabled {
 			// Check for new updates from dl.min.io.
 			bootstrapTrace("checkUpdate", func() {
 				checkUpdate(getMinioMode())
@@ -683,9 +739,9 @@ func serverMain(ctx *cli.Context) {
 	httpServer := xhttp.NewServer(getServerListenAddrs()).
 		UseHandler(setCriticalErrorHandler(corsHandler(handler))).
 		UseTLSConfig(newTLSConfig(getCert)).
-		UseShutdownTimeout(ctx.Duration("shutdown-timeout")).
-		UseIdleTimeout(ctx.Duration("idle-timeout")).
-		UseReadHeaderTimeout(ctx.Duration("read-header-timeout")).
+		UseShutdownTimeout(globalServerCtxt.ShutdownTimeout).
+		UseIdleTimeout(globalServerCtxt.IdleTimeout).
+		UseReadHeaderTimeout(globalServerCtxt.ReadHeaderTimeout).
 		UseBaseContext(GlobalContext).
 		UseCustomLogger(log.New(io.Discard, "", 0)). // Turn-off random logging by Go stdlib
 		UseTCPOptions(globalTCPOptions)
@@ -779,7 +835,7 @@ func serverMain(ctx *cli.Context) {
 		logger.LogIf(GlobalContext, err)
 	}

-	if !globalCLIContext.StrictS3Compat {
+	if !globalServerCtxt.StrictS3Compat {
 		logger.Info(color.RedBold("WARNING: Strict AWS S3 compatible incoming PUT, POST content payload validation is turned off, caution is advised do not use in production"))
 	}
 })
@@ -813,16 +869,16 @@ func serverMain(ctx *cli.Context) {
 	}

 	// if we see FTP args, start FTP if possible
-	if len(ctx.StringSlice("ftp")) > 0 {
+	if len(globalServerCtxt.FTP) > 0 {
 		bootstrapTrace("go startFTPServer", func() {
-			go startFTPServer(ctx)
+			go startFTPServer(globalServerCtxt.FTP)
 		})
 	}

 	// If we see SFTP args, start SFTP if possible
-	if len(ctx.StringSlice("sftp")) > 0 {
-		bootstrapTrace("go startFTPServer", func() {
-			go startSFTPServer(ctx)
+	if len(globalServerCtxt.SFTP) > 0 {
+		bootstrapTrace("go startSFTPServer", func() {
+			go startSFTPServer(globalServerCtxt.SFTP)
 		})
 	}
 }()
@@ -23,6 +23,57 @@ import (
 	"testing"
 )

+func TestServerConfigFile(t *testing.T) {
+	for _, testcase := range []struct {
+		config      string
+		expectedErr bool
+		hash        string
+	}{
+		{
+			config:      "testdata/config/1.yaml",
+			expectedErr: false,
+			hash:        "hash:02bf70285dc71f76",
+		},
+		{
+			config:      "testdata/config/2.yaml",
+			expectedErr: false,
+			hash:        "hash:676d2da00f71f205",
+		},
+		{
+			config:      "testdata/config/invalid.yaml",
+			expectedErr: true,
+		},
+		{
+			config:      "testdata/config/invalid-types.yaml",
+			expectedErr: true,
+		},
+		{
+			config:      "testdata/config/invalid-disks.yaml",
+			expectedErr: true,
+		},
+	} {
+		testcase := testcase
+		t.Run(testcase.config, func(t *testing.T) {
+			sctx := &serverCtxt{}
+			err := mergeServerCtxtFromConfigFile(testcase.config, sctx)
+			if testcase.expectedErr && err == nil {
+				t.Error("expected failure, got success")
+			}
+			if !testcase.expectedErr && err != nil {
+				t.Error("expected success, got failure", err)
+			}
+			if err == nil {
+				if len(sctx.Layout.pools) != 2 {
+					t.Error("expected parsed pools to be 2, not", len(sctx.Layout.pools))
+				}
+				if sctx.Layout.pools[0].cmdline != testcase.hash {
+					t.Error("expected hash", testcase.hash, "got", sctx.Layout.pools[0].cmdline)
+				}
+			}
+		})
+	}
+}
+
 // Tests initializing new object layer.
 func TestNewObjectLayer(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
@@ -121,7 +121,7 @@ func printServerCommonMsg(apiEndpoints []string) {
 	// Colorize the message and print.
 	logger.Info(color.Blue("S3-API: ") + color.Bold(fmt.Sprintf("%s ", apiEndpointStr)))
-	if color.IsTerminal() && (!globalCLIContext.Anonymous && !globalCLIContext.JSON) {
+	if color.IsTerminal() && (!globalServerCtxt.Anonymous && !globalServerCtxt.JSON) {
 		logger.Info(color.Blue("RootUser: ") + color.Bold("%s ", cred.AccessKey))
 		logger.Info(color.Blue("RootPass: ") + color.Bold("%s \n", cred.SecretKey))
 		if region != "" {
@@ -132,7 +132,7 @@ func printServerCommonMsg(apiEndpoints []string) {
 	if globalBrowserEnabled {
 		consoleEndpointStr := strings.Join(stripStandardPorts(getConsoleEndpoints(), globalMinioConsoleHost), " ")
 		logger.Info(color.Blue("Console: ") + color.Bold(fmt.Sprintf("%s ", consoleEndpointStr)))
-		if color.IsTerminal() && (!globalCLIContext.Anonymous && !globalCLIContext.JSON) {
+		if color.IsTerminal() && (!globalServerCtxt.Anonymous && !globalServerCtxt.JSON) {
 			logger.Info(color.Blue("RootUser: ") + color.Bold("%s ", cred.AccessKey))
 			logger.Info(color.Blue("RootPass: ") + color.Bold("%s ", cred.SecretKey))
 		}
@@ -187,7 +187,7 @@ func printCLIAccessMsg(endPoint string, alias string) {
 	const mcQuickStartGuide = "https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart"

 	// Configure 'mc', following block prints platform specific information for minio client.
-	if color.IsTerminal() && !globalCLIContext.Anonymous {
+	if color.IsTerminal() && !globalServerCtxt.Anonymous {
 		logger.Info(color.Blue("\nCommand-line: ") + mcQuickStartGuide)
 		mcMessage := fmt.Sprintf("$ mc alias set '%s' '%s' '%s' '%s'", alias,
 			endPoint, cred.AccessKey, cred.SecretKey)
@@ -137,7 +137,7 @@ func TestServerSuite(t *testing.T) {
 		// Init and run test on ErasureSet backend.
 		{serverType: "ErasureSet", signer: signerV4},
 	}
-	globalCLIContext.StrictS3Compat = true
+	globalServerCtxt.StrictS3Compat = true
 	for i, testCase := range testCases {
 		t.Run(fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.serverType), func(t *testing.T) {
 			runAllTests(testCase, &check{t, testCase.serverType})
@@ -28,7 +28,6 @@ import (
 	"strings"
 	"time"

-	"github.com/minio/cli"
 	"github.com/minio/minio/internal/logger"
 	xsftp "github.com/minio/pkg/v2/sftp"
 	"github.com/pkg/sftp"
@@ -54,9 +53,7 @@ func (s *sftpLogger) Error(tag xsftp.LogType, err error) {
 	}
 }

-func startSFTPServer(c *cli.Context) {
-	args := c.StringSlice("sftp")
-
+func startSFTPServer(args []string) {
 	var (
 		port     int
 		publicIP string
@@ -79,7 +79,7 @@ func skipContentSha256Cksum(r *http.Request) bool {
 		// We return true only in situations when
 		// deployment has asked MinIO to allow for
 		// such broken clients and content-length > 0.
-		return r.ContentLength > 0 && !globalCLIContext.StrictS3Compat
+		return r.ContentLength > 0 && !globalServerCtxt.StrictS3Compat
 	}
 	return false
 }
cmd/testdata/config/1.yaml (new file, vendored)
@@ -0,0 +1,23 @@
+version: v1
+address: ':9000'
+console-address: ':9001'
+certs-dir: '/home/user/.minio/certs/'
+pools: # Specify the nodes and drives with pools
+  -
+    - 'https://server-example-pool1:9000/mnt/disk{1...4}/'
+    - 'https://server{1...2}-pool1:9000/mnt/disk{1...4}/'
+    - 'https://server3-pool1:9000/mnt/disk{1...4}/'
+    - 'https://server4-pool1:9000/mnt/disk{1...4}/'
+  -
+    - 'https://server-example-pool2:9000/mnt/disk{1...4}/'
+    - 'https://server{1...2}-pool2:9000/mnt/disk{1...4}/'
+    - 'https://server3-pool2:9000/mnt/disk{1...4}/'
+    - 'https://server4-pool2:9000/mnt/disk{1...4}/'
+
+options:
+  ftp: # settings for MinIO to act as an ftp server
+    address: ':8021'
+    passive-port-range: '30000-40000'
+  sftp: # settings for MinIO to act as an sftp server
+    address: ':8022'
+    ssh-private-key: '/home/user/.ssh/id_rsa'
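Given the `config` flag and `MINIO_CONFIG` environment variable defined in ServerFlags above, a file like this would presumably be supplied as `minio server --config <path-to-yaml>` (or via the environment variable); the `pools` section then stands in for the positional endpoint arguments, and `options` configures the FTP/SFTP sub-services.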
cmd/testdata/config/2.yaml (new file, vendored)
@@ -0,0 +1,23 @@
+version: v1
+address: ':9000'
+console-address: ':9001'
+certs-dir: '/home/user/.minio/certs/'
+pools: # Specify the nodes and drives with pools
+  -
+    - 'https://server-example-pool1:9000/mnt/disk{1...4}/'
+    - 'https://server1-pool1:9000/mnt/disk{1...4}/'
+    - 'https://server3-pool1:9000/mnt/disk{1...4}/'
+    - 'https://server4-pool1:9000/mnt/disk{1...4}/'
+  -
+    - 'https://server-example-pool2:9000/mnt/disk{1...4}/'
+    - 'https://server1-pool2:9000/mnt/disk{1...4}/'
+    - 'https://server3-pool2:9000/mnt/disk{1...4}/'
+    - 'https://server4-pool2:9000/mnt/disk{1...4}/'
+
+options:
+  ftp: # settings for MinIO to act as an ftp server
+    address: ':8021'
+    passive-port-range: '30000-40000'
+  sftp: # settings for MinIO to act as an sftp server
+    address: ':8022'
+    ssh-private-key: '/home/user/.ssh/id_rsa'
cmd/testdata/config/invalid-disks.yaml (new file, vendored)
@@ -0,0 +1,23 @@
+version: v1
+address: ':9000'
+console-address: ':9001'
+certs-dir: '/home/user/.minio/certs/'
+pools: # Specify the nodes and drives with pools
+  -
+    - 'https://server-example-pool1:9000/mnt/disk1/'
+    - 'https://server1-pool1:9000/mnt/disk{1...4}/'
+    - 'https://server3-pool1:9000/mnt/disk{1...4}/'
+    - 'https://server4-pool1:9000/mnt/disk{1...4}/'
+  -
+    - 'https://server-example-pool2:9000/mnt/disk{1...4}/'
+    - 'https://server1-pool2:9000/mnt/disk{1...4}/'
+    - 'https://server3-pool2:9000/mnt/disk{1...4}/'
+    - 'https://server4-pool2:9000/mnt/disk{1...4}/'
+
+options:
+  ftp: # settings for MinIO to act as an ftp server
+    address: ':8021'
+    passive-port-range: '30000-40000'
+  sftp: # settings for MinIO to act as an sftp server
+    address: ':8022'
+    ssh-private-key: '/home/user/.ssh/id_rsa'
cmd/testdata/config/invalid-types.yaml (new file, vendored)
@@ -0,0 +1,23 @@
+version: v1
+address: ':9000'
+console-address: ':9001'
+certs-dir: '/home/user/.minio/certs/'
+pools: # Specify the nodes and drives with pools
+  -
+    - '/mnt/disk{1...4}/'
+    - 'https://server1-pool1:9000/mnt/disk{1...4}/'
+    - 'https://server3-pool1:9000/mnt/disk{1...4}/'
+    - 'https://server4-pool1:9000/mnt/disk{1...4}/'
+  -
+    - 'https://server-example-pool2:9000/mnt/disk{1...4}/'
+    - 'https://server1-pool2:9000/mnt/disk{1...4}/'
+    - 'https://server3-pool2:9000/mnt/disk{1...4}/'
+    - 'https://server4-pool2:9000/mnt/disk{1...4}/'
+
+options:
+  ftp: # settings for MinIO to act as an ftp server
+    address: ':8021'
+    passive-port-range: '30000-40000'
+  sftp: # settings for MinIO to act as an sftp server
+    address: ':8022'
+    ssh-private-key: '/home/user/.ssh/id_rsa'
cmd/testdata/config/invalid.yaml (new file, vendored)
@@ -0,0 +1,23 @@
+version:
+address: ':9000'
+console-address: ':9001'
+certs-dir: '/home/user/.minio/certs/'
+pools: # Specify the nodes and drives with pools
+  -
+    - 'https://server-example-pool1:9000/mnt/disk{1...4}/'
+    - 'https://server1-pool1:9000/mnt/disk{1...4}/'
+    - 'https://server3-pool1:9000/mnt/disk{1...4}/'
+    - 'https://server4-pool1:9000/mnt/disk{1...4}/'
+  -
+    - 'https://server-example-pool2:9000/mnt/disk{1...4}/'
+    - 'https://server1-pool2:9000/mnt/disk{1...4}/'
+    - 'https://server3-pool2:9000/mnt/disk{1...4}/'
+    - 'https://server4-pool2:9000/mnt/disk{1...4}/'
+
+options:
+  ftp: # settings for MinIO to act as an ftp server
+    address: ':8021'
+    passive-port-range: '30000-40000'
+  sftp: # settings for MinIO to act as an sftp server
+    address: ':8022'
+    ssh-private-key: '/home/user/.ssh/id_rsa'
@@ -41,7 +41,7 @@ func prepareUpdateMessage(downloadURL string, older time.Duration) string {
 	t := time.Time{}
 	newerThan := humanize.RelTime(t, t.Add(older), "before the latest release", "")

-	if globalCLIContext.JSON {
+	if globalServerCtxt.JSON {
 		return fmt.Sprintf("You are running an older version of MinIO released %s, update: %s", newerThan, downloadURL)
 	}