Mirror of https://github.com/minio/minio.git (synced 2025-11-09 21:49:46 -05:00)
Migrate config to KV data format (#8392)
- add OAuth support to MinIO browser (#8400) by @kanagaraj
- support multi-line get/set/del for all config fields
- add support for comments, allow toggle
- add extensive validation of config before saving
- make the MinIO browser use proper claims, using STS tokens
- env support for all config parameters; legacy envs are still supported, with all documentation now pointing to the latest ENVs
- preserve accessKey/secretKey from FS mode setups
- add history support, implemented as three APIs: ClearHistory, RestoreHistory, ListHistory
- add help command support for each config parameter
- all the bug fixes after migration to KV, and other bug fixes encountered during testing
Committed by: kannappanr
Parent: 8836d57e3c
Commit: ee4a6a823d
cmd/admin-handlers-config-kv.go: 436 lines added (new file)
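The handlers added below exchange an encrypted plain-text payload with the admin client instead of a monolithic config.json. As a rough illustration only, the sketch below prepares such a payload the same way the tests in this commit do (madmin.EncryptData with the admin secret key); the "# comment" / "subsys key=value" line layout and the unsigned request are assumptions made for illustration, not something this commit defines:

package main

import (
	"bytes"
	"fmt"
	"net/http"

	"github.com/minio/minio/pkg/madmin"
)

// buildSetConfigKVRequest sketches how a caller could prepare the body for
// PUT /minio/admin/v2/set-config-kv: encrypt plain-text KV lines with the
// admin secret key, mirroring the madmin.EncryptData usage in the tests below.
// The payload syntax is an assumption based on how SetConfigKVHandler scans
// its input, and the request would still need AWS Signature V4 signing.
func buildSetConfigKVRequest(endpoint, secretKey string) (*http.Request, error) {
	payload := "# hypothetical example payload\nregion name=us-west-1\n"
	encrypted, err := madmin.EncryptData(secretKey, []byte(payload))
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest(http.MethodPut, endpoint+"/minio/admin/v2/set-config-kv", bytes.NewReader(encrypted))
	if err != nil {
		return nil, err
	}
	req.ContentLength = int64(len(encrypted))
	return req, nil
}

func main() {
	req, err := buildSetConfigKVRequest("http://127.0.0.1:9000", "minio123")
	if err != nil {
		panic(err)
	}
	fmt.Println("prepared", req.Method, req.URL, "with", req.ContentLength, "encrypted bytes")
}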
@@ -0,0 +1,436 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bufio"
	"bytes"
	"encoding/json"
	"io"
	"net/http"
	"strings"

	"github.com/gorilla/mux"
	"github.com/minio/minio/cmd/config"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/color"
	"github.com/minio/minio/pkg/madmin"
)

// DelConfigKVHandler - DELETE /minio/admin/v2/del-config-kv
func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "DelConfigKVHandler")

	objectAPI := validateAdminReq(ctx, w, r)
	if objectAPI == nil {
		return
	}

	// Deny if WORM is enabled
	if globalWORMEnabled {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
		return
	}

	if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
		// More than maxConfigSize bytes were available
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
		return
	}

	password := globalActiveCred.SecretKey
	kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		logger.LogIf(ctx, err, logger.Application)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
		return
	}
	cfg, err := readServerConfig(ctx, objectAPI)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	oldCfg := cfg.Clone()
	scanner := bufio.NewScanner(bytes.NewReader(kvBytes))
	for scanner.Scan() {
		if err = cfg.DelKVS(scanner.Text()); err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
	}
	if err = scanner.Err(); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err = saveServerConfig(ctx, objectAPI, cfg, oldCfg); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

// SetConfigKVHandler - PUT /minio/admin/v2/set-config-kv
func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SetConfigKVHandler")

	objectAPI := validateAdminReq(ctx, w, r)
	if objectAPI == nil {
		return
	}

	// Deny if WORM is enabled
	if globalWORMEnabled {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
		return
	}

	if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
		// More than maxConfigSize bytes were available
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
		return
	}

	password := globalActiveCred.SecretKey
	kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		logger.LogIf(ctx, err, logger.Application)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
		return
	}
	cfg, err := readServerConfig(ctx, objectAPI)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	defaultKVS := configDefaultKVS()
	oldCfg := cfg.Clone()
	scanner := bufio.NewScanner(bytes.NewReader(kvBytes))
	var comment string
	for scanner.Scan() {
		if strings.HasPrefix(scanner.Text(), config.KvComment) {
			// Join multiple comments for each newline, separated by ","
			comments := []string{comment, strings.TrimPrefix(scanner.Text(), config.KvComment)}
			comment = strings.Join(comments, config.KvNewline)
			continue
		}
		if err = cfg.SetKVS(scanner.Text(), comment, defaultKVS); err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		// Empty the comment for the next sub-system
		comment = ""
	}
	if err = scanner.Err(); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err = validateConfig(cfg); err != nil {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}

	// Update the actual server config on disk.
	if err = saveServerConfig(ctx, objectAPI, cfg, oldCfg); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Write to the config input KV to history.
	if err = saveServerConfigHistory(ctx, objectAPI, kvBytes); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
}

// GetConfigKVHandler - GET /minio/admin/v2/get-config-kv?key={key}
func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetConfigKVHandler")

	objectAPI := validateAdminReq(ctx, w, r)
	if objectAPI == nil {
		return
	}

	vars := mux.Vars(r)
	var body strings.Builder
	if vars["key"] != "" {
		kvs, err := globalServerConfig.GetKVS(vars["key"])
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		for k, kv := range kvs {
			c, ok := kv[config.Comment]
			if ok {
				// For multiple comments split it correctly.
				for _, c1 := range strings.Split(c, config.KvNewline) {
					if c1 == "" {
						continue
					}
					body.WriteString(color.YellowBold(config.KvComment))
					body.WriteString(config.KvSpaceSeparator)
					body.WriteString(color.BlueBold(strings.TrimSpace(c1)))
					body.WriteString(config.KvNewline)
				}
			}
			body.WriteString(color.CyanBold(k))
			body.WriteString(config.KvSpaceSeparator)
			body.WriteString(kv.String())
			if len(kvs) > 1 {
				body.WriteString(config.KvNewline)
			}
		}
	} else {
		body.WriteString(globalServerConfig.String())
	}
	password := globalActiveCred.SecretKey
	econfigData, err := madmin.EncryptData(password, []byte(body.String()))
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, econfigData)
}

func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ClearConfigHistoryKVHandler")

	objectAPI := validateAdminReq(ctx, w, r)
	if objectAPI == nil {
		return
	}

	vars := mux.Vars(r)
	restoreID := vars["restoreId"]
	if restoreID == "" {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}
	if restoreID == "all" {
		chEntries, err := listServerConfigHistory(ctx, objectAPI)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		for _, chEntry := range chEntries {
			if err = delServerConfigHistory(ctx, objectAPI, chEntry.RestoreID); err != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
				return
			}
		}
	} else {
		if err := delServerConfigHistory(ctx, objectAPI, restoreID); err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
	}
}

// RestoreConfigHistoryKVHandler - restores a config with KV settings for the given KV id.
func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "RestoreConfigHistoryKVHandler")

	objectAPI := validateAdminReq(ctx, w, r)
	if objectAPI == nil {
		return
	}

	vars := mux.Vars(r)
	restoreID := vars["restoreId"]
	if restoreID == "" {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}

	kvBytes, err := readServerConfigHistory(ctx, objectAPI, restoreID)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	cfg, err := readServerConfig(ctx, objectAPI)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	defaultKVS := configDefaultKVS()
	oldCfg := cfg.Clone()
	scanner := bufio.NewScanner(bytes.NewReader(kvBytes))
	var comment string
	for scanner.Scan() {
		if strings.HasPrefix(scanner.Text(), config.KvComment) {
			// Join multiple comments for each newline, separated by "\n"
			comment = strings.Join([]string{comment, scanner.Text()}, config.KvNewline)
			continue
		}
		if err = cfg.SetKVS(scanner.Text(), comment, defaultKVS); err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		comment = ""
	}
	if err = scanner.Err(); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err = validateConfig(cfg); err != nil {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}

	if err = saveServerConfig(ctx, objectAPI, cfg, oldCfg); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	delServerConfigHistory(ctx, objectAPI, restoreID)
}

// ListConfigHistoryKVHandler - lists all the KV ids.
func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListConfigHistoryKVHandler")

	objectAPI := validateAdminReq(ctx, w, r)
	if objectAPI == nil {
		return
	}

	chEntries, err := listServerConfigHistory(ctx, objectAPI)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	data, err := json.Marshal(chEntries)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, data)
}

// HelpConfigKVHandler - GET /minio/admin/v2/help-config-kv?subSys={subSys}&key={key}
func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "HelpConfigKVHandler")

	objectAPI := validateAdminReq(ctx, w, r)
	if objectAPI == nil {
		return
	}

	vars := mux.Vars(r)
	subSys := vars["subSys"]
	key := vars["key"]

	rd, err := GetHelp(subSys, key)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	io.Copy(w, rd)
	w.(http.Flusher).Flush()
}

// SetConfigHandler - PUT /minio/admin/v1/config
func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SetConfigHandler")

	objectAPI := validateAdminReq(ctx, w, r)
	if objectAPI == nil {
		return
	}

	// Deny if WORM is enabled
	if globalWORMEnabled {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
		return
	}

	if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
		// More than maxConfigSize bytes were available
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
		return
	}

	password := globalActiveCred.SecretKey
	configBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		logger.LogIf(ctx, err, logger.Application)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
		return
	}

	var cfg config.Config
	if err = json.Unmarshal(configBytes, &cfg); err != nil {
		logger.LogIf(ctx, err)
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}

	if err = validateConfig(cfg); err != nil {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}

	if err = saveServerConfig(ctx, objectAPI, cfg, nil); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Reply to the client before restarting minio server.
	writeSuccessResponseHeadersOnly(w)
}

// GetConfigHandler - GET /minio/admin/v1/config
// Get config.json of this minio setup.
func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetConfigHandler")

	objectAPI := validateAdminReq(ctx, w, r)
	if objectAPI == nil {
		return
	}

	config, err := readServerConfig(ctx, objectAPI)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	configData, err := json.MarshalIndent(config, "", "\t")
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	password := globalActiveCred.SecretKey
	econfigData, err := madmin.EncryptData(password, configData)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, econfigData)
}
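On the read side, GetConfigKVHandler above returns the rendered KV text encrypted with the same credential. A minimal client-side sketch of the inverse step, assuming the caller already has the HTTP response body in hand and the "io" and madmin imports from the file above (the helper name and surrounding wiring are illustrative only; request construction and signing are omitted):

// decryptConfigKV mirrors the madmin.DecryptData call used by the handlers
// above: password is the admin secret key, body/length come from the HTTP
// response of get-config-kv.
func decryptConfigKV(password string, body io.Reader, length int64) (string, error) {
	plain, err := madmin.DecryptData(password, io.LimitReader(body, length))
	if err != nil {
		return "", err
	}
	return string(plain), nil
}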
@@ -36,6 +36,7 @@ import (
	humanize "github.com/dustin/go-humanize"
	"github.com/gorilla/mux"

	"github.com/minio/minio/cmd/config"
	"github.com/minio/minio/cmd/crypto"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"

@@ -45,7 +46,6 @@ import (
	"github.com/minio/minio/pkg/madmin"
	"github.com/minio/minio/pkg/mem"
	xnet "github.com/minio/minio/pkg/net"
	"github.com/minio/minio/pkg/quick"
	trace "github.com/minio/minio/pkg/trace"
)

@@ -286,7 +286,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
				CommitID: CommitID,
				DeploymentID: globalDeploymentID,
				SQSARN: globalNotificationSys.GetARNList(),
				Region: globalServerConfig.GetRegion(),
				Region: globalServerRegion,
			},
		},
	})
@@ -931,38 +931,6 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *
	w.(http.Flusher).Flush()
}

// GetConfigHandler - GET /minio/admin/v1/config
// Get config.json of this minio setup.
func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetConfigHandler")

	objectAPI := validateAdminReq(ctx, w, r)
	if objectAPI == nil {
		return
	}

	config, err := readServerConfig(ctx, objectAPI)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	configData, err := json.MarshalIndent(config, "", "\t")
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	password := config.GetCredential().SecretKey
	econfigData, err := madmin.EncryptData(password, configData)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, econfigData)
}

func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request) ObjectLayer {
	// Get current object layer instance.
	objectAPI := newObjectLayerFn()
@@ -1017,6 +985,12 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
	apiErr := errorCodes.ToAPIErr(toAdminAPIErrCode(ctx, err))
	if apiErr.Code == "InternalError" {
		switch e := err.(type) {
		case config.Error:
			apiErr = APIError{
				Code: "XMinioConfigError",
				Description: e.Error(),
				HTTPStatusCode: http.StatusBadRequest,
			}
		case AdminError:
			apiErr = APIError{
				Code: e.Code,

@@ -1081,7 +1055,7 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
		return
	}

	password := globalServerConfig.GetCredential().SecretKey
	password := globalActiveCred.SecretKey
	econfigData, err := madmin.EncryptData(password, data)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -1267,7 +1241,7 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
	status := vars["status"]

	// Custom IAM policies not allowed for admin user.
	if accessKey == globalServerConfig.GetCredential().AccessKey {
	if accessKey == globalActiveCred.AccessKey {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}

@@ -1305,7 +1279,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
	accessKey := vars["accessKey"]

	// Custom IAM policies not allowed for admin user.
	if accessKey == globalServerConfig.GetCredential().AccessKey {
	if accessKey == globalActiveCred.AccessKey {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserInvalidArgument), r.URL)
		return
	}

@@ -1316,7 +1290,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
		return
	}

	password := globalServerConfig.GetCredential().SecretKey
	password := globalActiveCred.SecretKey
	configBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		logger.LogIf(ctx, err)
@@ -1509,78 +1483,6 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
	}
}

// SetConfigHandler - PUT /minio/admin/v1/config
func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SetConfigHandler")

	objectAPI := validateAdminReq(ctx, w, r)
	if objectAPI == nil {
		return
	}

	// Deny if WORM is enabled
	if globalWORMEnabled {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
		return
	}

	if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
		// More than maxConfigSize bytes were available
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
		return
	}

	password := globalServerConfig.GetCredential().SecretKey
	configBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		logger.LogIf(ctx, err, logger.Application)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
		return
	}

	// Validate JSON provided in the request body: check the
	// client has not sent JSON objects with duplicate keys.
	if err = quick.CheckDuplicateKeys(string(configBytes)); err != nil {
		logger.LogIf(ctx, err, logger.Application)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
		return
	}

	var config serverConfig
	if err = json.Unmarshal(configBytes, &config); err != nil {
		logger.LogIf(ctx, err)
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}

	// If credentials for the server are provided via environment,
	// then credentials in the provided configuration must match.
	if globalIsEnvCreds {
		if !globalServerConfig.GetCredential().Equal(config.Credential) {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminCredentialsMismatch), r.URL)
			return
		}
	}

	if err = config.Validate(); err != nil {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}

	if err = config.TestNotificationTargets(); err != nil {
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
		return
	}

	if err = saveServerConfig(ctx, objectAPI, &config); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Reply to the client before restarting minio server.
	writeSuccessResponseHeadersOnly(w)
}

// Returns true if the trace.Info should be traced,
// false if certain conditions are not met.
// - input entry is not of the type *trace.Info*
@@ -25,7 +25,6 @@ import (
	"net/http"
	"net/http/httptest"
	"net/url"
	"strings"
	"sync"
	"testing"
	"time"

@@ -35,217 +34,6 @@ import (
	"github.com/minio/minio/pkg/madmin"
)

var (
	configJSON = []byte(`{
	"version": "33",
	"credential": {
		"accessKey": "minio",
		"secretKey": "minio123"
	},
	"region": "us-east-1",
	"worm": "off",
	"storageclass": {
		"standard": "",
		"rrs": ""
	},
	"cache": {
		"drives": [],
		"expiry": 90,
		"maxuse": 80,
		"exclude": []
	},
	"kms": {
		"vault": {
			"endpoint": "",
			"auth": {
				"type": "",
				"approle": {
					"id": "",
					"secret": ""
				}
			},
			"key-id": {
				"name": "",
				"version": 0
			}
		}
	},
	"notify": {
		"amqp": {
			"1": {
				"enable": false,
				"url": "",
				"exchange": "",
				"routingKey": "",
				"exchangeType": "",
				"deliveryMode": 0,
				"mandatory": false,
				"immediate": false,
				"durable": false,
				"internal": false,
				"noWait": false,
				"autoDeleted": false,
				"queueDir": "",
				"queueLimit": 0
			}
		},
		"elasticsearch": {
			"1": {
				"enable": false,
				"format": "namespace",
				"url": "",
				"index": "",
				"queueDir": "",
				"queueLimit": 0
			}
		},
		"kafka": {
			"1": {
				"enable": false,
				"brokers": null,
				"topic": "",
				"queueDir": "",
				"queueLimit": 0,
				"tls": {
					"enable": false,
					"skipVerify": false,
					"clientAuth": 0
				},
				"sasl": {
					"enable": false,
					"username": "",
					"password": ""
				}
			}
		},
		"mqtt": {
			"1": {
				"enable": false,
				"broker": "",
				"topic": "",
				"qos": 0,
				"username": "",
				"password": "",
				"reconnectInterval": 0,
				"keepAliveInterval": 0,
				"queueDir": "",
				"queueLimit": 0
			}
		},
		"mysql": {
			"1": {
				"enable": false,
				"format": "namespace",
				"dsnString": "",
				"table": "",
				"host": "",
				"port": "",
				"user": "",
				"password": "",
				"database": "",
				"queueDir": "",
				"queueLimit": 0
			}
		},
		"nats": {
			"1": {
				"enable": false,
				"address": "",
				"subject": "",
				"username": "",
				"password": "",
				"token": "",
				"secure": false,
				"pingInterval": 0,
				"queueDir": "",
				"queueLimit": 0,
				"streaming": {
					"enable": false,
					"clusterID": "",
					"async": false,
					"maxPubAcksInflight": 0
				}
			}
		},
		"nsq": {
			"1": {
				"enable": false,
				"nsqdAddress": "",
				"topic": "",
				"tls": {
					"enable": false,
					"skipVerify": false
				},
				"queueDir": "",
				"queueLimit": 0
			}
		},
		"postgresql": {
			"1": {
				"enable": false,
				"format": "namespace",
				"connectionString": "",
				"table": "",
				"host": "",
				"port": "",
				"user": "",
				"password": "",
				"database": "",
				"queueDir": "",
				"queueLimit": 0
			}
		},
		"redis": {
			"1": {
				"enable": false,
				"format": "namespace",
				"address": "",
				"password": "",
				"key": "",
				"queueDir": "",
				"queueLimit": 0
			}
		},
		"webhook": {
			"1": {
				"enable": false,
				"endpoint": "",
				"queueDir": "",
				"queueLimit": 0
			}
		}
	},
	"logger": {
		"console": {
			"enabled": true
		},
		"http": {
			"1": {
				"enabled": false,
				"endpoint": "https://username:password@example.com/api"
			}
		}
	},
	"compress": {
		"enabled": false,
		"extensions":[".txt",".log",".csv",".json"],
		"mime-types":["text/csv","text/plain","application/json"]
	},
	"openid": {
		"jwks": {
			"url": ""
		}
	},
	"policy": {
		"opa": {
			"url": "",
			"authToken": ""
		}
	}
}
`)
)
// adminXLTestBed - encapsulates subsystems that need to be setup for
// admin-handler unit tests.
type adminXLTestBed struct {

@@ -435,7 +223,7 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
			testServiceSignalReceiver(cmd, t)
		}()
	}
	credentials := globalServerConfig.GetCredential()
	credentials := globalActiveCred

	req, err := getServiceCmdRequest(cmd, credentials)
	if err != nil {

@@ -471,7 +259,7 @@ func buildAdminRequest(queryVal url.Values, method, path string,
		return nil, err
	}

	cred := globalServerConfig.GetCredential()
	cred := globalActiveCred
	err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
	if err != nil {
		return nil, err

@@ -480,106 +268,6 @@ func buildAdminRequest(queryVal url.Values, method, path string,
	return req, nil
}

// TestGetConfigHandler - test for GetConfigHandler.
func TestGetConfigHandler(t *testing.T) {
	adminTestBed, err := prepareAdminXLTestBed()
	if err != nil {
		t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
	}
	defer adminTestBed.TearDown()

	// Initialize admin peers to make admin RPC calls.
	globalMinioAddr = "127.0.0.1:9000"

	// Prepare query params for get-config mgmt REST API.
	queryVal := url.Values{}
	queryVal.Set("config", "")

	req, err := buildAdminRequest(queryVal, http.MethodGet, "/config", 0, nil)
	if err != nil {
		t.Fatalf("Failed to construct get-config object request - %v", err)
	}

	rec := httptest.NewRecorder()
	adminTestBed.router.ServeHTTP(rec, req)
	if rec.Code != http.StatusOK {
		t.Errorf("Expected to succeed but failed with %d", rec.Code)
	}

}

// TestSetConfigHandler - test for SetConfigHandler.
func TestSetConfigHandler(t *testing.T) {
	adminTestBed, err := prepareAdminXLTestBed()
	if err != nil {
		t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
	}
	defer adminTestBed.TearDown()

	// Initialize admin peers to make admin RPC calls.
	globalMinioAddr = "127.0.0.1:9000"

	// Prepare query params for set-config mgmt REST API.
	queryVal := url.Values{}
	queryVal.Set("config", "")

	password := globalServerConfig.GetCredential().SecretKey
	econfigJSON, err := madmin.EncryptData(password, configJSON)
	if err != nil {
		t.Fatal(err)
	}

	req, err := buildAdminRequest(queryVal, http.MethodPut, "/config",
		int64(len(econfigJSON)), bytes.NewReader(econfigJSON))
	if err != nil {
		t.Fatalf("Failed to construct set-config object request - %v", err)
	}

	rec := httptest.NewRecorder()
	adminTestBed.router.ServeHTTP(rec, req)
	if rec.Code != http.StatusOK {
		t.Errorf("Expected to succeed but failed with %d, body: %s", rec.Code, rec.Body)
	}

	// Check that a very large config file returns an error.
	{
		// Make a large enough config string
		invalidCfg := []byte(strings.Repeat("A", maxEConfigJSONSize+1))
		req, err := buildAdminRequest(queryVal, http.MethodPut, "/config",
			int64(len(invalidCfg)), bytes.NewReader(invalidCfg))
		if err != nil {
			t.Fatalf("Failed to construct set-config object request - %v", err)
		}

		rec := httptest.NewRecorder()
		adminTestBed.router.ServeHTTP(rec, req)
		respBody := rec.Body.String()
		if rec.Code != http.StatusBadRequest ||
			!strings.Contains(respBody, "Configuration data provided exceeds the allowed maximum of") {
			t.Errorf("Got unexpected response code or body %d - %s", rec.Code, respBody)
		}
	}

	// Check that a config with duplicate keys in an object return
	// error.
	{
		invalidCfg := append(econfigJSON[:len(econfigJSON)-1], []byte(`, "version": "15"}`)...)
		req, err := buildAdminRequest(queryVal, http.MethodPut, "/config",
			int64(len(invalidCfg)), bytes.NewReader(invalidCfg))
		if err != nil {
			t.Fatalf("Failed to construct set-config object request - %v", err)
		}

		rec := httptest.NewRecorder()
		adminTestBed.router.ServeHTTP(rec, req)
		respBody := rec.Body.String()
		if rec.Code != http.StatusBadRequest ||
			!strings.Contains(respBody, "JSON configuration provided is of incorrect format") {
			t.Errorf("Got unexpected response code or body %d - %s", rec.Code, respBody)
		}
	}
}

func TestAdminServerInfo(t *testing.T) {
	adminTestBed, err := prepareAdminXLTestBed()
	if err != nil {
@@ -74,6 +74,17 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
		Queries("profilerType", "{profilerType:.*}")
	adminRouter.Methods(http.MethodGet).Path("/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))

	// Config KV operations.
	if enableConfigOps {
		adminRouter.Methods(http.MethodGet).Path("/get-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKVHandler)).Queries("key", "{key:.*}")
		adminRouter.Methods(http.MethodPut).Path("/set-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKVHandler))
		adminRouter.Methods(http.MethodDelete).Path("/del-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.DelConfigKVHandler))
		adminRouter.Methods(http.MethodGet).Path("/help-config-kv").HandlerFunc(httpTraceAll(adminAPI.HelpConfigKVHandler)).Queries("subSys", "{subSys:.*}", "key", "{key:.*}")
		adminRouter.Methods(http.MethodGet).Path("/list-config-history-kv").HandlerFunc(httpTraceAll(adminAPI.ListConfigHistoryKVHandler))
		adminRouter.Methods(http.MethodDelete).Path("/clear-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.ClearConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
		adminRouter.Methods(http.MethodPut).Path("/restore-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
	}

	/// Config operations
	if enableConfigOps {
		// Get config
@@ -1796,7 +1796,7 @@ func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID,
		BucketName: reqInfo.BucketName,
		Key: reqInfo.ObjectName,
		Resource: resource,
		Region: globalServerConfig.GetRegion(),
		Region: globalServerRegion,
		RequestID: requestID,
		HostID: hostID,
	}

@@ -40,7 +40,7 @@ func setCommonHeaders(w http.ResponseWriter) {
	w.Header().Set(xhttp.ServerInfo, "MinIO/"+ReleaseTag)
	// Set `x-amz-bucket-region` only if region is set on the server
	// by default minio uses an empty region.
	if region := globalServerConfig.GetRegion(); region != "" {
	if region := globalServerRegion; region != "" {
		w.Header().Set(xhttp.AmzBucketRegion, region)
	}
	w.Header().Set(xhttp.AcceptRanges, "bytes")

@@ -676,7 +676,7 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
	case "AccessDenied":
		// The request is from browser and also if browser
		// is enabled we need to redirect.
		if browser && globalIsBrowserEnabled {
		if browser && globalBrowserEnabled {
			w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
			w.WriteHeader(http.StatusTemporaryRedirect)
			return

@@ -747,7 +747,7 @@ func writeCustomErrorResponseXML(ctx context.Context, w http.ResponseWriter, err
	case "AccessDenied":
		// The request is from browser and also if browser
		// is enabled we need to redirect.
		if browser && globalIsBrowserEnabled {
		if browser && globalBrowserEnabled {
			w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
			w.WriteHeader(http.StatusTemporaryRedirect)
			return
@@ -179,7 +179,7 @@ func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
		// hijacking the policies. We need to make sure that this is
		// based an admin credential such that token cannot be decoded
		// on the client side and is treated like an opaque value.
		return []byte(globalServerConfig.GetCredential().SecretKey), nil
		return []byte(globalActiveCred.SecretKey), nil
	}
	p := &jwtgo.Parser{
		ValidMethods: []string{

@@ -212,7 +212,7 @@ func getClaimsFromToken(r *http.Request) (map[string]interface{}, error) {
		// If OPA is not set, session token should
		// have a policy and its mandatory, reject
		// requests without policy claim.
		p, pok := claims[iampolicy.PolicyName]
		p, pok := claims[iamPolicyName()]
		if !pok {
			return nil, errAuthentication
		}

@@ -286,7 +286,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
		}
		cred, owner, s3Err = getReqAccessKeyV2(r)
	case authTypeSigned, authTypePresigned:
		region := globalServerConfig.GetRegion()
		region := globalServerRegion
		switch action {
		case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction:
			region = ""

@@ -485,7 +485,7 @@ func isPutAllowed(atype authType, bucketName, objectName string, r *http.Request
	case authTypeSignedV2, authTypePresignedV2:
		cred, owner, s3Err = getReqAccessKeyV2(r)
	case authTypeStreamingSigned, authTypePresigned, authTypeSigned:
		region := globalServerConfig.GetRegion()
		region := globalServerRegion
		cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
	}
	if s3Err != ErrNone {
@@ -273,7 +273,7 @@ func mustNewRequest(method string, urlStr string, contentLength int64, body io.R
// is signed with AWS Signature V4, fails if not able to do so.
func mustNewSignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
	req := mustNewRequest(method, urlStr, contentLength, body, t)
	cred := globalServerConfig.GetCredential()
	cred := globalActiveCred
	if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
		t.Fatalf("Unable to inititalized new signed http request %s", err)
	}

@@ -284,7 +284,7 @@ func mustNewSignedRequest(method string, urlStr string, contentLength int64, bod
// is signed with AWS Signature V2, fails if not able to do so.
func mustNewSignedV2Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
	req := mustNewRequest(method, urlStr, contentLength, body, t)
	cred := globalServerConfig.GetCredential()
	cred := globalActiveCred
	if err := signRequestV2(req, cred.AccessKey, cred.SecretKey); err != nil {
		t.Fatalf("Unable to inititalized new signed http request %s", err)
	}

@@ -295,7 +295,7 @@ func mustNewSignedV2Request(method string, urlStr string, contentLength int64, b
// is presigned with AWS Signature V2, fails if not able to do so.
func mustNewPresignedV2Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
	req := mustNewRequest(method, urlStr, contentLength, body, t)
	cred := globalServerConfig.GetCredential()
	cred := globalActiveCred
	if err := preSignV2(req, cred.AccessKey, cred.SecretKey, time.Now().Add(10*time.Minute).Unix()); err != nil {
		t.Fatalf("Unable to inititalized new signed http request %s", err)
	}

@@ -306,7 +306,7 @@ func mustNewPresignedV2Request(method string, urlStr string, contentLength int64
// is presigned with AWS Signature V4, fails if not able to do so.
func mustNewPresignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
	req := mustNewRequest(method, urlStr, contentLength, body, t)
	cred := globalServerConfig.GetCredential()
	cred := globalActiveCred
	if err := preSignV4(req, cred.AccessKey, cred.SecretKey, time.Now().Add(10*time.Minute).Unix()); err != nil {
		t.Fatalf("Unable to inititalized new signed http request %s", err)
	}

@@ -316,7 +316,7 @@ func mustNewPresignedRequest(method string, urlStr string, contentLength int64,
func mustNewSignedShortMD5Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
	req := mustNewRequest(method, urlStr, contentLength, body, t)
	req.Header.Set("Content-Md5", "invalid-digest")
	cred := globalServerConfig.GetCredential()
	cred := globalActiveCred
	if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
		t.Fatalf("Unable to initialized new signed http request %s", err)
	}

@@ -326,17 +326,18 @@ func mustNewSignedShortMD5Request(method string, urlStr string, contentLength in
func mustNewSignedEmptyMD5Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
	req := mustNewRequest(method, urlStr, contentLength, body, t)
	req.Header.Set("Content-Md5", "")
	cred := globalServerConfig.GetCredential()
	cred := globalActiveCred
	if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
		t.Fatalf("Unable to initialized new signed http request %s", err)
	}
	return req
}

func mustNewSignedBadMD5Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
func mustNewSignedBadMD5Request(method string, urlStr string, contentLength int64,
	body io.ReadSeeker, t *testing.T) *http.Request {
	req := mustNewRequest(method, urlStr, contentLength, body, t)
	req.Header.Set("Content-Md5", "YWFhYWFhYWFhYWFhYWFhCg==")
	cred := globalServerConfig.GetCredential()
	cred := globalActiveCred
	if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
		t.Fatalf("Unable to initialized new signed http request %s", err)
	}

@@ -359,7 +360,7 @@ func TestIsReqAuthenticated(t *testing.T) {
		t.Fatalf("unable create credential, %s", err)
	}

	globalServerConfig.SetCredential(creds)
	globalActiveCred = creds

	// List of test cases for validating http request authentication.
	testCases := []struct {

@@ -381,7 +382,7 @@ func TestIsReqAuthenticated(t *testing.T) {
	ctx := context.Background()
	// Validates all testcases.
	for i, testCase := range testCases {
		s3Error := isReqAuthenticated(ctx, testCase.req, globalServerConfig.GetRegion(), serviceS3)
		s3Error := isReqAuthenticated(ctx, testCase.req, globalServerRegion, serviceS3)
		if s3Error != testCase.s3Error {
			if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
				t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)

@@ -405,7 +406,7 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
		t.Fatalf("unable create credential, %s", err)
	}

	globalServerConfig.SetCredential(creds)
	globalActiveCred = creds
	testCases := []struct {
		Request *http.Request
		ErrCode APIErrorCode

@@ -418,7 +419,7 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
	}
	ctx := context.Background()
	for i, testCase := range testCases {
		if s3Error := checkAdminRequestAuthType(ctx, testCase.Request, globalServerConfig.GetRegion()); s3Error != testCase.ErrCode {
		if s3Error := checkAdminRequestAuthType(ctx, testCase.Request, globalServerRegion); s3Error != testCase.ErrCode {
			t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
		}
	}
@@ -159,7 +159,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
	// Generate response.
	encodedSuccessResponse := encodeResponse(LocationResponse{})
	// Get current region.
	region := globalServerConfig.GetRegion()
	region := globalServerRegion
	if region != globalMinioDefaultRegion {
		encodedSuccessResponse = encodeResponse(LocationResponse{
			Location: region,

@@ -92,7 +92,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
		}
	}

	config.SetRegion(globalServerConfig.GetRegion())
	config.SetRegion(globalServerRegion)

	// If xml namespace is empty, set a default value before returning.
	if config.XMLNS == "" {

@@ -147,7 +147,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
	}

	var config *event.Config
	config, err = event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalServerConfig.GetRegion(), globalNotificationSys.targetList)
	config, err = event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalServerRegion, globalNotificationSys.targetList)
	if err != nil {
		apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
		if event.IsEventError(err) {
@@ -30,7 +30,6 @@ import (
	"github.com/minio/minio/cmd/config"
	"github.com/minio/minio/cmd/config/etcd"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/certs"
	"github.com/minio/minio/pkg/dns"
	"github.com/minio/minio/pkg/env"

@@ -49,7 +48,7 @@ func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) {
		}
	}

	if globalIsCompressionEnabled && !objAPI.IsCompressionSupported() {
	if globalCompressConfig.Enabled && !objAPI.IsCompressionSupported() {
		logger.Fatal(errInvalidArgument,
			"Compression support is requested but '%s' does not support compression", name)
	}

@@ -159,33 +158,12 @@ func handleCommonCmdArgs(ctx *cli.Context) {
}

func handleCommonEnvVars() {
	accessKey := env.Get(config.EnvAccessKey, "")
	secretKey := env.Get(config.EnvSecretKey, "")
	if accessKey != "" && secretKey != "" {
		cred, err := auth.CreateCredentials(accessKey, secretKey)
		if err != nil {
			logger.Fatal(config.ErrInvalidCredentials(err), "Unable to validate credentials inherited from the shell environment")
		}
		cred.Expiration = timeSentinel

		// credential Envs are set globally.
		globalIsEnvCreds = true
		globalActiveCred = cred
	}

	if browser := env.Get(config.EnvBrowser, "on"); browser != "" {
		browserFlag, err := config.ParseBoolFlag(browser)
		if err != nil {
			logger.Fatal(config.ErrInvalidBrowserValue(nil).Msg("Unknown value `%s`", browser), "Invalid MINIO_BROWSER value in environment variable")
		}

		// browser Envs are set globally, this does not represent
		// if browser is turned off or on.
		globalIsEnvBrowser = true
		globalIsBrowserEnabled = bool(browserFlag)
	}

	var err error
	globalBrowserEnabled, err = config.ParseBool(env.Get(config.EnvBrowser, "on"))
	if err != nil {
		logger.Fatal(config.ErrInvalidBrowserValue(err), "Invalid MINIO_BROWSER value in environment variable")
	}

	globalEtcdClient, err = etcd.New(globalRootCAs)
	if err != nil {
		logger.FatalIf(err, "Unable to initialize etcd config")

@@ -235,21 +213,7 @@ func handleCommonEnvVars() {
	// In place update is true by default if the MINIO_UPDATE is not set
	// or is not set to 'off', if MINIO_UPDATE is set to 'off' then
	// in-place update is off.
	globalInplaceUpdateDisabled = strings.EqualFold(env.Get(config.EnvUpdate, "on"), "off")

	// Get WORM environment variable.
	if worm := env.Get(config.EnvWorm, "off"); worm != "" {
		wormFlag, err := config.ParseBoolFlag(worm)
		if err != nil {
			logger.Fatal(config.ErrInvalidWormValue(nil).Msg("Unknown value `%s`", worm), "Invalid MINIO_WORM value in environment variable")
		}

		// worm Envs are set globally, this does not represent
		// if worm is turned off or on.
		globalIsEnvWORM = true
		globalWORMEnabled = bool(wormFlag)
	}

	globalInplaceUpdateDisabled = strings.EqualFold(env.Get(config.EnvUpdate, config.StateOn), config.StateOff)
}

func logStartupMessage(msg string) {
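The handleCommonEnvVars change above replaces the per-flag ParseBoolFlag and globalIsEnv* bookkeeping with a direct env.Get plus config.ParseBool lookup. A small sketch of that pattern with a hypothetical MINIO_EXAMPLE_FLAG variable (the variable name is invented for illustration; env.Get and config.ParseBool are the helpers actually used in the hunk above):

// parseExampleFlag reads a hypothetical boolean environment variable using the
// same env-first pattern as the new MINIO_BROWSER handling: read with a
// default of "on", then parse strictly so invalid values surface as errors.
func parseExampleFlag() (bool, error) {
	return config.ParseBool(env.Get("MINIO_EXAMPLE_FLAG", "on"))
}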
@@ -18,260 +18,113 @@ package cmd

import (
	"context"
	"errors"
	"fmt"
	"io"
	"strings"
	"sync"
	"text/tabwriter"

	"github.com/minio/minio/cmd/config"
	"github.com/minio/minio/cmd/config/cache"
	"github.com/minio/minio/cmd/config/compress"
	xldap "github.com/minio/minio/cmd/config/ldap"
	xldap "github.com/minio/minio/cmd/config/identity/ldap"
	"github.com/minio/minio/cmd/config/identity/openid"
	"github.com/minio/minio/cmd/config/notify"
	"github.com/minio/minio/cmd/config/policy/opa"
	"github.com/minio/minio/cmd/config/storageclass"
	"github.com/minio/minio/cmd/crypto"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/cmd/logger/target/http"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/env"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/event/target"
	"github.com/minio/minio/pkg/iam/openid"
	iampolicy "github.com/minio/minio/pkg/iam/policy"
)

// Steps to move from version N to version N+1
// 1. Add new struct serverConfigVN+1 in config-versions.go
// 2. Set serverConfigVersion to "N+1"
// 3. Set serverConfig to serverConfigVN+1
// 4. Add new migration function (ex. func migrateVNToVN+1()) in config-migrate.go
// 5. Call migrateVNToVN+1() from migrateConfig() in config-migrate.go
// 6. Make changes in config-current_test.go for any test change

// Config version
const serverConfigVersion = "33"

type serverConfig = serverConfigV33

var (
	// globalServerConfig server config.
	globalServerConfig *serverConfig
	globalServerConfig config.Config
	globalServerConfigMu sync.RWMutex
)

// GetVersion get current config version.
func (s *serverConfig) GetVersion() string {
	return s.Version
}

// SetRegion set a new region.
func (s *serverConfig) SetRegion(region string) {
	// Save new region.
	s.Region = region
}

// GetRegion get current region.
func (s *serverConfig) GetRegion() string {
	if globalIsEnvRegion {
		return globalServerRegion
func validateConfig(s config.Config) error {
	if _, err := config.LookupCreds(s[config.CredentialsSubSys][config.Default]); err != nil {
		return err
	}
	if s == nil {
		return ""
	if _, err := config.LookupRegion(s[config.RegionSubSys][config.Default]); err != nil {
		return err
	}
	return s.Region
}

// SetCredential sets new credential and returns the previous credential.
func (s *serverConfig) SetCredential(creds auth.Credentials) (prevCred auth.Credentials) {
	if s == nil {
		return creds
	if _, err := config.LookupWorm(s[config.WormSubSys][config.Default]); err != nil {
		return err
	}

	if creds.IsValid() && globalActiveCred.IsValid() {
		globalActiveCred = creds
	}

	// Save previous credential.
	prevCred = s.Credential

	// Set updated credential.
	s.Credential = creds

	// Return previous credential.
	return prevCred
}

// GetCredentials get current credentials.
func (s *serverConfig) GetCredential() auth.Credentials {
	if globalActiveCred.IsValid() {
		return globalActiveCred
	}
	return s.Credential
}

// SetWorm set if worm is enabled.
func (s *serverConfig) SetWorm(b bool) {
	// Set the new value.
	s.Worm = config.BoolFlag(b)
}

// GetStorageClass reads storage class fields from current config.
// It returns the standard and reduced redundancy storage class struct
func (s *serverConfig) GetStorageClass() storageclass.Config {
	if s == nil {
		return storageclass.Config{}
	}
	return s.StorageClass
}

// GetWorm get current credentials.
func (s *serverConfig) GetWorm() bool {
	if globalIsEnvWORM {
		return globalWORMEnabled
	}
	if s == nil {
		return false
	}
	return bool(s.Worm)
}

// GetCacheConfig gets the current cache config
func (s *serverConfig) GetCacheConfig() cache.Config {
	if globalIsDiskCacheEnabled {
		return cache.Config{
			Drives: globalCacheDrives,
			Exclude: globalCacheExcludes,
			Expiry: globalCacheExpiry,
			MaxUse: globalCacheMaxUse,
		}
	}
	if s == nil {
		return cache.Config{}
	}
	return s.Cache
}

func (s *serverConfig) Validate() error {
	if s == nil {
		return nil
	}
	if s.Version != serverConfigVersion {
		return fmt.Errorf("configuration version mismatch. Expected: ‘%s’, Got: ‘%s’", serverConfigVersion, s.Version)
	}

	// Validate credential fields only when
	// they are not set via the environment
	// Error out if global is env credential is not set and config has invalid credential
	if !globalIsEnvCreds && !s.Credential.IsValid() {
		return errors.New("invalid credential in config file")
	}

	// Region: nothing to validate
	// Worm, Cache and StorageClass values are already validated during json unmarshal
	for _, v := range s.Notify.AMQP {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("amqp: %s", err)
		}
	}

	for _, v := range s.Notify.Elasticsearch {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("elasticsearch: %s", err)
		}
	}

	for _, v := range s.Notify.Kafka {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("kafka: %s", err)
		}
	}

	for _, v := range s.Notify.MQTT {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("mqtt: %s", err)
		}
	}

	for _, v := range s.Notify.MySQL {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("mysql: %s", err)
		}
	}

	for _, v := range s.Notify.NATS {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("nats: %s", err)
		}
	}

	for _, v := range s.Notify.NSQ {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("nsq: %s", err)
		}
	}

	for _, v := range s.Notify.PostgreSQL {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("postgreSQL: %s", err)
		}
	}

	for _, v := range s.Notify.Redis {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("redis: %s", err)
		}
	}

	for _, v := range s.Notify.Webhook {
		if err := v.Validate(); err != nil {
			return fmt.Errorf("webhook: %s", err)
		}
	}

	return nil
}
func (s *serverConfig) lookupConfigs() {
	// If env is set override the credentials from config file.
	if globalIsEnvCreds {
		s.SetCredential(globalActiveCred)
	} else {
		globalActiveCred = s.GetCredential()
	}

	if globalIsEnvWORM {
		s.SetWorm(globalWORMEnabled)
	} else {
		globalWORMEnabled = s.GetWorm()
	}

	if globalIsEnvRegion {
		s.SetRegion(globalServerRegion)
	} else {
		globalServerRegion = s.GetRegion()
	}

	var err error
	if globalIsXL {
		s.StorageClass, err = storageclass.LookupConfig(s.StorageClass, globalXLSetDriveCount)
		if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default],
			globalXLSetDriveCount); err != nil {
			return err
		}
	}
	if _, err := cache.LookupConfig(s[config.CacheSubSys][config.Default]); err != nil {
		return err
	}
	if _, err := crypto.LookupConfig(s[config.KmsVaultSubSys][config.Default]); err != nil {
		return err
	}
	if _, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default]); err != nil {
		return err
	}
	if _, err := openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
		NewCustomHTTPTransport(), xhttp.DrainBody); err != nil {
		return err
	}
	if _, err := xldap.Lookup(s[config.IdentityLDAPSubSys][config.Default],
		globalRootCAs); err != nil {
		return err
	}
	if _, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
		NewCustomHTTPTransport(), xhttp.DrainBody); err != nil {
		return err
	}
	if _, err := logger.LookupConfig(s); err != nil {
		return err
	}
	return notify.TestNotificationTargets(s, GlobalServiceDoneCh, globalRootCAs)
}

func lookupConfigs(s config.Config) {
	var err error

	if !globalActiveCred.IsValid() {
		// Env doesn't seem to be set, we fallback to lookup
		// creds from the config.
		globalActiveCred, err = config.LookupCreds(s[config.CredentialsSubSys][config.Default])
		if err != nil {
			logger.Fatal(err, "Invalid credentials configuration")
		}
	}

	globalServerRegion, err = config.LookupRegion(s[config.RegionSubSys][config.Default])
	if err != nil {
		logger.Fatal(err, "Invalid region configuration")
	}

	globalWORMEnabled, err = config.LookupWorm(s[config.WormSubSys][config.Default])
	if err != nil {
		logger.Fatal(config.ErrInvalidWormValue(err),
			"Invalid worm configuration")
	}

	if globalIsXL {
		globalStorageClass, err = storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default],
			globalXLSetDriveCount)
		if err != nil {
			logger.FatalIf(err, "Unable to initialize storage class config")
		}
	}

	s.Cache, err = cache.LookupConfig(s.Cache)
	globalCacheConfig, err = cache.LookupConfig(s[config.CacheSubSys][config.Default])
	if err != nil {
		logger.FatalIf(err, "Unable to setup cache")
	}

	if len(s.Cache.Drives) > 0 {
		globalIsDiskCacheEnabled = true
		globalCacheDrives = s.Cache.Drives
		globalCacheExcludes = s.Cache.Exclude
		globalCacheExpiry = s.Cache.Expiry
		globalCacheMaxUse = s.Cache.MaxUse

	if globalCacheConfig.Enabled {
		if cacheEncKey := env.Get(cache.EnvCacheEncryptionMasterKey, ""); cacheEncKey != "" {
			globalCacheKMS, err = crypto.ParseMasterKey(cacheEncKey)
			if err != nil {

@@ -281,218 +134,163 @@ func (s *serverConfig) lookupConfigs() {
		}
	}

	s.KMS, err = crypto.LookupConfig(s.KMS)
	kmsCfg, err := crypto.LookupConfig(s[config.KmsVaultSubSys][config.Default])
	if err != nil {
		logger.FatalIf(err, "Unable to setup KMS config")
	}

	GlobalKMS, err = crypto.NewKMS(s.KMS)
	GlobalKMS, err = crypto.NewKMS(kmsCfg)
	if err != nil {
		logger.FatalIf(err, "Unable to setup KMS with current KMS config")
	}
	globalAutoEncryption = strings.EqualFold(env.Get(crypto.EnvAutoEncryption, "off"), "on")
|
||||
if globalAutoEncryption && GlobalKMS == nil {
|
||||
logger.FatalIf(errors.New("Invalid KMS configuration: auto-encryption is enabled but no valid KMS configuration is present"), "")
|
||||
}
|
||||
// Enable auto-encryption if enabled
|
||||
globalAutoEncryption = kmsCfg.AutoEncryption
|
||||
|
||||
s.Compression, err = compress.LookupConfig(s.Compression)
|
||||
globalCompressConfig, err = compress.LookupConfig(s[config.CompressionSubSys][config.Default])
|
||||
if err != nil {
|
||||
logger.FatalIf(err, "Unable to setup Compression")
|
||||
}
|
||||
|
||||
if s.Compression.Enabled {
|
||||
globalIsCompressionEnabled = s.Compression.Enabled
|
||||
globalCompressExtensions = s.Compression.Extensions
|
||||
globalCompressMimeTypes = s.Compression.MimeTypes
|
||||
}
|
||||
|
||||
s.OpenID.JWKS, err = openid.LookupConfig(s.OpenID.JWKS, NewCustomHTTPTransport(), xhttp.DrainBody)
|
||||
globalOpenIDConfig, err = openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
|
||||
NewCustomHTTPTransport(), xhttp.DrainBody)
|
||||
if err != nil {
|
||||
logger.FatalIf(err, "Unable to initialize OpenID")
|
||||
}
|
||||
|
||||
s.Policy.OPA, err = iampolicy.LookupConfig(s.Policy.OPA, NewCustomHTTPTransport(), xhttp.DrainBody)
|
||||
opaCfg, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
|
||||
NewCustomHTTPTransport(), xhttp.DrainBody)
|
||||
if err != nil {
|
||||
logger.FatalIf(err, "Unable to initialize OPA")
|
||||
}
|
||||
|
||||
globalOpenIDValidators = getOpenIDValidators(s)
|
||||
globalPolicyOPA = iampolicy.NewOpa(s.Policy.OPA)
|
||||
globalOpenIDValidators = getOpenIDValidators(globalOpenIDConfig)
|
||||
globalPolicyOPA = opa.New(opaCfg)
|
||||
|
||||
s.LDAPServerConfig, err = xldap.Lookup(s.LDAPServerConfig, globalRootCAs)
|
||||
globalLDAPConfig, err = xldap.Lookup(s[config.IdentityLDAPSubSys][config.Default],
|
||||
globalRootCAs)
|
||||
if err != nil {
|
||||
logger.FatalIf(err, "Unable to parse LDAP configuration from env")
|
||||
logger.FatalIf(err, "Unable to parse LDAP configuration")
|
||||
}
|
||||
|
||||
// Load logger targets based on user's configuration
|
||||
loggerUserAgent := getUserAgent(getMinioMode())
|
||||
|
||||
s.Logger, err = logger.LookupConfig(s.Logger)
|
||||
loggerCfg, err := logger.LookupConfig(s)
|
||||
if err != nil {
|
||||
logger.FatalIf(err, "Unable to initialize logger")
|
||||
}
|
||||
|
||||
for _, l := range s.Logger.HTTP {
|
||||
for _, l := range loggerCfg.HTTP {
|
||||
if l.Enabled {
|
||||
// Enable http logging
|
||||
logger.AddTarget(http.New(l.Endpoint, loggerUserAgent, string(logger.All), NewCustomHTTPTransport()))
|
||||
}
|
||||
}
|
||||
|
||||
for _, l := range s.Logger.Audit {
|
||||
for _, l := range loggerCfg.Audit {
|
||||
if l.Enabled {
|
||||
// Enable http audit logging
|
||||
logger.AddAuditTarget(http.New(l.Endpoint, loggerUserAgent, string(logger.All), NewCustomHTTPTransport()))
|
||||
}
|
||||
}
|
||||
|
||||
if s.Logger.Console.Enabled {
|
||||
// Enable console logging
|
||||
logger.AddTarget(globalConsoleSys.Console())
|
||||
}
|
||||
|
||||
// Enable console logging
|
||||
logger.AddTarget(globalConsoleSys.Console())
|
||||
}
|
||||
|
||||
// TestNotificationTargets tries to establish connections to all notification
|
||||
// targets when enabled. This is a good way to make sure all configurations
|
||||
// set by the user can work.
|
||||
func (s *serverConfig) TestNotificationTargets() error {
|
||||
for k, v := range s.Notify.AMQP {
|
||||
if !v.Enable {
|
||||
continue
|
||||
}
|
||||
t, err := target.NewAMQPTarget(k, v, GlobalServiceDoneCh, logger.LogOnceIf)
|
||||
if err != nil {
|
||||
return fmt.Errorf("amqp(%s): %s", k, err.Error())
|
||||
}
|
||||
t.Close()
|
||||
}
|
||||
|
||||
for k, v := range s.Notify.Elasticsearch {
|
||||
if !v.Enable {
|
||||
continue
|
||||
}
|
||||
t, err := target.NewElasticsearchTarget(k, v, GlobalServiceDoneCh, logger.LogOnceIf)
|
||||
if err != nil {
|
||||
return fmt.Errorf("elasticsearch(%s): %s", k, err.Error())
|
||||
}
|
||||
t.Close()
|
||||
}
|
||||
|
||||
for k, v := range s.Notify.Kafka {
|
||||
if !v.Enable {
|
||||
continue
|
||||
}
|
||||
if v.TLS.Enable {
|
||||
v.TLS.RootCAs = globalRootCAs
|
||||
}
|
||||
t, err := target.NewKafkaTarget(k, v, GlobalServiceDoneCh, logger.LogOnceIf)
|
||||
if err != nil {
|
||||
return fmt.Errorf("kafka(%s): %s", k, err.Error())
|
||||
}
|
||||
t.Close()
|
||||
}
|
||||
|
||||
for k, v := range s.Notify.MQTT {
|
||||
if !v.Enable {
|
||||
continue
|
||||
}
|
||||
v.RootCAs = globalRootCAs
|
||||
t, err := target.NewMQTTTarget(k, v, GlobalServiceDoneCh, logger.LogOnceIf)
|
||||
if err != nil {
|
||||
return fmt.Errorf("mqtt(%s): %s", k, err.Error())
|
||||
}
|
||||
t.Close()
|
||||
}
|
||||
|
||||
for k, v := range s.Notify.MySQL {
|
||||
if !v.Enable {
|
||||
continue
|
||||
}
|
||||
t, err := target.NewMySQLTarget(k, v, GlobalServiceDoneCh, logger.LogOnceIf)
|
||||
if err != nil {
|
||||
return fmt.Errorf("mysql(%s): %s", k, err.Error())
|
||||
}
|
||||
t.Close()
|
||||
}
|
||||
|
||||
for k, v := range s.Notify.NATS {
|
||||
if !v.Enable {
|
||||
continue
|
||||
}
|
||||
t, err := target.NewNATSTarget(k, v, GlobalServiceDoneCh, logger.LogOnceIf)
|
||||
if err != nil {
|
||||
return fmt.Errorf("nats(%s): %s", k, err.Error())
|
||||
}
|
||||
t.Close()
|
||||
}
|
||||
|
||||
for k, v := range s.Notify.NSQ {
|
||||
if !v.Enable {
|
||||
continue
|
||||
}
|
||||
t, err := target.NewNSQTarget(k, v, GlobalServiceDoneCh, logger.LogOnceIf)
|
||||
if err != nil {
|
||||
return fmt.Errorf("nsq(%s): %s", k, err.Error())
|
||||
}
|
||||
t.Close()
|
||||
}
|
||||
|
||||
for k, v := range s.Notify.PostgreSQL {
|
||||
if !v.Enable {
|
||||
continue
|
||||
}
|
||||
t, err := target.NewPostgreSQLTarget(k, v, GlobalServiceDoneCh, logger.LogOnceIf)
|
||||
if err != nil {
|
||||
return fmt.Errorf("postgreSQL(%s): %s", k, err.Error())
|
||||
}
|
||||
t.Close()
|
||||
}
|
||||
|
||||
for k, v := range s.Notify.Redis {
|
||||
if !v.Enable {
|
||||
continue
|
||||
}
|
||||
t, err := target.NewRedisTarget(k, v, GlobalServiceDoneCh, logger.LogOnceIf)
|
||||
if err != nil {
|
||||
return fmt.Errorf("redis(%s): %s", k, err.Error())
|
||||
}
|
||||
t.Close()
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
var helpMap = map[string]config.HelpKV{
	config.RegionSubSys:          config.RegionHelp,
	config.WormSubSys:            config.WormHelp,
	config.CacheSubSys:           cache.Help,
	config.CompressionSubSys:     compress.Help,
	config.StorageClassSubSys:    storageclass.Help,
	config.IdentityOpenIDSubSys:  openid.Help,
	config.IdentityLDAPSubSys:    xldap.Help,
	config.PolicyOPASubSys:       opa.Help,
	config.KmsVaultSubSys:        crypto.Help,
	config.LoggerHTTPSubSys:      logger.Help,
	config.LoggerHTTPAuditSubSys: logger.HelpAudit,
	config.NotifyAMQPSubSys:      notify.HelpAMQP,
	config.NotifyKafkaSubSys:     notify.HelpKafka,
	config.NotifyMQTTSubSys:      notify.HelpMQTT,
	config.NotifyNATSSubSys:      notify.HelpNATS,
	config.NotifyNSQSubSys:       notify.HelpNSQ,
	config.NotifyMySQLSubSys:     notify.HelpMySQL,
	config.NotifyPostgresSubSys:  notify.HelpPostgres,
	config.NotifyRedisSubSys:     notify.HelpRedis,
	config.NotifyWebhookSubSys:   notify.HelpWebhook,
	config.NotifyESSubSys:        notify.HelpES,
}

func newServerConfig() *serverConfig {
	cred, err := auth.GetNewCredentials()
	logger.FatalIf(err, "")

	srvCfg := &serverConfig{
		Version:    serverConfigVersion,
		Credential: cred,
		Region:     globalMinioDefaultRegion,
		StorageClass: storageclass.Config{
			Standard: storageclass.StorageClass{},
			RRS:      storageclass.StorageClass{},
		},
		Cache: cache.Config{
			Drives:  []string{},
			Exclude: []string{},
			Expiry:  globalCacheExpiry,
			MaxUse:  globalCacheMaxUse,
		},
		KMS:    crypto.KMSConfig{},
		Notify: notify.NewConfig(),
		Compression: compress.Config{
			Enabled:    false,
			Extensions: globalCompressExtensions,
			MimeTypes:  globalCompressMimeTypes,
		},
		Logger: logger.NewConfig(),

// GetHelp - returns help for sub-sys, a key for a sub-system or all the help.
func GetHelp(subSys, key string) (io.Reader, error) {
	if len(subSys) == 0 {
		return nil, config.Error("no help available for empty sub-system inputs")
	}
	help, ok := helpMap[subSys]
	if !ok {
		return nil, config.Error(fmt.Sprintf("unknown sub-system %s", subSys))
	}
	if key != "" {
		value, ok := help[key]
		if !ok {
			return nil, config.Error(fmt.Sprintf("unknown key %s for sub-system %s", key, subSys))
		}
		return strings.NewReader(value), nil
	}

	var s strings.Builder
	w := tabwriter.NewWriter(&s, 1, 8, 2, ' ', 0)
	if err := config.HelpTemplate.Execute(w, help); err != nil {
		return nil, config.Error(err.Error())
	}
	w.Flush()
	return strings.NewReader(s.String()), nil
}
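A minimal usage sketch for GetHelp above. The subsystem constant and the empty key come from this change; the wrapper function and the io/os calls are illustrative assumptions, not part of this commit.

// Hypothetical caller of GetHelp (illustration only, not in this commit).
// "io" and "os" would need to be imported by the calling file.
func printCacheHelp() error {
	rd, err := GetHelp(config.CacheSubSys, "")
	if err != nil {
		return err
	}
	// Dumps one tab-aligned row per cache configuration key.
	_, err = io.Copy(os.Stdout, rd)
	return err
}
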
func configDefaultKVS() map[string]config.KVS {
	m := make(map[string]config.KVS)
	for k, tgt := range newServerConfig() {
		m[k] = tgt[config.Default]
	}
	return m
}

func newServerConfig() config.Config {
	srvCfg := config.New()
	for k := range srvCfg {
		// Initialize with default KVS
		switch k {
		case config.CacheSubSys:
			srvCfg[k][config.Default] = cache.DefaultKVS
		case config.CompressionSubSys:
			srvCfg[k][config.Default] = compress.DefaultKVS
		case config.StorageClassSubSys:
			srvCfg[k][config.Default] = storageclass.DefaultKVS
		case config.IdentityLDAPSubSys:
			srvCfg[k][config.Default] = xldap.DefaultKVS
		case config.IdentityOpenIDSubSys:
			srvCfg[k][config.Default] = openid.DefaultKVS
		case config.PolicyOPASubSys:
			srvCfg[k][config.Default] = opa.DefaultKVS
		case config.WormSubSys:
			srvCfg[k][config.Default] = config.DefaultWormKVS
		case config.RegionSubSys:
			srvCfg[k][config.Default] = config.DefaultRegionKVS
		case config.CredentialsSubSys:
			srvCfg[k][config.Default] = config.DefaultCredentialKVS
		case config.KmsVaultSubSys:
			srvCfg[k][config.Default] = crypto.DefaultKVS
		case config.LoggerHTTPSubSys:
			srvCfg[k][config.Default] = logger.DefaultKVS
		case config.LoggerHTTPAuditSubSys:
			srvCfg[k][config.Default] = logger.DefaultAuditKVS
		}
	}
	for k, v := range notify.DefaultNotificationKVS {
		srvCfg[k][config.Default] = v
	}
	return srvCfg
}
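To make the new layout concrete: every sub-system maps to a set of targets, and each target holds a KVS. A small hypothetical probe of the structure returned by newServerConfig, using the indexing pattern seen throughout this change; the helper itself is not part of the commit.

// Hypothetical probe of the KV config layout (illustration only, not in this commit).
// Assumes cmd package scope with "fmt" imported.
func printCacheDefaults() {
	srvCfg := newServerConfig()
	// Every sub-system carries at least the default target with its default KVS.
	kvs := srvCfg[config.CacheSubSys][config.Default]
	fmt.Println(kvs) // prints cache.DefaultKVS
}
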
@@ -503,7 +301,7 @@ func newSrvConfig(objAPI ObjectLayer) error {
|
||||
srvCfg := newServerConfig()
|
||||
|
||||
// Override any values from ENVs.
|
||||
srvCfg.lookupConfigs()
|
||||
lookupConfigs(srvCfg)
|
||||
|
||||
// hold the mutex lock before a new config is assigned.
|
||||
globalServerConfigMu.Lock()
|
||||
@@ -511,17 +309,17 @@ func newSrvConfig(objAPI ObjectLayer) error {
|
||||
globalServerConfigMu.Unlock()
|
||||
|
||||
// Save config into file.
|
||||
return saveServerConfig(context.Background(), objAPI, globalServerConfig)
|
||||
return saveServerConfig(context.Background(), objAPI, globalServerConfig, nil)
|
||||
}
|
||||
|
||||
// getValidConfig - returns valid server configuration
|
||||
func getValidConfig(objAPI ObjectLayer) (*serverConfig, error) {
|
||||
func getValidConfig(objAPI ObjectLayer) (config.Config, error) {
|
||||
srvCfg, err := readServerConfig(context.Background(), objAPI)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return srvCfg, srvCfg.Validate()
|
||||
return srvCfg, nil
|
||||
}
|
||||
|
||||
// loadConfig - loads a new config from disk, overrides params from env
|
||||
@@ -529,11 +327,11 @@ func getValidConfig(objAPI ObjectLayer) (*serverConfig, error) {
|
||||
func loadConfig(objAPI ObjectLayer) error {
|
||||
srvCfg, err := getValidConfig(objAPI)
|
||||
if err != nil {
|
||||
return config.ErrInvalidConfig(nil).Msg(err.Error())
|
||||
return config.ErrInvalidConfig(err)
|
||||
}
|
||||
|
||||
// Override any values from ENVs.
|
||||
srvCfg.lookupConfigs()
|
||||
lookupConfigs(srvCfg)
|
||||
|
||||
// hold the mutex lock before a new config is assigned.
|
||||
globalServerConfigMu.Lock()
|
||||
@@ -547,168 +345,12 @@ func loadConfig(objAPI ObjectLayer) error {
|
||||
// enabled providers in server config.
|
||||
// A new authentication provider is added like below
|
||||
// * Add a new provider in pkg/iam/openid package.
|
||||
func getOpenIDValidators(config *serverConfig) *openid.Validators {
|
||||
func getOpenIDValidators(cfg openid.Config) *openid.Validators {
|
||||
validators := openid.NewValidators()
|
||||
|
||||
if config.OpenID.JWKS.URL != nil {
|
||||
validators.Add(openid.NewJWT(config.OpenID.JWKS))
|
||||
if cfg.JWKS.URL != nil {
|
||||
validators.Add(openid.NewJWT(cfg))
|
||||
}
|
||||
|
||||
return validators
|
||||
}
|
||||
|
||||
// getNotificationTargets - returns TargetList which contains enabled targets in serverConfig.
|
||||
// A new notification target is added like below
|
||||
// * Add a new target in pkg/event/target package.
|
||||
// * Add newly added target configuration to serverConfig.Notify.<TARGET_NAME>.
|
||||
// * Handle the configuration in this function to create/add into TargetList.
|
||||
func getNotificationTargets(config *serverConfig) *event.TargetList {
|
||||
targetList := event.NewTargetList()
|
||||
if config == nil {
|
||||
return targetList
|
||||
}
|
||||
for id, args := range config.Notify.AMQP {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewAMQPTarget(id, args, GlobalServiceDoneCh, logger.LogOnceIf)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
}
|
||||
if err = targetList.Add(newTarget); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for id, args := range config.Notify.Elasticsearch {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewElasticsearchTarget(id, args, GlobalServiceDoneCh, logger.LogOnceIf)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
|
||||
}
|
||||
if err = targetList.Add(newTarget); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for id, args := range config.Notify.Kafka {
|
||||
if args.Enable {
|
||||
if args.TLS.Enable {
|
||||
args.TLS.RootCAs = globalRootCAs
|
||||
}
|
||||
newTarget, err := target.NewKafkaTarget(id, args, GlobalServiceDoneCh, logger.LogOnceIf)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
}
|
||||
if err = targetList.Add(newTarget); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for id, args := range config.Notify.MQTT {
|
||||
if args.Enable {
|
||||
args.RootCAs = globalRootCAs
|
||||
newTarget, err := target.NewMQTTTarget(id, args, GlobalServiceDoneCh, logger.LogOnceIf)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
}
|
||||
if err = targetList.Add(newTarget); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for id, args := range config.Notify.MySQL {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewMySQLTarget(id, args, GlobalServiceDoneCh, logger.LogOnceIf)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
}
|
||||
if err = targetList.Add(newTarget); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for id, args := range config.Notify.NATS {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewNATSTarget(id, args, GlobalServiceDoneCh, logger.LogOnceIf)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
}
|
||||
if err = targetList.Add(newTarget); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for id, args := range config.Notify.NSQ {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewNSQTarget(id, args, GlobalServiceDoneCh, logger.LogOnceIf)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
}
|
||||
if err = targetList.Add(newTarget); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for id, args := range config.Notify.PostgreSQL {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewPostgreSQLTarget(id, args, GlobalServiceDoneCh, logger.LogOnceIf)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
}
|
||||
if err = targetList.Add(newTarget); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for id, args := range config.Notify.Redis {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewRedisTarget(id, args, GlobalServiceDoneCh, logger.LogOnceIf)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
}
|
||||
if err = targetList.Add(newTarget); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for id, args := range config.Notify.Webhook {
|
||||
if args.Enable {
|
||||
args.RootCAs = globalRootCAs
|
||||
newTarget := target.NewWebhookTarget(id, args, GlobalServiceDoneCh, logger.LogOnceIf)
|
||||
if err := targetList.Add(newTarget); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return targetList
|
||||
}
|
||||
|
||||
@@ -19,8 +19,9 @@ package cmd
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
)
|
||||
|
||||
func TestServerConfig(t *testing.T) {
|
||||
@@ -34,22 +35,21 @@ func TestServerConfig(t *testing.T) {
|
||||
t.Fatalf("Init Test config failed")
|
||||
}
|
||||
|
||||
if globalServerConfig.GetRegion() != globalMinioDefaultRegion {
|
||||
t.Errorf("Expecting region `us-east-1` found %s", globalServerConfig.GetRegion())
|
||||
if globalServerRegion != globalMinioDefaultRegion {
|
||||
t.Errorf("Expecting region `us-east-1` found %s", globalServerRegion)
|
||||
}
|
||||
|
||||
// Set new region and verify.
|
||||
globalServerConfig.SetRegion("us-west-1")
|
||||
if globalServerConfig.GetRegion() != "us-west-1" {
|
||||
t.Errorf("Expecting region `us-west-1` found %s", globalServerConfig.GetRegion())
|
||||
config.SetRegion(globalServerConfig, "us-west-1")
|
||||
region, err := config.LookupRegion(globalServerConfig[config.RegionSubSys][config.Default])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if region != "us-west-1" {
|
||||
t.Errorf("Expecting region `us-west-1` found %s", globalServerRegion)
|
||||
}
|
||||
|
||||
// Match version.
|
||||
if globalServerConfig.GetVersion() != serverConfigVersion {
|
||||
t.Errorf("Expecting version %s found %s", globalServerConfig.GetVersion(), serverConfigVersion)
|
||||
}
|
||||
|
||||
if err := saveServerConfig(context.Background(), objLayer, globalServerConfig); err != nil {
|
||||
if err := saveServerConfig(context.Background(), objLayer, globalServerConfig, nil); err != nil {
|
||||
t.Fatalf("Unable to save updated config file %s", err)
|
||||
}
|
||||
|
||||
@@ -58,122 +58,3 @@ func TestServerConfig(t *testing.T) {
|
||||
t.Fatalf("Unable to initialize from updated config file %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests config validator..
|
||||
func TestValidateConfig(t *testing.T) {
|
||||
objLayer, fsDir, err := prepareFS()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(fsDir)
|
||||
|
||||
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
|
||||
t.Fatalf("Init Test config failed")
|
||||
}
|
||||
|
||||
configPath := path.Join(minioConfigPrefix, minioConfigFile)
|
||||
v := serverConfigVersion
|
||||
|
||||
testCases := []struct {
|
||||
configData string
|
||||
shouldPass bool
|
||||
}{
|
||||
// Test 1 - wrong json
|
||||
{`{`, false},
|
||||
|
||||
// Test 2 - empty json
|
||||
{`{}`, false},
|
||||
|
||||
// Test 3 - wrong config version
|
||||
{`{"version": "10"}`, false},
|
||||
|
||||
// Test 4 - wrong browser parameter
|
||||
{`{"version": "` + v + `", "browser": "foo"}`, false},
|
||||
|
||||
// Test 5 - missing credential
|
||||
{`{"version": "` + v + `", "browser": "on"}`, false},
|
||||
|
||||
// Test 6 - missing secret key
|
||||
{`{"version": "` + v + `", "browser": "on", "credential" : {"accessKey":"minio", "secretKey":""}}`, false},
|
||||
|
||||
// Test 7 - missing region should pass, defaults to 'us-east-1'.
|
||||
{`{"version": "` + v + `", "browser": "on", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, true},
|
||||
|
||||
// Test 8 - missing browser should pass, defaults to 'on'.
|
||||
{`{"version": "` + v + `", "region": "us-east-1", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, true},
|
||||
|
||||
// Test 9 - success
|
||||
{`{"version": "` + v + `", "browser": "on", "region":"us-east-1", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, true},
|
||||
|
||||
// Test 10 - duplicated json keys
|
||||
{`{"version": "` + v + `", "browser": "on", "browser": "on", "region":"us-east-1", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, false},
|
||||
|
||||
// Test 11 - Test AMQP
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false, "queueDir": "", "queueLimit": 0}}}}`, false},
|
||||
|
||||
// Test 12 - Test NATS
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "queueDir": "", "queueLimit": 0, "streaming": { "enable": false, "clusterID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false},
|
||||
|
||||
// Test 13 - Test ElasticSearch
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "url": "", "index": "" } }}}`, false},
|
||||
|
||||
// Test 14 - Test Redis
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "address": "", "password": "", "key": "" } }}}`, false},
|
||||
|
||||
// Test 15 - Test PostgreSQL
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "", "queueDir": "", "queueLimit": 0 }}}}`, false},
|
||||
|
||||
// Test 16 - Test Kafka
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "", "queueDir": "", "queueLimit": 0 } }}}`, false},
|
||||
|
||||
// Test 17 - Test Webhook
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "", "queueDir": "", "queueLimit": 0} }}}`, false},
|
||||
|
||||
// Test 18 - Test MySQL
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "", "queueDir": "", "queueLimit": 0 }}}}`, false},
|
||||
|
||||
// Test 19 - Test Format for MySQL
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "invalid", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, false},
|
||||
|
||||
// Test 20 - Test valid Format for MySQL
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "namespace", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true},
|
||||
|
||||
// Test 21 - Test Format for PostgreSQL
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "format": "invalid", "table": "xxx", "host": "myhost", "port": "5432", "user": "abc", "password": "pqr", "database": "test1" }}}}`, false},
|
||||
|
||||
// Test 22 - Test valid Format for PostgreSQL
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "format": "namespace", "table": "xxx", "host": "myhost", "port": "5432", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true},
|
||||
|
||||
// Test 23 - Test Format for ElasticSearch
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "invalid", "url": "example.com", "index": "myindex" } }}}`, false},
|
||||
|
||||
// Test 24 - Test valid Format for ElasticSearch
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "namespace", "url": "example.com", "index": "myindex", "queueDir": "", "queueLimit": 0 } }}}`, true},
|
||||
|
||||
// Test 25 - Test Format for Redis
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "invalid", "address": "example.com:80", "password": "xxx", "key": "key1", "queueDir": "", "queueLimit": 0 } }}}`, false},
|
||||
|
||||
// Test 26 - Test valid Format for Redis
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "namespace", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true},
|
||||
|
||||
// Test 27 - Test MQTT
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mqtt": { "1": { "enable": true, "broker": "", "topic": "", "qos": 0, "username": "", "password": "", "queueDir": "", "queueLimit": 0}}}}`, false},
|
||||
|
||||
// Test 28 - Test NSQ
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nsq": { "1": { "enable": true, "nsqdAddress": "", "topic": "", "queueDir": "", "queueLimit": 0} }}}`, false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
if err = saveConfig(context.Background(), objLayer, configPath, []byte(testCase.configData)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = getValidConfig(objLayer)
|
||||
if testCase.shouldPass && err != nil {
|
||||
t.Errorf("Test %d, should pass but it failed with err = %v", i+1, err)
|
||||
}
|
||||
if !testCase.shouldPass && err == nil {
|
||||
t.Errorf("Test %d, should fail but it succeeded.", i+1)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc.
|
||||
* MinIO Cloud Storage, (C) 2016-2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -23,14 +23,21 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/cmd/config/cache"
|
||||
"github.com/minio/minio/cmd/config/compress"
|
||||
xldap "github.com/minio/minio/cmd/config/identity/ldap"
|
||||
"github.com/minio/minio/cmd/config/identity/openid"
|
||||
"github.com/minio/minio/cmd/config/notify"
|
||||
"github.com/minio/minio/cmd/config/policy/opa"
|
||||
"github.com/minio/minio/cmd/config/storageclass"
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/event/target"
|
||||
"github.com/minio/minio/pkg/iam/openid"
|
||||
iampolicy "github.com/minio/minio/pkg/iam/policy"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
"github.com/minio/minio/pkg/quick"
|
||||
)
|
||||
@@ -221,7 +228,7 @@ func migrateConfig() error {
|
||||
return err
|
||||
}
|
||||
fallthrough
|
||||
case serverConfigVersion:
|
||||
case "33":
|
||||
// No migration needed. this always points to current version.
|
||||
err = nil
|
||||
}
|
||||
@@ -1991,7 +1998,7 @@ func migrateV22ToV23() error {
|
||||
// Init cache config.For future migration, Cache config needs to be copied over from previous version.
|
||||
srvConfig.Cache.Drives = []string{}
|
||||
srvConfig.Cache.Exclude = []string{}
|
||||
srvConfig.Cache.Expiry = globalCacheExpiry
|
||||
srvConfig.Cache.Expiry = 90
|
||||
|
||||
if err = Save(configFile, srvConfig); err != nil {
|
||||
return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv22.Version, srvConfig.Version, err)
|
||||
@@ -2341,7 +2348,7 @@ func migrateV25ToV26() error {
|
||||
srvConfig.Cache.Expiry = cv25.Cache.Expiry
|
||||
|
||||
// Add predefined value to new server config.
|
||||
srvConfig.Cache.MaxUse = globalCacheMaxUse
|
||||
srvConfig.Cache.MaxUse = 80
|
||||
|
||||
if err = quick.SaveConfig(srvConfig, configFile, globalEtcdClient); err != nil {
|
||||
return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv25.Version, srvConfig.Version, err)
|
||||
@@ -2456,7 +2463,7 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
|
||||
getConfigFile() + ".deprecated",
|
||||
configFile,
|
||||
}
|
||||
var config = &serverConfig{}
|
||||
var config = &serverConfigV27{}
|
||||
for _, cfgFile := range configFiles {
|
||||
if _, err = Load(cfgFile, config); err != nil {
|
||||
if !os.IsNotExist(err) && !os.IsPermission(err) {
|
||||
@@ -2473,7 +2480,7 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
|
||||
// Initialize the server config, if no config exists.
|
||||
return newSrvConfig(objAPI)
|
||||
}
|
||||
return saveServerConfig(context.Background(), objAPI, config)
|
||||
return saveServerConfig(context.Background(), objAPI, config, nil)
|
||||
}
|
||||
|
||||
// Migrates '.minio.sys/config.json' to v33.
|
||||
@@ -2481,7 +2488,7 @@ func migrateMinioSysConfig(objAPI ObjectLayer) error {
|
||||
configFile := path.Join(minioConfigPrefix, minioConfigFile)
|
||||
|
||||
// Check if the config version is latest, if not migrate.
|
||||
ok, _, err := checkConfigVersion(objAPI, configFile, serverConfigVersion)
|
||||
ok, _, err := checkConfigVersion(objAPI, configFile, "33")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2622,8 +2629,8 @@ func migrateV29ToV30MinioSys(objAPI ObjectLayer) error {
|
||||
cfg.Version = "30"
|
||||
// Init compression config.For future migration, Compression config needs to be copied over from previous version.
|
||||
cfg.Compression.Enabled = false
|
||||
cfg.Compression.Extensions = globalCompressExtensions
|
||||
cfg.Compression.MimeTypes = globalCompressMimeTypes
|
||||
cfg.Compression.Extensions = strings.Split(compress.DefaultExtensions, config.ValueSeparator)
|
||||
cfg.Compression.MimeTypes = strings.Split(compress.DefaultMimeTypes, config.ValueSeparator)
|
||||
|
||||
data, err = json.Marshal(cfg)
|
||||
if err != nil {
|
||||
@@ -2657,10 +2664,10 @@ func migrateV30ToV31MinioSys(objAPI ObjectLayer) error {
|
||||
}
|
||||
|
||||
cfg.Version = "31"
|
||||
cfg.OpenID.JWKS = openid.JWKSArgs{
|
||||
URL: &xnet.URL{},
|
||||
}
|
||||
cfg.Policy.OPA = iampolicy.OpaArgs{
|
||||
cfg.OpenID = openid.Config{}
|
||||
cfg.OpenID.JWKS.URL = &xnet.URL{}
|
||||
|
||||
cfg.Policy.OPA = opa.Args{
|
||||
URL: &xnet.URL{},
|
||||
AuthToken: "",
|
||||
}
|
||||
@@ -2745,3 +2752,94 @@ func migrateV32ToV33MinioSys(objAPI ObjectLayer) error {
|
||||
logger.Info(configMigrateMSGTemplate, configFile, "32", "33")
|
||||
return nil
|
||||
}
|
||||
|
||||
func migrateMinioSysConfigToKV(objAPI ObjectLayer) error {
	configFile := path.Join(minioConfigPrefix, minioConfigFile)

	// Check if the config version is latest, if not migrate.
	ok, data, err := checkConfigVersion(objAPI, configFile, "33")
	if err != nil {
		return err
	}
	if !ok {
		return nil
	}

	cfg := &serverConfigV33{}
	if err = json.Unmarshal(data, cfg); err != nil {
		return err
	}

	newCfg := newServerConfig()

	config.SetCredentials(newCfg, cfg.Credential)
	config.SetRegion(newCfg, cfg.Region)
	config.SetWorm(newCfg, bool(cfg.Worm))

	storageclass.SetStorageClass(newCfg, cfg.StorageClass)

	for k, loggerArgs := range cfg.Logger.HTTP {
		logger.SetLoggerHTTP(newCfg, k, loggerArgs)
	}
	for k, auditArgs := range cfg.Logger.Audit {
		logger.SetLoggerHTTPAudit(newCfg, k, auditArgs)
	}

	crypto.SetKMSConfig(newCfg, cfg.KMS)
	xldap.SetIdentityLDAP(newCfg, cfg.LDAPServerConfig)
	openid.SetIdentityOpenID(newCfg, cfg.OpenID)
	opa.SetPolicyOPAConfig(newCfg, cfg.Policy.OPA)
	cache.SetCacheConfig(newCfg, cfg.Cache)
	compress.SetCompressionConfig(newCfg, cfg.Compression)

	for k, args := range cfg.Notify.AMQP {
		notify.SetNotifyAMQP(newCfg, k, args)
	}
	for k, args := range cfg.Notify.Elasticsearch {
		notify.SetNotifyES(newCfg, k, args)
	}
	for k, args := range cfg.Notify.Kafka {
		notify.SetNotifyKafka(newCfg, k, args)
	}
	for k, args := range cfg.Notify.MQTT {
		notify.SetNotifyMQTT(newCfg, k, args)
	}
	for k, args := range cfg.Notify.MySQL {
		notify.SetNotifyMySQL(newCfg, k, args)
	}
	for k, args := range cfg.Notify.NATS {
		notify.SetNotifyNATS(newCfg, k, args)
	}
	for k, args := range cfg.Notify.NSQ {
		notify.SetNotifyNSQ(newCfg, k, args)
	}
	for k, args := range cfg.Notify.PostgreSQL {
		notify.SetNotifyPostgres(newCfg, k, args)
	}
	for k, args := range cfg.Notify.Redis {
		notify.SetNotifyRedis(newCfg, k, args)
	}
	for k, args := range cfg.Notify.Webhook {
		notify.SetNotifyWebhook(newCfg, k, args)
	}

	// Construct path to config.json for the given bucket.
	transactionConfigFile := configFile + ".transaction"

	// As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket
	// and configFile, take a transaction lock to avoid data race between readConfig()
	// and saveConfig().
	objLock := globalNSMutex.NewNSLock(context.Background(), minioMetaBucket, transactionConfigFile)
	if err = objLock.GetLock(globalOperationTimeout); err != nil {
		return err
	}
	defer objLock.Unlock()

	if err = saveServerConfig(context.Background(), objAPI, newCfg, cfg); err != nil {
		return err
	}

	logger.Info("Configuration file %s migrated from version '%s' to new KV format successfully.",
		configFile, "33")
	return nil
}
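As a hedged sanity-check sketch for the migration above (not part of this commit): a legacy value such as the region should round-trip through the new KV lookup helpers used elsewhere in this change.

// Hypothetical post-migration check (illustration only, not in this commit).
func verifyMigratedRegion(newCfg config.Config, want string) error {
	region, err := config.LookupRegion(newCfg[config.RegionSubSys][config.Default])
	if err != nil {
		return err
	}
	if region != want {
		return fmt.Errorf("region lost during migration: expected %s, got %s", want, region)
	}
	return nil
}
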
@@ -21,6 +21,8 @@ import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
)
|
||||
|
||||
// Test if config v1 is purged
|
||||
@@ -208,24 +210,24 @@ func TestServerConfigMigrateV2toV33(t *testing.T) {
|
||||
t.Fatal("Unexpected error: ", err)
|
||||
}
|
||||
|
||||
if err := migrateMinioSysConfigToKV(objLayer); err != nil {
|
||||
t.Fatal("Unexpected error: ", err)
|
||||
}
|
||||
|
||||
// Initialize server config and check again if everything is fine
|
||||
if err := loadConfig(objLayer); err != nil {
|
||||
t.Fatalf("Unable to initialize from updated config file %s", err)
|
||||
}
|
||||
|
||||
// Check the version number in the upgraded config file
|
||||
expectedVersion := serverConfigVersion
|
||||
if globalServerConfig.Version != expectedVersion {
|
||||
t.Fatalf("Expect version "+expectedVersion+", found: %v", globalServerConfig.Version)
|
||||
}
|
||||
|
||||
// Check if accessKey and secretKey are not altered during migration
|
||||
if globalServerConfig.Credential.AccessKey != accessKey {
|
||||
t.Fatalf("Access key lost during migration, expected: %v, found:%v", accessKey, globalServerConfig.Credential.AccessKey)
|
||||
caccessKey := globalServerConfig[config.CredentialsSubSys][config.Default][config.AccessKey]
|
||||
if caccessKey != accessKey {
|
||||
t.Fatalf("Access key lost during migration, expected: %v, found:%v", accessKey, caccessKey)
|
||||
}
|
||||
|
||||
if globalServerConfig.Credential.SecretKey != secretKey {
|
||||
t.Fatalf("Secret key lost during migration, expected: %v, found: %v", secretKey, globalServerConfig.Credential.SecretKey)
|
||||
csecretKey := globalServerConfig[config.CredentialsSubSys][config.Default][config.SecretKey]
|
||||
if csecretKey != secretKey {
|
||||
t.Fatalf("Secret key lost during migration, expected: %v, found: %v", secretKey, csecretKey)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -22,15 +22,15 @@ import (
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/cmd/config/cache"
|
||||
"github.com/minio/minio/cmd/config/compress"
|
||||
xldap "github.com/minio/minio/cmd/config/ldap"
|
||||
xldap "github.com/minio/minio/cmd/config/identity/ldap"
|
||||
"github.com/minio/minio/cmd/config/identity/openid"
|
||||
"github.com/minio/minio/cmd/config/notify"
|
||||
"github.com/minio/minio/cmd/config/policy/opa"
|
||||
"github.com/minio/minio/cmd/config/storageclass"
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/event/target"
|
||||
"github.com/minio/minio/pkg/iam/openid"
|
||||
iampolicy "github.com/minio/minio/pkg/iam/policy"
|
||||
"github.com/minio/minio/pkg/quick"
|
||||
)
|
||||
|
||||
@@ -783,15 +783,12 @@ type serverConfigV31 struct {
|
||||
Compression compress.Config `json:"compress"`
|
||||
|
||||
// OpenID configuration
|
||||
OpenID struct {
|
||||
// JWKS validator config.
|
||||
JWKS openid.JWKSArgs `json:"jwks"`
|
||||
} `json:"openid"`
|
||||
OpenID openid.Config `json:"openid"`
|
||||
|
||||
// External policy enforcements.
|
||||
Policy struct {
|
||||
// OPA configuration.
|
||||
OPA iampolicy.OpaArgs `json:"opa"`
|
||||
OPA opa.Args `json:"opa"`
|
||||
|
||||
// Add new external policy enforcements here.
|
||||
} `json:"policy"`
|
||||
@@ -825,15 +822,12 @@ type serverConfigV32 struct {
|
||||
Compression compress.Config `json:"compress"`
|
||||
|
||||
// OpenID configuration
|
||||
OpenID struct {
|
||||
// JWKS validator config.
|
||||
JWKS openid.JWKSArgs `json:"jwks"`
|
||||
} `json:"openid"`
|
||||
OpenID openid.Config `json:"openid"`
|
||||
|
||||
// External policy enforcements.
|
||||
Policy struct {
|
||||
// OPA configuration.
|
||||
OPA iampolicy.OpaArgs `json:"opa"`
|
||||
OPA opa.Args `json:"opa"`
|
||||
|
||||
// Add new external policy enforcements here.
|
||||
} `json:"policy"`
|
||||
@@ -869,15 +863,12 @@ type serverConfigV33 struct {
|
||||
Compression compress.Config `json:"compress"`
|
||||
|
||||
// OpenID configuration
|
||||
OpenID struct {
|
||||
// JWKS validator config.
|
||||
JWKS openid.JWKSArgs `json:"jwks"`
|
||||
} `json:"openid"`
|
||||
OpenID openid.Config `json:"openid"`
|
||||
|
||||
// External policy enforcements.
|
||||
Policy struct {
|
||||
// OPA configuration.
|
||||
OPA iampolicy.OpaArgs `json:"opa"`
|
||||
OPA opa.Args `json:"opa"`
|
||||
|
||||
// Add new external policy enforcements here.
|
||||
} `json:"policy"`
|
||||
|
||||
cmd/config.go
@@ -23,43 +23,103 @@ import (
|
||||
"fmt"
|
||||
"path"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/quick"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
)
|
||||
|
||||
const (
|
||||
minioConfigPrefix = "config"
|
||||
|
||||
// Captures all the previous SetKV operations and allows rollback.
|
||||
minioConfigHistoryPrefix = minioConfigPrefix + "/history"
|
||||
|
||||
// MinIO configuration file.
|
||||
minioConfigFile = "config.json"
|
||||
|
||||
// MinIO backup file
|
||||
// MinIO configuration backup file
|
||||
minioConfigBackupFile = minioConfigFile + ".backup"
|
||||
)
|
||||
|
||||
func saveServerConfig(ctx context.Context, objAPI ObjectLayer, config *serverConfig) error {
|
||||
if err := quick.CheckData(config); err != nil {
|
||||
return err
|
||||
}
|
||||
func listServerConfigHistory(ctx context.Context, objAPI ObjectLayer) ([]madmin.ConfigHistoryEntry, error) {
|
||||
var configHistory []madmin.ConfigHistoryEntry
|
||||
|
||||
data, err := json.MarshalIndent(config, "", "\t")
|
||||
// List all kvs
|
||||
marker := ""
|
||||
for {
|
||||
res, err := objAPI.ListObjects(ctx, minioMetaBucket, minioConfigHistoryPrefix, marker, "", 1000)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, obj := range res.Objects {
|
||||
configHistory = append(configHistory, madmin.ConfigHistoryEntry{
|
||||
RestoreID: path.Base(obj.Name),
|
||||
CreateTime: obj.ModTime, // ModTime is createTime for config history entries.
|
||||
})
|
||||
}
|
||||
if !res.IsTruncated {
|
||||
// We are done here
|
||||
break
|
||||
}
|
||||
marker = res.NextMarker
|
||||
}
|
||||
sort.Slice(configHistory, func(i, j int) bool {
|
||||
return configHistory[i].CreateTime.Before(configHistory[j].CreateTime)
|
||||
})
|
||||
return configHistory, nil
|
||||
}
|
||||
|
||||
func delServerConfigHistory(ctx context.Context, objAPI ObjectLayer, uuidKV string) error {
|
||||
historyFile := pathJoin(minioConfigHistoryPrefix, uuidKV)
|
||||
return objAPI.DeleteObject(ctx, minioMetaBucket, historyFile)
|
||||
}
|
||||
|
||||
func readServerConfigHistory(ctx context.Context, objAPI ObjectLayer, uuidKV string) ([]byte, error) {
|
||||
historyFile := pathJoin(minioConfigHistoryPrefix, uuidKV)
|
||||
return readConfig(ctx, objAPI, historyFile)
|
||||
}
|
||||
|
||||
func saveServerConfigHistory(ctx context.Context, objAPI ObjectLayer, kv []byte) error {
|
||||
uuidKV := mustGetUUID() + ".kv"
|
||||
historyFile := pathJoin(minioConfigHistoryPrefix, uuidKV)
|
||||
|
||||
// Save the new config KV settings into the history path.
|
||||
return saveConfig(ctx, objAPI, historyFile, kv)
|
||||
}
|
||||
|
||||
func saveServerConfig(ctx context.Context, objAPI ObjectLayer, config interface{}, oldConfig interface{}) error {
|
||||
data, err := json.Marshal(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
configFile := path.Join(minioConfigPrefix, minioConfigFile)
|
||||
// Create a backup of the current config
|
||||
oldData, err := readConfig(ctx, objAPI, configFile)
|
||||
if err == nil {
|
||||
backupConfigFile := path.Join(minioConfigPrefix, minioConfigBackupFile)
|
||||
if err = saveConfig(ctx, objAPI, backupConfigFile, oldData); err != nil {
|
||||
backupConfigFile := path.Join(minioConfigPrefix, minioConfigBackupFile)
|
||||
|
||||
var oldData []byte
|
||||
var freshConfig bool
|
||||
if oldConfig == nil {
|
||||
oldData, err = readConfig(ctx, objAPI, configFile)
|
||||
if err != nil && err != errConfigNotFound {
|
||||
return err
|
||||
}
|
||||
// Current config not found, so nothing to backup.
|
||||
freshConfig = true
|
||||
} else {
|
||||
if err != errConfigNotFound {
|
||||
oldData, err = json.Marshal(oldConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// No need to take backups for fresh setups.
|
||||
if !freshConfig {
|
||||
if err = saveConfig(ctx, objAPI, backupConfigFile, oldData); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -68,7 +128,7 @@ func saveServerConfig(ctx context.Context, objAPI ObjectLayer, config *serverCon
|
||||
return saveConfig(ctx, objAPI, configFile, data)
|
||||
}
|
||||
|
||||
func readServerConfig(ctx context.Context, objAPI ObjectLayer) (*serverConfig, error) {
|
||||
func readServerConfig(ctx context.Context, objAPI ObjectLayer) (config.Config, error) {
|
||||
configFile := path.Join(minioConfigPrefix, minioConfigFile)
|
||||
configData, err := readConfig(ctx, objAPI, configFile)
|
||||
if err != nil {
|
||||
@@ -79,16 +139,8 @@ func readServerConfig(ctx context.Context, objAPI ObjectLayer) (*serverConfig, e
|
||||
configData = bytes.Replace(configData, []byte("\r\n"), []byte("\n"), -1)
|
||||
}
|
||||
|
||||
if err = quick.CheckDuplicateKeys(string(configData)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var config = &serverConfig{}
|
||||
if err = json.Unmarshal(configData, config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = quick.CheckData(config); err != nil {
|
||||
var config = config.New()
|
||||
if err = json.Unmarshal(configData, &config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -187,5 +239,11 @@ func initConfig(objAPI ObjectLayer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Migrates backend '<export_path>/.minio.sys/config/config.json' to
|
||||
// latest config format.
|
||||
if err := migrateMinioSysConfigToKV(objAPI); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return loadConfig(objAPI)
|
||||
}
|
||||
|
||||
@@ -19,6 +19,8 @@ package config

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

// BoolFlag - wrapper bool type.
@@ -54,16 +56,35 @@ func (bf *BoolFlag) UnmarshalJSON(data []byte) (err error) {
	return err
}

// FormatBool prints stringified version of boolean.
func FormatBool(b bool) string {
	if b {
		return "on"
	}
	return "off"
}

// ParseBool returns the boolean value represented by the string.
// It accepts 1, t, T, TRUE, true, True, 0, f, F, FALSE, false, False.
// Any other value returns an error.
func ParseBool(str string) (bool, error) {
	switch str {
	case "1", "t", "T", "true", "TRUE", "True", "on", "ON", "On":
		return true, nil
	case "0", "f", "F", "false", "FALSE", "False", "off", "OFF", "Off":
		return false, nil
	}
	if strings.EqualFold(str, "enabled") {
		return true, nil
	}
	if strings.EqualFold(str, "disabled") {
		return false, nil
	}
	return false, fmt.Errorf("ParseBool: parsing '%s': %s", str, strconv.ErrSyntax)
}

// ParseBoolFlag - parses string into BoolFlag.
func ParseBoolFlag(s string) (bf BoolFlag, err error) {
	switch s {
	case "on":
		bf = true
	case "off":
		bf = false
	default:
		err = fmt.Errorf("invalid value ‘%s’ for BoolFlag", s)
	}

	return bf, err
	b, err := ParseBool(s)
	return BoolFlag(b), err
}
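A short sketch of the relaxed boolean handling introduced above; the accepted spellings are the ones listed in ParseBool, and the helper below is illustrative only.

// Hypothetical usage of the new helpers (not part of this commit).
// Assumes package config scope; "fmt" is already imported above.
func exampleBoolParsing() {
	on, _ := ParseBool("enabled") // true: "enabled"/"disabled" are folded to true/false
	fmt.Println(FormatBool(on))   // prints "on"

	if _, err := ParseBool("junk"); err != nil {
		fmt.Println(err) // ParseBool: parsing 'junk': invalid syntax
	}

	bf, _ := ParseBoolFlag("TRUE") // BoolFlag(true), now routed through ParseBool
	fmt.Println(bf)
}
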
@@ -17,7 +17,6 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@@ -68,33 +67,29 @@ func TestBoolFlagUnmarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
data []byte
|
||||
expectedResult BoolFlag
|
||||
expectedErr error
|
||||
expectedErr bool
|
||||
}{
|
||||
{[]byte(`{}`), BoolFlag(false), errors.New("json: cannot unmarshal object into Go value of type string")},
|
||||
{[]byte(`["on"]`), BoolFlag(false), errors.New("json: cannot unmarshal array into Go value of type string")},
|
||||
{[]byte(`"junk"`), BoolFlag(false), errors.New("invalid value ‘junk’ for BoolFlag")},
|
||||
{[]byte(`"true"`), BoolFlag(false), errors.New("invalid value ‘true’ for BoolFlag")},
|
||||
{[]byte(`"false"`), BoolFlag(false), errors.New("invalid value ‘false’ for BoolFlag")},
|
||||
{[]byte(`"ON"`), BoolFlag(false), errors.New("invalid value ‘ON’ for BoolFlag")},
|
||||
{[]byte(`"OFF"`), BoolFlag(false), errors.New("invalid value ‘OFF’ for BoolFlag")},
|
||||
{[]byte(`""`), BoolFlag(true), nil},
|
||||
{[]byte(`"on"`), BoolFlag(true), nil},
|
||||
{[]byte(`"off"`), BoolFlag(false), nil},
|
||||
{[]byte(`{}`), BoolFlag(false), true},
|
||||
{[]byte(`["on"]`), BoolFlag(false), true},
|
||||
{[]byte(`"junk"`), BoolFlag(false), true},
|
||||
{[]byte(`""`), BoolFlag(true), false},
|
||||
{[]byte(`"on"`), BoolFlag(true), false},
|
||||
{[]byte(`"off"`), BoolFlag(false), false},
|
||||
{[]byte(`"true"`), BoolFlag(true), false},
|
||||
{[]byte(`"false"`), BoolFlag(false), false},
|
||||
{[]byte(`"ON"`), BoolFlag(true), false},
|
||||
{[]byte(`"OFF"`), BoolFlag(false), false},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
var flag BoolFlag
|
||||
err := (&flag).UnmarshalJSON(testCase.data)
|
||||
if testCase.expectedErr == nil {
|
||||
if err != nil {
|
||||
t.Fatalf("error: expected = <nil>, got = %v", err)
|
||||
}
|
||||
} else if err == nil {
|
||||
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
|
||||
} else if testCase.expectedErr.Error() != err.Error() {
|
||||
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
|
||||
if !testCase.expectedErr && err != nil {
|
||||
t.Fatalf("error: expected = <nil>, got = %v", err)
|
||||
}
|
||||
if testCase.expectedErr && err == nil {
|
||||
t.Fatalf("error: expected error, got = <nil>")
|
||||
}
|
||||
|
||||
if err == nil && testCase.expectedResult != flag {
|
||||
t.Fatalf("result: expected: %v, got: %v", testCase.expectedResult, flag)
|
||||
}
|
||||
@@ -106,30 +101,26 @@ func TestParseBoolFlag(t *testing.T) {
|
||||
testCases := []struct {
|
||||
flagStr string
|
||||
expectedResult BoolFlag
|
||||
expectedErr error
|
||||
expectedErr bool
|
||||
}{
|
||||
{"", BoolFlag(false), errors.New("invalid value ‘’ for BoolFlag")},
|
||||
{"junk", BoolFlag(false), errors.New("invalid value ‘junk’ for BoolFlag")},
|
||||
{"true", BoolFlag(false), errors.New("invalid value ‘true’ for BoolFlag")},
|
||||
{"false", BoolFlag(false), errors.New("invalid value ‘false’ for BoolFlag")},
|
||||
{"ON", BoolFlag(false), errors.New("invalid value ‘ON’ for BoolFlag")},
|
||||
{"OFF", BoolFlag(false), errors.New("invalid value ‘OFF’ for BoolFlag")},
|
||||
{"on", BoolFlag(true), nil},
|
||||
{"off", BoolFlag(false), nil},
|
||||
{"", BoolFlag(false), true},
|
||||
{"junk", BoolFlag(false), true},
|
||||
{"true", BoolFlag(true), false},
|
||||
{"false", BoolFlag(false), false},
|
||||
{"ON", BoolFlag(true), false},
|
||||
{"OFF", BoolFlag(false), false},
|
||||
{"on", BoolFlag(true), false},
|
||||
{"off", BoolFlag(false), false},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
bf, err := ParseBoolFlag(testCase.flagStr)
|
||||
if testCase.expectedErr == nil {
|
||||
if err != nil {
|
||||
t.Fatalf("error: expected = <nil>, got = %v", err)
|
||||
}
|
||||
} else if err == nil {
|
||||
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
|
||||
} else if testCase.expectedErr.Error() != err.Error() {
|
||||
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
|
||||
if !testCase.expectedErr && err != nil {
|
||||
t.Fatalf("error: expected = <nil>, got = %v", err)
|
||||
}
|
||||
if testCase.expectedErr && err == nil {
|
||||
t.Fatalf("error: expected error, got = <nil>")
|
||||
}
|
||||
|
||||
if err == nil && testCase.expectedResult != bf {
|
||||
t.Fatalf("result: expected: %v, got: %v", testCase.expectedResult, bf)
|
||||
}
|
||||
|
||||
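The two test tables above pin down the new BoolFlag semantics: "on", "true" and "ON" enable the flag, "off", "false" and "OFF" disable it, and inputs such as "junk", {} or ["on"] are errors; UnmarshalJSON additionally treats an empty JSON string as enabled, while ParseBoolFlag rejects an empty string. A minimal stand-alone sketch of those rules, with parseBoolFlag as a hypothetical stand-in that mirrors only the inputs exercised by the tests (it is not the cmd package implementation):

package main

import (
	"fmt"
	"log"
)

// parseBoolFlag is a stand-in for the behaviour the ParseBoolFlag test
// table above describes; only these exact inputs are handled.
func parseBoolFlag(s string) (bool, error) {
	switch s {
	case "on", "true", "ON":
		return true, nil
	case "off", "false", "OFF":
		return false, nil
	}
	return false, fmt.Errorf("invalid value ‘%s’ for BoolFlag", s)
}

func main() {
	for _, v := range []string{"on", "OFF", "junk"} {
		flag, err := parseBoolFlag(v)
		if err != nil {
			log.Printf("%q rejected: %v", v, err)
			continue
		}
		fmt.Printf("%q parsed as %v\n", v, flag)
	}
}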
6 cmd/config/cache/config.go (vendored)
@@ -28,9 +28,11 @@ import (
|
||||
|
||||
// Config represents cache config settings
|
||||
type Config struct {
|
||||
Enabled bool `json:"-"`
|
||||
Drives []string `json:"drives"`
|
||||
Expiry int `json:"expiry"`
|
||||
MaxUse int `json:"maxuse"`
|
||||
Quota int `json:"quota"`
|
||||
Exclude []string `json:"exclude"`
|
||||
}
|
||||
|
||||
@@ -55,6 +57,10 @@ func (cfg *Config) UnmarshalJSON(data []byte) (err error) {
|
||||
return errors.New("config max use value should not be null or negative")
|
||||
}
|
||||
|
||||
if _cfg.Quota < 0 {
|
||||
return errors.New("config quota value should not be null or negative")
|
||||
}
|
||||
|
||||
if _, err = parseCacheDrives(_cfg.Drives); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
4 cmd/config/cache/config_test.go (vendored)
@@ -69,7 +69,7 @@ func TestParseCacheDrives(t *testing.T) {
|
||||
}{"/home/drive{1..3}", []string{}, false})
|
||||
}
|
||||
for i, testCase := range testCases {
|
||||
drives, err := parseCacheDrives(strings.Split(testCase.driveStr, cacheEnvDelimiter))
|
||||
drives, err := parseCacheDrives(strings.Split(testCase.driveStr, cacheDelimiter))
|
||||
if err != nil && testCase.success {
|
||||
t.Errorf("Test %d: Expected success but failed instead %s", i+1, err)
|
||||
}
|
||||
@@ -98,7 +98,7 @@ func TestParseCacheExclude(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
excludes, err := parseCacheExcludes(strings.Split(testCase.excludeStr, cacheEnvDelimiter))
|
||||
excludes, err := parseCacheExcludes(strings.Split(testCase.excludeStr, cacheDelimiter))
|
||||
if err != nil && testCase.success {
|
||||
t.Errorf("Test %d: Expected success but failed instead %s", i+1, err)
|
||||
}
|
||||
|
||||
31 cmd/config/cache/help.go (vendored, new file)
@@ -0,0 +1,31 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cache
|
||||
|
||||
import "github.com/minio/minio/cmd/config"
|
||||
|
||||
// Help template for caching feature.
|
||||
var (
|
||||
Help = config.HelpKV{
|
||||
Drives: `List of mounted drives or directories delimited by ";"`,
|
||||
Exclude: `List of wildcard based cache exclusion patterns delimited by ";"`,
|
||||
Expiry: `Cache expiry duration in days. eg: "90"`,
|
||||
Quota: `Maximum permitted usage of the cache in percentage (0-100)`,
|
||||
config.State: "Indicates if caching is enabled or not",
|
||||
config.Comment: "A comment to describe the caching setting",
|
||||
}
|
||||
)
|
||||
40 cmd/config/cache/legacy.go (vendored, new file)
@@ -0,0 +1,40 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
)
|
||||
|
||||
// SetCacheConfig - One time migration code needed, for migrating from older config to new for Cache.
|
||||
func SetCacheConfig(s config.Config, cfg Config) {
|
||||
s[config.CacheSubSys][config.Default] = DefaultKVS
|
||||
s[config.CacheSubSys][config.Default][Drives] = strings.Join(cfg.Drives, cacheDelimiter)
|
||||
s[config.CacheSubSys][config.Default][Exclude] = strings.Join(cfg.Exclude, cacheDelimiter)
|
||||
s[config.CacheSubSys][config.Default][Expiry] = fmt.Sprintf("%d", cfg.Expiry)
|
||||
s[config.CacheSubSys][config.Default][Quota] = fmt.Sprintf("%d", cfg.MaxUse)
|
||||
s[config.CacheSubSys][config.Default][config.State] = func() string {
|
||||
if len(cfg.Drives) > 0 {
|
||||
return config.StateOn
|
||||
}
|
||||
return config.StateOff
|
||||
}()
|
||||
s[config.CacheSubSys][config.Default][config.Comment] = "Settings for Cache, after migrating config"
|
||||
}
|
||||
97 cmd/config/cache/lookup.go (vendored)
@@ -17,6 +17,7 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
@@ -26,53 +27,107 @@ import (
|
||||
|
||||
// Cache ENVs
|
||||
const (
|
||||
Drives = "drives"
|
||||
Exclude = "exclude"
|
||||
Expiry = "expiry"
|
||||
MaxUse = "maxuse"
|
||||
Quota = "quota"
|
||||
|
||||
EnvCacheState = "MINIO_CACHE_STATE"
|
||||
EnvCacheDrives = "MINIO_CACHE_DRIVES"
|
||||
EnvCacheExclude = "MINIO_CACHE_EXCLUDE"
|
||||
EnvCacheExpiry = "MINIO_CACHE_EXPIRY"
|
||||
EnvCacheMaxUse = "MINIO_CACHE_MAXUSE"
|
||||
EnvCacheQuota = "MINIO_CACHE_QUOTA"
|
||||
EnvCacheEncryptionMasterKey = "MINIO_CACHE_ENCRYPTION_MASTER_KEY"
|
||||
|
||||
DefaultExpiry = "90"
|
||||
DefaultQuota = "80"
|
||||
)
|
||||
|
||||
// DefaultKVS - default KV settings for caching.
|
||||
var (
|
||||
DefaultKVS = config.KVS{
|
||||
config.State: config.StateOff,
|
||||
config.Comment: "This is a default cache configuration, only applicable in gateway setups",
|
||||
Drives: "",
|
||||
Exclude: "",
|
||||
Expiry: DefaultExpiry,
|
||||
Quota: DefaultQuota,
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
cacheEnvDelimiter = ";"
|
||||
cacheDelimiter = ";"
|
||||
)
|
||||
|
||||
// LookupConfig - extracts cache configuration provided by environment
|
||||
// variables and merge them with provided CacheConfiguration.
|
||||
func LookupConfig(cfg Config) (Config, error) {
|
||||
if drives := env.Get(EnvCacheDrives, strings.Join(cfg.Drives, ",")); drives != "" {
|
||||
driveList, err := parseCacheDrives(strings.Split(drives, cacheEnvDelimiter))
|
||||
func LookupConfig(kvs config.KVS) (Config, error) {
|
||||
cfg := Config{}
|
||||
|
||||
if err := config.CheckValidKeys(config.CacheSubSys, kvs, DefaultKVS); err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
|
||||
// Check if cache is explicitly disabled
|
||||
stateBool, err := config.ParseBool(env.Get(EnvCacheState, kvs.Get(config.State)))
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
if !stateBool {
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
drives := env.Get(EnvCacheDrives, kvs.Get(Drives))
|
||||
if len(drives) == 0 {
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
cfg.Drives, err = parseCacheDrives(strings.Split(drives, cacheDelimiter))
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
|
||||
cfg.Enabled = true
|
||||
if excludes := env.Get(EnvCacheExclude, kvs.Get(Exclude)); excludes != "" {
|
||||
cfg.Exclude, err = parseCacheExcludes(strings.Split(excludes, cacheDelimiter))
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
cfg.Drives = driveList
|
||||
}
|
||||
|
||||
if excludes := env.Get(EnvCacheExclude, strings.Join(cfg.Exclude, ",")); excludes != "" {
|
||||
excludeList, err := parseCacheExcludes(strings.Split(excludes, cacheEnvDelimiter))
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
cfg.Exclude = excludeList
|
||||
}
|
||||
|
||||
if expiryStr := env.Get(EnvCacheExpiry, strconv.Itoa(cfg.Expiry)); expiryStr != "" {
|
||||
expiry, err := strconv.Atoi(expiryStr)
|
||||
if expiryStr := env.Get(EnvCacheExpiry, kvs.Get(Expiry)); expiryStr != "" {
|
||||
cfg.Expiry, err = strconv.Atoi(expiryStr)
|
||||
if err != nil {
|
||||
return cfg, config.ErrInvalidCacheExpiryValue(err)
|
||||
}
|
||||
cfg.Expiry = expiry
|
||||
}
|
||||
|
||||
if maxUseStr := env.Get(EnvCacheMaxUse, strconv.Itoa(cfg.MaxUse)); maxUseStr != "" {
|
||||
maxUse, err := strconv.Atoi(maxUseStr)
|
||||
if maxUseStr := env.Get(EnvCacheMaxUse, kvs.Get(MaxUse)); maxUseStr != "" {
|
||||
cfg.MaxUse, err = strconv.Atoi(maxUseStr)
|
||||
if err != nil {
|
||||
return cfg, config.ErrInvalidCacheMaxUse(err)
|
||||
return cfg, config.ErrInvalidCacheQuota(err)
|
||||
}
|
||||
// maxUse should be a valid percentage.
|
||||
if maxUse > 0 && maxUse <= 100 {
|
||||
cfg.MaxUse = maxUse
|
||||
if cfg.MaxUse < 0 || cfg.MaxUse > 100 {
|
||||
err := errors.New("config max use value should not be null or negative")
|
||||
return cfg, config.ErrInvalidCacheQuota(err)
|
||||
}
|
||||
cfg.Quota = cfg.MaxUse
|
||||
}
|
||||
|
||||
if quotaStr := env.Get(EnvCacheQuota, kvs.Get(Quota)); quotaStr != "" {
|
||||
cfg.Quota, err = strconv.Atoi(quotaStr)
|
||||
if err != nil {
|
||||
return cfg, config.ErrInvalidCacheQuota(err)
|
||||
}
|
||||
// quota should be a valid percentage.
|
||||
if cfg.Quota < 0 || cfg.Quota > 100 {
|
||||
err := errors.New("config quota value should not be null or negative")
|
||||
return cfg, config.ErrInvalidCacheQuota(err)
|
||||
}
|
||||
cfg.MaxUse = cfg.Quota
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
|
||||
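Taken together, the new cache lookup reads each setting from its MINIO_CACHE_* environment variable first, falls back to the KV config, and lets the state key gate the whole feature. A small sketch of driving it purely through the environment, assuming the exported LookupConfig and DefaultKVS shown above; the mount paths and exclude patterns are made up:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/minio/minio/cmd/config/cache"
)

func main() {
	// Enable caching and point it at two (made-up) cache drives; expiry
	// and quota fall back to the defaults carried in cache.DefaultKVS.
	os.Setenv("MINIO_CACHE_STATE", "on")
	os.Setenv("MINIO_CACHE_DRIVES", "/mnt/cache1;/mnt/cache2")
	os.Setenv("MINIO_CACHE_EXCLUDE", "*.tmp;bucket1/*")

	cfg, err := cache.LookupConfig(cache.DefaultKVS)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(cfg.Enabled, cfg.Drives, cfg.Expiry, cfg.Quota)
}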
@@ -18,7 +18,6 @@ package compress
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
@@ -34,30 +33,62 @@ type Config struct {
|
||||
|
||||
// Compression environment variables
|
||||
const (
|
||||
EnvCompress = "MINIO_COMPRESS"
|
||||
Extensions = "extensions"
|
||||
MimeTypes = "mime_types"
|
||||
|
||||
EnvCompressState = "MINIO_COMPRESS_STATE"
|
||||
EnvCompressExtensions = "MINIO_COMPRESS_EXTENSIONS"
|
||||
EnvCompressMimeTypes = "MINIO_COMPRESS_MIMETYPES"
|
||||
EnvCompressMimeTypes = "MINIO_COMPRESS_MIME_TYPES"
|
||||
|
||||
// Include-list for compression.
|
||||
DefaultExtensions = ".txt,.log,.csv,.json,.tar,.xml,.bin"
|
||||
DefaultMimeTypes = "text/*,application/json,application/xml"
|
||||
)
|
||||
|
||||
// DefaultKVS - default KV config for compression settings
|
||||
var (
|
||||
DefaultKVS = config.KVS{
|
||||
config.State: config.StateOff,
|
||||
config.Comment: "This is a default compression configuration",
|
||||
Extensions: DefaultExtensions,
|
||||
MimeTypes: DefaultMimeTypes,
|
||||
}
|
||||
)
|
||||
|
||||
// Parses the given compression include list `extensions` or `content-types`.
|
||||
func parseCompressIncludes(includes []string) ([]string, error) {
|
||||
for _, e := range includes {
|
||||
if len(e) == 0 {
|
||||
return nil, config.ErrInvalidCompressionIncludesValue(nil).Msg("extension/mime-type (%s) cannot be empty", e)
|
||||
return nil, config.ErrInvalidCompressionIncludesValue(nil).Msg("extension/mime-type cannot be empty")
|
||||
}
|
||||
}
|
||||
return includes, nil
|
||||
}
|
||||
|
||||
// LookupConfig - lookup compression config.
|
||||
func LookupConfig(cfg Config) (Config, error) {
|
||||
if compress := env.Get(EnvCompress, strconv.FormatBool(cfg.Enabled)); compress != "" {
|
||||
cfg.Enabled = strings.EqualFold(compress, "true")
|
||||
func LookupConfig(kvs config.KVS) (Config, error) {
|
||||
var err error
|
||||
cfg := Config{}
|
||||
if err = config.CheckValidKeys(config.CompressionSubSys, kvs, DefaultKVS); err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
|
||||
compressExtensions := env.Get(EnvCompressExtensions, strings.Join(cfg.Extensions, ","))
|
||||
compressMimeTypes := env.Get(EnvCompressMimeTypes, strings.Join(cfg.MimeTypes, ","))
|
||||
if compressExtensions != "" || compressMimeTypes != "" {
|
||||
compress := env.Get(EnvCompress, "")
|
||||
if compress == "" {
|
||||
compress = env.Get(EnvCompressState, kvs.Get(config.State))
|
||||
}
|
||||
cfg.Enabled, err = config.ParseBool(compress)
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
if !cfg.Enabled {
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
compressExtensions := env.Get(EnvCompressExtensions, kvs.Get(Extensions))
|
||||
compressMimeTypes := env.Get(EnvCompressMimeTypes, kvs.Get(MimeTypes))
|
||||
compressMimeTypesLegacy := env.Get(EnvCompressMimeTypesLegacy, kvs.Get(MimeTypes))
|
||||
if compressExtensions != "" || compressMimeTypes != "" || compressMimeTypesLegacy != "" {
|
||||
if compressExtensions != "" {
|
||||
extensions, err := parseCompressIncludes(strings.Split(compressExtensions, config.ValueSeparator))
|
||||
if err != nil {
|
||||
@@ -66,11 +97,19 @@ func LookupConfig(cfg Config) (Config, error) {
|
||||
cfg.Extensions = extensions
|
||||
}
|
||||
if compressMimeTypes != "" {
|
||||
contenttypes, err := parseCompressIncludes(strings.Split(compressMimeTypes, config.ValueSeparator))
|
||||
mimeTypes, err := parseCompressIncludes(strings.Split(compressMimeTypes, config.ValueSeparator))
|
||||
if err != nil {
|
||||
return cfg, fmt.Errorf("%s: Invalid MINIO_COMPRESS_MIMETYPES value (`%s`)", err, contenttypes)
|
||||
return cfg, fmt.Errorf("%s: Invalid MINIO_COMPRESS_MIME_TYPES value (`%s`)", err, mimeTypes)
|
||||
}
|
||||
cfg.MimeTypes = contenttypes
|
||||
cfg.MimeTypes = mimeTypes
|
||||
}
|
||||
if compressMimeTypesLegacy != "" {
|
||||
mimeTypes, err := parseCompressIncludes(strings.Split(compressMimeTypesLegacy,
|
||||
config.ValueSeparator))
|
||||
if err != nil {
|
||||
return cfg, fmt.Errorf("%s: Invalid MINIO_COMPRESS_MIME_TYPES value (`%s`)", err, mimeTypes)
|
||||
}
|
||||
cfg.MimeTypes = mimeTypes
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
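As with caching, the compression lookup now honours a dedicated state key and env and falls back to DefaultKVS for the include-lists, while still consulting the legacy MINIO_COMPRESS and MINIO_COMPRESS_MIMETYPES envs. A hedged sketch of enabling it from the environment (the extension list is chosen arbitrarily):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/minio/minio/cmd/config/compress"
)

func main() {
	// Turn compression on via the new STATE env and narrow the extension
	// include-list; mime-types fall back to compress.DefaultKVS.
	os.Setenv("MINIO_COMPRESS_STATE", "on")
	os.Setenv("MINIO_COMPRESS_EXTENSIONS", ".log,.csv")

	cfg, err := compress.LookupConfig(compress.DefaultKVS)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(cfg.Enabled, cfg.Extensions, cfg.MimeTypes)
}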
29 cmd/config/compress/help.go (new file)
@@ -0,0 +1,29 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package compress
|
||||
|
||||
import "github.com/minio/minio/cmd/config"
|
||||
|
||||
// Help template for compress feature.
|
||||
var (
|
||||
Help = config.HelpKV{
|
||||
Extensions: `Comma separated file extensions to compress. eg: ".txt,.log,.csv"`,
|
||||
MimeTypes: `Comma separated wildcard mime-types to compress. eg: "text/*,application/json,application/xml"`,
|
||||
config.State: "Indicates if compression is enabled or not",
|
||||
config.Comment: "A comment to describe the compression setting",
|
||||
}
|
||||
)
|
||||
44 cmd/config/compress/legacy.go (new file)
@@ -0,0 +1,44 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package compress
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
)
|
||||
|
||||
// Legacy envs.
|
||||
const (
|
||||
EnvCompress = "MINIO_COMPRESS"
|
||||
EnvCompressMimeTypesLegacy = "MINIO_COMPRESS_MIMETYPES"
|
||||
)
|
||||
|
||||
// SetCompressionConfig - One time migration code needed, for migrating from older config to new for Compression.
|
||||
func SetCompressionConfig(s config.Config, cfg Config) {
|
||||
s[config.CompressionSubSys][config.Default] = config.KVS{
|
||||
config.State: func() string {
|
||||
if cfg.Enabled {
|
||||
return config.StateOn
|
||||
}
|
||||
return config.StateOff
|
||||
}(),
|
||||
config.Comment: "Settings for Compression, after migrating config",
|
||||
Extensions: strings.Join(cfg.Extensions, ","),
|
||||
MimeTypes: strings.Join(cfg.MimeTypes, ","),
|
||||
}
|
||||
}
|
||||
464 cmd/config/config.go (new file)
@@ -0,0 +1,464 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio-go/pkg/set"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/color"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
)
|
||||
|
||||
// Error config error type
|
||||
type Error string
|
||||
|
||||
func (e Error) Error() string {
|
||||
return string(e)
|
||||
}
|
||||
|
||||
// Default keys
|
||||
const (
|
||||
Default = "_"
|
||||
State = "state"
|
||||
Comment = "comment"
|
||||
|
||||
// State values
|
||||
StateOn = "on"
|
||||
StateOff = "off"
|
||||
|
||||
RegionName = "name"
|
||||
AccessKey = "access_key"
|
||||
SecretKey = "secret_key"
|
||||
)
|
||||
|
||||
// Top level config constants.
|
||||
const (
|
||||
CredentialsSubSys = "credentials"
|
||||
PolicyOPASubSys = "policy_opa"
|
||||
IdentityOpenIDSubSys = "identity_openid"
|
||||
IdentityLDAPSubSys = "identity_ldap"
|
||||
WormSubSys = "worm"
|
||||
CacheSubSys = "cache"
|
||||
RegionSubSys = "region"
|
||||
StorageClassSubSys = "storageclass"
|
||||
CompressionSubSys = "compression"
|
||||
KmsVaultSubSys = "kms_vault"
|
||||
LoggerHTTPSubSys = "logger_http"
|
||||
LoggerHTTPAuditSubSys = "logger_http_audit"
|
||||
|
||||
// Add new constants here if you add new fields to config.
|
||||
)
|
||||
|
||||
// Notification config constants.
|
||||
const (
|
||||
NotifyKafkaSubSys = "notify_kafka"
|
||||
NotifyMQTTSubSys = "notify_mqtt"
|
||||
NotifyMySQLSubSys = "notify_mysql"
|
||||
NotifyNATSSubSys = "notify_nats"
|
||||
NotifyNSQSubSys = "notify_nsq"
|
||||
NotifyESSubSys = "notify_elasticsearch"
|
||||
NotifyAMQPSubSys = "notify_amqp"
|
||||
NotifyPostgresSubSys = "notify_postgres"
|
||||
NotifyRedisSubSys = "notify_redis"
|
||||
NotifyWebhookSubSys = "notify_webhook"
|
||||
|
||||
// Add new constants here if you add new fields to config.
|
||||
)
|
||||
|
||||
// SubSystems - all supported sub-systems
|
||||
var SubSystems = set.CreateStringSet([]string{
|
||||
CredentialsSubSys,
|
||||
WormSubSys,
|
||||
RegionSubSys,
|
||||
CacheSubSys,
|
||||
StorageClassSubSys,
|
||||
CompressionSubSys,
|
||||
KmsVaultSubSys,
|
||||
LoggerHTTPSubSys,
|
||||
LoggerHTTPAuditSubSys,
|
||||
PolicyOPASubSys,
|
||||
IdentityLDAPSubSys,
|
||||
IdentityOpenIDSubSys,
|
||||
NotifyAMQPSubSys,
|
||||
NotifyESSubSys,
|
||||
NotifyKafkaSubSys,
|
||||
NotifyMQTTSubSys,
|
||||
NotifyMySQLSubSys,
|
||||
NotifyNATSSubSys,
|
||||
NotifyNSQSubSys,
|
||||
NotifyPostgresSubSys,
|
||||
NotifyRedisSubSys,
|
||||
NotifyWebhookSubSys,
|
||||
}...)
|
||||
|
||||
// SubSystemsSingleTargets - subsystems which only support single target.
|
||||
var SubSystemsSingleTargets = set.CreateStringSet([]string{
|
||||
CredentialsSubSys,
|
||||
WormSubSys,
|
||||
RegionSubSys,
|
||||
CacheSubSys,
|
||||
StorageClassSubSys,
|
||||
CompressionSubSys,
|
||||
KmsVaultSubSys,
|
||||
PolicyOPASubSys,
|
||||
IdentityLDAPSubSys,
|
||||
IdentityOpenIDSubSys,
|
||||
}...)
|
||||
|
||||
// Constant separators
|
||||
const (
|
||||
SubSystemSeparator = `:`
|
||||
KvSeparator = `=`
|
||||
KvSpaceSeparator = ` `
|
||||
KvComment = `#`
|
||||
KvNewline = "\n"
|
||||
KvDoubleQuote = `"`
|
||||
KvSingleQuote = `'`
|
||||
)
|
||||
|
||||
// KVS - is a shorthand for some wrapper functions
|
||||
// to operate on list of key values.
|
||||
type KVS map[string]string
|
||||
|
||||
func (kvs KVS) String() string {
|
||||
var s strings.Builder
|
||||
for k, v := range kvs {
|
||||
if k == Comment {
|
||||
// Skip the comment, comment will be printed elsewhere.
|
||||
continue
|
||||
}
|
||||
s.WriteString(k)
|
||||
s.WriteString(KvSeparator)
|
||||
s.WriteString(KvDoubleQuote)
|
||||
s.WriteString(v)
|
||||
s.WriteString(KvDoubleQuote)
|
||||
s.WriteString(KvSpaceSeparator)
|
||||
}
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// Get - returns the value of a key, if not found returns empty.
|
||||
func (kvs KVS) Get(key string) string {
|
||||
return kvs[key]
|
||||
}
|
||||
|
||||
// Config - MinIO server config structure.
|
||||
type Config map[string]map[string]KVS
|
||||
|
||||
func (c Config) String() string {
|
||||
var s strings.Builder
|
||||
for k, v := range c {
|
||||
for target, kv := range v {
|
||||
c, ok := kv[Comment]
|
||||
if ok {
|
||||
// For multiple comments split it correctly.
|
||||
for _, c1 := range strings.Split(c, KvNewline) {
|
||||
if c1 == "" {
|
||||
continue
|
||||
}
|
||||
s.WriteString(color.YellowBold(KvComment))
|
||||
s.WriteString(KvSpaceSeparator)
|
||||
s.WriteString(color.BlueBold(strings.TrimSpace(c1)))
|
||||
s.WriteString(KvNewline)
|
||||
}
|
||||
}
|
||||
s.WriteString(color.CyanBold(k))
|
||||
if target != Default {
|
||||
s.WriteString(SubSystemSeparator)
|
||||
s.WriteString(target)
|
||||
}
|
||||
s.WriteString(KvSpaceSeparator)
|
||||
s.WriteString(kv.String())
|
||||
s.WriteString(KvNewline)
|
||||
}
|
||||
}
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// Default KV configs for worm and region
|
||||
var (
|
||||
DefaultCredentialKVS = KVS{
|
||||
State: StateOff,
|
||||
Comment: "This is a default credential configuration",
|
||||
AccessKey: auth.DefaultAccessKey,
|
||||
SecretKey: auth.DefaultSecretKey,
|
||||
}
|
||||
|
||||
DefaultWormKVS = KVS{
|
||||
State: StateOff,
|
||||
Comment: "This is a default WORM configuration",
|
||||
}
|
||||
|
||||
DefaultRegionKVS = KVS{
|
||||
State: StateOff,
|
||||
Comment: "This is a default Region configuration",
|
||||
RegionName: "",
|
||||
}
|
||||
)
|
||||
|
||||
// LookupCreds - lookup credentials from config.
|
||||
func LookupCreds(kv KVS) (auth.Credentials, error) {
|
||||
if err := CheckValidKeys(CredentialsSubSys, kv, DefaultCredentialKVS); err != nil {
|
||||
return auth.Credentials{}, err
|
||||
}
|
||||
return auth.CreateCredentials(env.Get(EnvAccessKey, kv.Get(AccessKey)),
|
||||
env.Get(EnvSecretKey, kv.Get(SecretKey)))
|
||||
}
|
||||
|
||||
// LookupRegion - get current region.
|
||||
func LookupRegion(kv KVS) (string, error) {
|
||||
if err := CheckValidKeys(RegionSubSys, kv, DefaultRegionKVS); err != nil {
|
||||
return "", err
|
||||
}
|
||||
region := env.Get(EnvRegion, "")
|
||||
if region == "" {
|
||||
region = env.Get(EnvRegionName, kv.Get(RegionName))
|
||||
}
|
||||
return region, nil
|
||||
}
|
||||
|
||||
// CheckValidKeys - checks if inputs KVS has the necessary keys,
|
||||
// returns error if it finds extra or superfluous keys.
|
||||
func CheckValidKeys(subSys string, kv KVS, validKVS KVS) error {
|
||||
nkv := KVS{}
|
||||
for k, v := range kv {
|
||||
if _, ok := validKVS[k]; !ok {
|
||||
nkv[k] = v
|
||||
}
|
||||
}
|
||||
if len(nkv) > 0 {
|
||||
return Error(fmt.Sprintf("found invalid keys (%s) for '%s' sub-system", nkv.String(), subSys))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
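CheckValidKeys is what gives the new format its strict validation: any key not present in the sub-system's default KVS is collected and reported. A small sketch, assuming the exported KVS, DefaultRegionKVS and CheckValidKeys above; the stray "expires" key is deliberate:

package main

import (
	"fmt"

	"github.com/minio/minio/cmd/config"
)

func main() {
	kv := config.KVS{
		config.State:      config.StateOn,
		config.Comment:    "region with a mistyped key",
		config.RegionName: "us-west-2",
		"expires":         "90", // not a region key, so validation fails
	}
	err := config.CheckValidKeys(config.RegionSubSys, kv, config.DefaultRegionKVS)
	fmt.Println(err) // reports the unknown "expires" key for the 'region' sub-system
}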
// LookupWorm - check if worm is enabled
|
||||
func LookupWorm(kv KVS) (bool, error) {
|
||||
if err := CheckValidKeys(WormSubSys, kv, DefaultWormKVS); err != nil {
|
||||
return false, err
|
||||
}
|
||||
worm := env.Get(EnvWorm, "")
|
||||
if worm == "" {
|
||||
worm = env.Get(EnvWormState, kv.Get(State))
|
||||
if worm == "" {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return ParseBool(worm)
|
||||
}
|
||||
|
||||
// New - initialize a new server config.
|
||||
func New() Config {
|
||||
srvCfg := make(Config)
|
||||
for _, k := range SubSystems.ToSlice() {
|
||||
srvCfg[k] = map[string]KVS{}
|
||||
}
|
||||
return srvCfg
|
||||
}
|
||||
|
||||
// GetKVS - get kvs from specific subsystem.
|
||||
func (c Config) GetKVS(s string) (map[string]KVS, error) {
|
||||
if len(s) == 0 {
|
||||
return nil, Error("input cannot be empty")
|
||||
}
|
||||
inputs := strings.Fields(s)
|
||||
if len(inputs) > 1 {
|
||||
return nil, Error(fmt.Sprintf("invalid number of arguments %s", s))
|
||||
}
|
||||
subSystemValue := strings.SplitN(inputs[0], SubSystemSeparator, 2)
|
||||
if len(subSystemValue) == 0 {
|
||||
return nil, Error(fmt.Sprintf("invalid number of arguments %s", s))
|
||||
}
|
||||
found := SubSystems.Contains(subSystemValue[0])
|
||||
if !found {
|
||||
// Check for sub-prefix only if the input value
|
||||
// is only a single value, this rejects invalid
|
||||
// inputs if any.
|
||||
found = !SubSystems.FuncMatch(strings.HasPrefix, subSystemValue[0]).IsEmpty() && len(subSystemValue) == 1
|
||||
}
|
||||
if !found {
|
||||
return nil, Error(fmt.Sprintf("unknown sub-system %s", s))
|
||||
}
|
||||
|
||||
kvs := make(map[string]KVS)
|
||||
var ok bool
|
||||
if len(subSystemValue) == 2 {
|
||||
if len(subSystemValue[1]) == 0 {
|
||||
err := fmt.Sprintf("sub-system target '%s' cannot be empty", s)
|
||||
return nil, Error(err)
|
||||
}
|
||||
kvs[inputs[0]], ok = c[subSystemValue[0]][subSystemValue[1]]
|
||||
if !ok {
|
||||
err := fmt.Sprintf("sub-system target '%s' doesn't exist, proceed to create a new one", s)
|
||||
return nil, Error(err)
|
||||
}
|
||||
return kvs, nil
|
||||
}
|
||||
|
||||
for subSys, subSysTgts := range c {
|
||||
if !strings.HasPrefix(subSys, subSystemValue[0]) {
|
||||
continue
|
||||
}
|
||||
for k, kv := range subSysTgts {
|
||||
if k != Default {
|
||||
kvs[subSys+SubSystemSeparator+k] = kv
|
||||
} else {
|
||||
kvs[subSys] = kv
|
||||
}
|
||||
}
|
||||
}
|
||||
return kvs, nil
|
||||
}
|
||||
|
||||
// DelKVS - delete a specific key.
|
||||
func (c Config) DelKVS(s string) error {
|
||||
if len(s) == 0 {
|
||||
return Error("input arguments cannot be empty")
|
||||
}
|
||||
inputs := strings.Fields(s)
|
||||
if len(inputs) > 1 {
|
||||
return Error(fmt.Sprintf("invalid number of arguments %s", s))
|
||||
}
|
||||
subSystemValue := strings.SplitN(inputs[0], SubSystemSeparator, 2)
|
||||
if len(subSystemValue) == 0 {
|
||||
return Error(fmt.Sprintf("invalid number of arguments %s", s))
|
||||
}
|
||||
if !SubSystems.Contains(subSystemValue[0]) {
|
||||
return Error(fmt.Sprintf("unknown sub-system %s", s))
|
||||
}
|
||||
if len(subSystemValue) == 2 {
|
||||
if len(subSystemValue[1]) == 0 {
|
||||
err := fmt.Sprintf("sub-system target '%s' cannot be empty", s)
|
||||
return Error(err)
|
||||
}
|
||||
delete(c[subSystemValue[0]], subSystemValue[1])
|
||||
return nil
|
||||
}
|
||||
return Error(fmt.Sprintf("default config for '%s' sub-system cannot be removed", s))
|
||||
}
|
||||
|
||||
// This function is needed to trim off single or double quotes creeping into the values.
|
||||
func sanitizeValue(v string) string {
|
||||
v = strings.TrimSuffix(strings.TrimPrefix(strings.TrimSpace(v), KvDoubleQuote), KvDoubleQuote)
|
||||
return strings.TrimSuffix(strings.TrimPrefix(v, KvSingleQuote), KvSingleQuote)
|
||||
}
|
||||
|
||||
// Clone - clones a config map entirely.
|
||||
func (c Config) Clone() Config {
|
||||
cp := New()
|
||||
for subSys, tgtKV := range c {
|
||||
cp[subSys] = make(map[string]KVS)
|
||||
for tgt, kv := range tgtKV {
|
||||
cp[subSys][tgt] = KVS{}
|
||||
for k, v := range kv {
|
||||
cp[subSys][tgt][k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
return cp
|
||||
}
|
||||
|
||||
// SetKVS - set specific key values per sub-system.
|
||||
func (c Config) SetKVS(s string, comment string, defaultKVS map[string]KVS) error {
|
||||
if len(s) == 0 {
|
||||
return Error("input arguments cannot be empty")
|
||||
}
|
||||
inputs := strings.SplitN(s, KvSpaceSeparator, 2)
|
||||
if len(inputs) <= 1 {
|
||||
return Error(fmt.Sprintf("invalid number of arguments '%s'", s))
|
||||
}
|
||||
subSystemValue := strings.SplitN(inputs[0], SubSystemSeparator, 2)
|
||||
if len(subSystemValue) == 0 {
|
||||
return Error(fmt.Sprintf("invalid number of arguments %s", s))
|
||||
}
|
||||
|
||||
if subSystemValue[0] == CredentialsSubSys {
|
||||
return Error(fmt.Sprintf("changing '%s' sub-system values is not allowed, use ENVs instead",
|
||||
subSystemValue[0]))
|
||||
}
|
||||
|
||||
if !SubSystems.Contains(subSystemValue[0]) {
|
||||
return Error(fmt.Sprintf("unknown sub-system %s", s))
|
||||
}
|
||||
|
||||
if SubSystemsSingleTargets.Contains(subSystemValue[0]) && len(subSystemValue) == 2 {
|
||||
return Error(fmt.Sprintf("sub-system '%s' only supports single target", subSystemValue[0]))
|
||||
}
|
||||
|
||||
var kvs = KVS{}
|
||||
var prevK string
|
||||
for _, v := range strings.Fields(inputs[1]) {
|
||||
kv := strings.SplitN(v, KvSeparator, 2)
|
||||
if len(kv) == 0 {
|
||||
continue
|
||||
}
|
||||
if len(kv) == 1 && prevK != "" {
|
||||
kvs[prevK] = strings.Join([]string{kvs[prevK], sanitizeValue(kv[0])}, KvSpaceSeparator)
|
||||
continue
|
||||
}
|
||||
if len(kv[1]) == 0 {
|
||||
err := fmt.Sprintf("value for key '%s' cannot be empty", kv[0])
|
||||
return Error(err)
|
||||
}
|
||||
prevK = kv[0]
|
||||
kvs[kv[0]] = sanitizeValue(kv[1])
|
||||
}
|
||||
|
||||
if len(subSystemValue) == 2 {
|
||||
_, ok := c[subSystemValue[0]][subSystemValue[1]]
|
||||
if !ok {
|
||||
c[subSystemValue[0]][subSystemValue[1]] = defaultKVS[subSystemValue[0]]
|
||||
// Add a comment since it's a new target; this comment may be
|
||||
// overridden if client supplied it.
|
||||
if comment == "" {
|
||||
comment = fmt.Sprintf("Settings for sub-system target %s:%s",
|
||||
subSystemValue[0], subSystemValue[1])
|
||||
}
|
||||
c[subSystemValue[0]][subSystemValue[1]][Comment] = comment
|
||||
}
|
||||
}
|
||||
|
||||
var commentKv bool
|
||||
for k, v := range kvs {
|
||||
if k == Comment {
|
||||
// Set this to true to indicate comment was
|
||||
// supplied by client and is going to be preserved.
|
||||
commentKv = true
|
||||
}
|
||||
if len(subSystemValue) == 2 {
|
||||
c[subSystemValue[0]][subSystemValue[1]][k] = v
|
||||
} else {
|
||||
c[subSystemValue[0]][Default][k] = v
|
||||
}
|
||||
}
|
||||
|
||||
// if client didn't supply the comment try to preserve
|
||||
// the comment if any we found while parsing the incoming
|
||||
// stream, if not preserve the default.
|
||||
if !commentKv && comment != "" {
|
||||
if len(subSystemValue) == 2 {
|
||||
c[subSystemValue[0]][subSystemValue[1]][Comment] = comment
|
||||
} else {
|
||||
c[subSystemValue[0]][Default][Comment] = comment
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
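The single-line format SetKVS parses is "<sub-system>[:<target>] key=value key=value ...", with quoted values sanitized and an optional comment preserved per target; GetKVS and DelKVS use the same addressing. A sketch of creating, reading back and deleting a hypothetical webhook notification target, assuming the exported helpers above (the target name and endpoint are made up):

package main

import (
	"fmt"
	"log"

	"github.com/minio/minio/cmd/config"
)

func main() {
	cfg := config.New()

	// Seed KVS used when a brand new target is created for a sub-system.
	defaults := map[string]config.KVS{
		"notify_webhook": {config.State: config.StateOff},
	}

	// "<sub-system>:<target> key=value ..."
	line := `notify_webhook:target1 endpoint="http://localhost:8080/" state="on"`
	if err := cfg.SetKVS(line, "example webhook target", defaults); err != nil {
		log.Fatalln(err)
	}

	kvs, err := cfg.GetKVS("notify_webhook:target1")
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(kvs)

	// Remove the target again.
	if err := cfg.DelKVS("notify_webhook:target1"); err != nil {
		log.Fatalln(err)
	}
}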
@@ -23,13 +23,17 @@ const (
|
||||
|
||||
// Top level common ENVs
|
||||
const (
|
||||
EnvAccessKey = "MINIO_ACCESS_KEY"
|
||||
EnvSecretKey = "MINIO_SECRET_KEY"
|
||||
EnvBrowser = "MINIO_BROWSER"
|
||||
EnvDomain = "MINIO_DOMAIN"
|
||||
EnvPublicIPs = "MINIO_PUBLIC_IPS"
|
||||
EnvEndpoints = "MINIO_ENDPOINTS"
|
||||
EnvAccessKey = "MINIO_ACCESS_KEY"
|
||||
EnvSecretKey = "MINIO_SECRET_KEY"
|
||||
EnvBrowser = "MINIO_BROWSER"
|
||||
EnvDomain = "MINIO_DOMAIN"
|
||||
EnvRegionName = "MINIO_REGION_NAME"
|
||||
EnvPublicIPs = "MINIO_PUBLIC_IPS"
|
||||
EnvEndpoints = "MINIO_ENDPOINTS"
|
||||
|
||||
EnvUpdate = "MINIO_UPDATE"
|
||||
EnvWorm = "MINIO_WORM"
|
||||
EnvUpdate = "MINIO_UPDATE"
|
||||
EnvWormState = "MINIO_WORM_STATE"
|
||||
|
||||
EnvWorm = "MINIO_WORM" // legacy
|
||||
EnvRegion = "MINIO_REGION" // legacy
|
||||
)
|
||||
|
||||
@@ -63,13 +63,13 @@ var (
|
||||
ErrInvalidCacheExpiryValue = newErrFn(
|
||||
"Invalid cache expiry value",
|
||||
"Please check the passed value",
|
||||
"MINIO_CACHE_EXPIRY: Valid cache expiry duration is in days",
|
||||
"MINIO_CACHE_EXPIRY: Valid cache expiry duration must be in days",
|
||||
)
|
||||
|
||||
ErrInvalidCacheMaxUse = newErrFn(
|
||||
"Invalid cache max-use value",
|
||||
ErrInvalidCacheQuota = newErrFn(
|
||||
"Invalid cache quota value",
|
||||
"Please check the passed value",
|
||||
"MINIO_CACHE_MAXUSE: Valid cache max-use value between 0-100",
|
||||
"MINIO_CACHE_QUOTA: Valid cache quota value must be between 0-100",
|
||||
)
|
||||
|
||||
ErrInvalidCacheEncryptionKey = newErrFn(
|
||||
@@ -215,7 +215,7 @@ Example 1:
|
||||
ErrInvalidCompressionIncludesValue = newErrFn(
|
||||
"Invalid compression include value",
|
||||
"Please check the passed value",
|
||||
"Compress extensions/mime-types are delimited by `,`. For eg, MINIO_COMPRESS_ATTR=\"A,B,C\"",
|
||||
"Compress extensions/mime-types are delimited by `,`. For eg, MINIO_COMPRESS_MIME_TYPES=\"A,B,C\"",
|
||||
)
|
||||
|
||||
ErrInvalidGWSSEValue = newErrFn(
|
||||
|
||||
57 cmd/config/help.go (new file)
@@ -0,0 +1,57 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"text/template"
|
||||
|
||||
"github.com/minio/minio/pkg/color"
|
||||
)
|
||||
|
||||
// HelpKV - implements help messages for keys
|
||||
// with value as description of the keys.
|
||||
type HelpKV map[string]string
|
||||
|
||||
// Help template used by all sub-systems
|
||||
const Help = `{{colorBlueBold "Key"}}{{"\t"}}{{colorBlueBold "Description"}}
|
||||
{{colorYellowBold "----"}}{{"\t"}}{{colorYellowBold "----"}}
|
||||
{{range $key, $value := .}}{{colorCyanBold $key}}{{ "\t" }}{{$value}}
|
||||
{{end}}`
|
||||
|
||||
var funcMap = template.FuncMap{
|
||||
"colorBlueBold": color.BlueBold,
|
||||
"colorYellowBold": color.YellowBold,
|
||||
"colorCyanBold": color.CyanBold,
|
||||
"colorGreenBold": color.GreenBold,
|
||||
}
|
||||
|
||||
// HelpTemplate - captures config help template
|
||||
var HelpTemplate = template.Must(template.New("config-help").Funcs(funcMap).Parse(Help))
|
||||
|
||||
// Region and Worm help is documented in default config
|
||||
var (
|
||||
RegionHelp = HelpKV{
|
||||
RegionName: `Region name of this deployment, eg: "us-west-2"`,
|
||||
State: "Indicates if config region is honored or ignored",
|
||||
Comment: "A comment to describe the region setting",
|
||||
}
|
||||
|
||||
WormHelp = HelpKV{
|
||||
State: `Indicates if worm is "on" or "off"`,
|
||||
Comment: "A comment to describe the worm state",
|
||||
}
|
||||
)
|
||||
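The help maps above are meant to be rendered through HelpTemplate; since the template separates columns with tabs, a text/tabwriter gives aligned output. A minimal sketch rendering the region help:

package main

import (
	"log"
	"os"
	"text/tabwriter"

	"github.com/minio/minio/cmd/config"
)

func main() {
	// Align the tab-separated Key/Description columns emitted by the template.
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
	if err := config.HelpTemplate.Execute(w, config.RegionHelp); err != nil {
		log.Fatalln(err)
	}
	w.Flush()
}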
@@ -24,6 +24,7 @@ import (
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
ldap "gopkg.in/ldap.v3"
|
||||
)
|
||||
@@ -34,16 +35,13 @@ const (
|
||||
|
||||
// Config contains AD/LDAP server connectivity information.
|
||||
type Config struct {
|
||||
IsEnabled bool `json:"enabled"`
|
||||
Enabled bool `json:"enabled"`
|
||||
|
||||
// E.g. "ldap.minio.io:636"
|
||||
ServerAddr string `json:"serverAddr"`
|
||||
|
||||
// STS credentials expiry duration
|
||||
STSExpiryDuration string `json:"stsExpiryDuration"`
|
||||
stsExpiryDuration time.Duration // contains converted value
|
||||
|
||||
RootCAs *x509.CertPool `json:"-"`
|
||||
STSExpiryDuration string `json:"stsExpiryDuration"`
|
||||
|
||||
// Format string for usernames
|
||||
UsernameFormat string `json:"usernameFormat"`
|
||||
@@ -51,6 +49,10 @@ type Config struct {
|
||||
GroupSearchBaseDN string `json:"groupSearchBaseDN"`
|
||||
GroupSearchFilter string `json:"groupSearchFilter"`
|
||||
GroupNameAttribute string `json:"groupNameAttribute"`
|
||||
|
||||
stsExpiryDuration time.Duration // contains converted value
|
||||
tlsSkipVerify bool // allows skipping TLS verification
|
||||
rootCAs *x509.CertPool
|
||||
}
|
||||
|
||||
// LDAP keys and envs.
|
||||
@@ -61,22 +63,43 @@ const (
|
||||
GroupSearchFilter = "group_search_filter"
|
||||
GroupNameAttribute = "group_name_attribute"
|
||||
GroupSearchBaseDN = "group_search_base_dn"
|
||||
TLSSkipVerify = "tls_skip_verify"
|
||||
|
||||
EnvLDAPState = "MINIO_IDENTITY_LDAP_STATE"
|
||||
EnvServerAddr = "MINIO_IDENTITY_LDAP_SERVER_ADDR"
|
||||
EnvSTSExpiry = "MINIO_IDENTITY_LDAP_STS_EXPIRY"
|
||||
EnvTLSSkipVerify = "MINIO_IDENTITY_LDAP_TLS_SKIP_VERIFY"
|
||||
EnvUsernameFormat = "MINIO_IDENTITY_LDAP_USERNAME_FORMAT"
|
||||
EnvGroupSearchFilter = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_FILTER"
|
||||
EnvGroupNameAttribute = "MINIO_IDENTITY_LDAP_GROUP_NAME_ATTRIBUTE"
|
||||
EnvGroupSearchBaseDN = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_BASE_DN"
|
||||
)
|
||||
|
||||
// DefaultKVS - default config for LDAP config
|
||||
var (
|
||||
DefaultKVS = config.KVS{
|
||||
config.State: config.StateOff,
|
||||
config.Comment: "This is a default LDAP configuration",
|
||||
ServerAddr: "",
|
||||
STSExpiry: "1h",
|
||||
UsernameFormat: "",
|
||||
GroupSearchFilter: "",
|
||||
GroupNameAttribute: "",
|
||||
GroupSearchBaseDN: "",
|
||||
TLSSkipVerify: config.StateOff,
|
||||
}
|
||||
)
|
||||
|
||||
// Connect connect to ldap server.
|
||||
func (l *Config) Connect() (ldapConn *ldap.Conn, err error) {
|
||||
if l == nil {
|
||||
// Happens when LDAP is not configured.
|
||||
return
|
||||
}
|
||||
return ldap.DialTLS("tcp", l.ServerAddr, &tls.Config{RootCAs: l.RootCAs})
|
||||
return ldap.DialTLS("tcp", l.ServerAddr, &tls.Config{
|
||||
InsecureSkipVerify: l.tlsSkipVerify,
|
||||
RootCAs: l.rootCAs,
|
||||
})
|
||||
}
|
||||
|
||||
// GetExpiryDuration - return parsed expiry duration.
|
||||
@@ -85,19 +108,26 @@ func (l Config) GetExpiryDuration() time.Duration {
|
||||
}
|
||||
|
||||
// Lookup - initializes LDAP config, overrides config, if any ENV values are set.
|
||||
func Lookup(cfg Config, rootCAs *x509.CertPool) (l Config, err error) {
|
||||
if cfg.ServerAddr == "" && cfg.IsEnabled {
|
||||
return l, errors.New("ldap server cannot initialize with empty LDAP server")
|
||||
func Lookup(kvs config.KVS, rootCAs *x509.CertPool) (l Config, err error) {
|
||||
l = Config{}
|
||||
if err = config.CheckValidKeys(config.IdentityLDAPSubSys, kvs, DefaultKVS); err != nil {
|
||||
return l, err
|
||||
}
|
||||
l.RootCAs = rootCAs
|
||||
ldapServer := env.Get(EnvServerAddr, cfg.ServerAddr)
|
||||
stateBool, err := config.ParseBool(env.Get(EnvLDAPState, kvs.Get(config.State)))
|
||||
if err != nil {
|
||||
return l, err
|
||||
}
|
||||
if !stateBool {
|
||||
return l, nil
|
||||
}
|
||||
ldapServer := env.Get(EnvServerAddr, kvs.Get(ServerAddr))
|
||||
if ldapServer == "" {
|
||||
return l, nil
|
||||
}
|
||||
l.IsEnabled = true
|
||||
l.Enabled = true
|
||||
l.ServerAddr = ldapServer
|
||||
l.stsExpiryDuration = defaultLDAPExpiry
|
||||
if v := env.Get(EnvSTSExpiry, cfg.STSExpiryDuration); v != "" {
|
||||
if v := env.Get(EnvSTSExpiry, kvs.Get(STSExpiry)); v != "" {
|
||||
expDur, err := time.ParseDuration(v)
|
||||
if err != nil {
|
||||
return l, errors.New("LDAP expiry time err:" + err.Error())
|
||||
@@ -108,21 +138,28 @@ func Lookup(cfg Config, rootCAs *x509.CertPool) (l Config, err error) {
|
||||
l.STSExpiryDuration = v
|
||||
l.stsExpiryDuration = expDur
|
||||
}
|
||||
|
||||
if v := env.Get(EnvUsernameFormat, cfg.UsernameFormat); v != "" {
|
||||
if v := env.Get(EnvTLSSkipVerify, kvs.Get(TLSSkipVerify)); v != "" {
|
||||
l.tlsSkipVerify, err = config.ParseBool(v)
|
||||
if err != nil {
|
||||
return l, err
|
||||
}
|
||||
}
|
||||
if v := env.Get(EnvUsernameFormat, kvs.Get(UsernameFormat)); v != "" {
|
||||
subs, err := NewSubstituter("username", "test")
|
||||
if err != nil {
|
||||
return l, err
|
||||
}
|
||||
if _, err := subs.Substitute(v); err != nil {
|
||||
return l, fmt.Errorf("Only username may be substituted in the username format: %s", err)
|
||||
return l, err
|
||||
}
|
||||
l.UsernameFormat = v
|
||||
} else {
|
||||
return l, fmt.Errorf("'%s' cannot be empty and must have a value", UsernameFormat)
|
||||
}
|
||||
|
||||
grpSearchFilter := env.Get(EnvGroupSearchFilter, cfg.GroupSearchFilter)
|
||||
grpSearchNameAttr := env.Get(EnvGroupNameAttribute, cfg.GroupNameAttribute)
|
||||
grpSearchBaseDN := env.Get(EnvGroupSearchBaseDN, cfg.GroupSearchBaseDN)
|
||||
grpSearchFilter := env.Get(EnvGroupSearchFilter, kvs.Get(GroupSearchFilter))
|
||||
grpSearchNameAttr := env.Get(EnvGroupNameAttribute, kvs.Get(GroupNameAttribute))
|
||||
grpSearchBaseDN := env.Get(EnvGroupSearchBaseDN, kvs.Get(GroupSearchBaseDN))
|
||||
|
||||
// Either all group params must be set or none must be set.
|
||||
allNotSet := grpSearchFilter == "" && grpSearchNameAttr == "" && grpSearchBaseDN == ""
|
||||
@@ -150,7 +187,9 @@ func Lookup(cfg Config, rootCAs *x509.CertPool) (l Config, err error) {
|
||||
}
|
||||
l.GroupSearchBaseDN = grpSearchBaseDN
|
||||
}
|
||||
return
|
||||
|
||||
l.rootCAs = rootCAs
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// Substituter - This type is to allow restricted runtime
|
||||
@@ -180,6 +219,10 @@ func NewSubstituter(v ...string) (Substituter, error) {
|
||||
//
|
||||
// subber.Substitute("uid=${username},cn=users,dc=example,dc=com")
|
||||
//
|
||||
// or
|
||||
//
|
||||
// subber.Substitute("uid={username},cn=users,dc=example,dc=com")
|
||||
//
|
||||
// returns "uid=john,cn=users,dc=example,dc=com"
|
||||
//
|
||||
// whereas:
|
||||
@@ -189,11 +232,13 @@ func NewSubstituter(v ...string) (Substituter, error) {
|
||||
// returns an error.
|
||||
func (s *Substituter) Substitute(t string) (string, error) {
|
||||
for k, v := range s.vals {
|
||||
re := regexp.MustCompile(fmt.Sprintf(`\$\{%s\}`, k))
|
||||
t = re.ReplaceAllLiteralString(t, v)
|
||||
reDollar := regexp.MustCompile(fmt.Sprintf(`\$\{%s\}`, k))
|
||||
t = reDollar.ReplaceAllLiteralString(t, v)
|
||||
reFlower := regexp.MustCompile(fmt.Sprintf(`\{%s\}`, k))
|
||||
t = reFlower.ReplaceAllLiteralString(t, v)
|
||||
}
|
||||
// Check if all requested substitutions have been made.
|
||||
re := regexp.MustCompile(`\$\{.*\}`)
|
||||
re := regexp.MustCompile(`\{.*\}`)
|
||||
if re.MatchString(t) {
|
||||
return "", errors.New("unsupported substitution requested")
|
||||
}
|
||||
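With the change above, both the ${username} and the newer {username} placeholder styles are substituted, and any brace-delimited token left over after substitution is rejected. A short usage sketch, assuming the exported NewSubstituter and Substitute:

package main

import (
	"fmt"
	"log"

	"github.com/minio/minio/cmd/config/identity/ldap"
)

func main() {
	subs, err := ldap.NewSubstituter("username", "john")
	if err != nil {
		log.Fatalln(err)
	}
	// {username} (or ${username}) resolves to the supplied value.
	dn, err := subs.Substitute("uid={username},cn=users,dc=example,dc=com")
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(dn) // uid=john,cn=users,dc=example,dc=com
}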
@@ -17,7 +17,6 @@
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@@ -34,6 +33,29 @@ func TestSubstituter(t *testing.T) {
|
||||
SubstitutedStr: "uid=john,cn=users,dc=example,dc=com",
|
||||
ErrExpected: false,
|
||||
},
|
||||
{
|
||||
KV: []string{"username", "john"},
|
||||
SubstitutableStr: "uid={username},cn=users,dc=example,dc=com",
|
||||
SubstitutedStr: "uid=john,cn=users,dc=example,dc=com",
|
||||
ErrExpected: false,
|
||||
},
|
||||
{
|
||||
KV: []string{"username", "john"},
|
||||
SubstitutableStr: "(&(objectclass=group)(member=${username}))",
|
||||
SubstitutedStr: "(&(objectclass=group)(member=john))",
|
||||
ErrExpected: false,
|
||||
},
|
||||
{
|
||||
KV: []string{"username", "john"},
|
||||
SubstitutableStr: "(&(objectclass=group)(member={username}))",
|
||||
SubstitutedStr: "(&(objectclass=group)(member=john))",
|
||||
ErrExpected: false,
|
||||
},
|
||||
{
|
||||
KV: []string{"username", "john"},
|
||||
SubstitutableStr: "uid=${{username}},cn=users,dc=example,dc=com",
|
||||
ErrExpected: true,
|
||||
},
|
||||
{
|
||||
KV: []string{"username", "john"},
|
||||
SubstitutableStr: "uid=${usernamedn},cn=users,dc=example,dc=com",
|
||||
@@ -46,9 +68,9 @@ func TestSubstituter(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) {
|
||||
t.Run(test.SubstitutableStr, func(t *testing.T) {
|
||||
subber, err := NewSubstituter(test.KV...)
|
||||
if err != nil && !test.ErrExpected {
|
||||
t.Errorf("Unexpected failure %s", err)
|
||||
34 cmd/config/identity/ldap/help.go (new file)
@@ -0,0 +1,34 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package ldap
|
||||
|
||||
import "github.com/minio/minio/cmd/config"
|
||||
|
||||
// Help template for Ldap identity feature.
|
||||
var (
|
||||
Help = config.HelpKV{
|
||||
ServerAddr: `(Required) AD/LDAP server address eg: "myldapserver.com:636"`,
|
||||
UsernameFormat: `(Required) AD/LDAP format of full username DN eg: "uid={username},cn=accounts,dc=myldapserver,dc=com"`,
|
||||
GroupSearchFilter: `Search filter to find groups of a user (optional) eg: "(&(objectclass=groupOfNames)(member={usernamedn}))"`,
|
||||
GroupNameAttribute: `Attribute of search results to use as group name (optional) eg: "cn"`,
|
||||
GroupSearchBaseDN: `Base DN in AD/LDAP hierarchy to use in search requests (optional) eg: "dc=myldapserver,dc=com"`,
|
||||
STSExpiry: `AD/LDAP STS credentials validity duration (optional) eg: "1h"`,
|
||||
TLSSkipVerify: "Set this to 'on', to disable client verification of server certificates",
|
||||
config.State: "(Required) Enable or disable LDAP/AD identity",
|
||||
config.Comment: "A comment to describe the LDAP/AD identity setting",
|
||||
}
|
||||
)
|
||||
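Mirroring the help text above, only the server address and the username format are required once the state is switched on; everything else has a usable default. A hedged sketch of configuring the LDAP identity sub-system from the environment, reusing the placeholder hostname and DN format from the help strings:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/minio/minio/cmd/config/identity/ldap"
)

func main() {
	os.Setenv("MINIO_IDENTITY_LDAP_STATE", "on")
	os.Setenv("MINIO_IDENTITY_LDAP_SERVER_ADDR", "myldapserver.com:636")
	os.Setenv("MINIO_IDENTITY_LDAP_USERNAME_FORMAT",
		"uid={username},cn=accounts,dc=myldapserver,dc=com")

	// nil root CAs: rely on defaults for this sketch.
	cfg, err := ldap.Lookup(ldap.DefaultKVS, nil)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(cfg.Enabled, cfg.ServerAddr)
}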
38 cmd/config/identity/ldap/legacy.go (new file)
@@ -0,0 +1,38 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package ldap
|
||||
|
||||
import "github.com/minio/minio/cmd/config"
|
||||
|
||||
// SetIdentityLDAP - One time migration code needed, for migrating from older config to new for LDAPConfig.
|
||||
func SetIdentityLDAP(s config.Config, ldapArgs Config) {
|
||||
s[config.IdentityLDAPSubSys][config.Default] = config.KVS{
|
||||
config.State: func() string {
|
||||
if !ldapArgs.Enabled {
|
||||
return config.StateOff
|
||||
}
|
||||
return config.StateOn
|
||||
}(),
|
||||
config.Comment: "Settings for LDAP, after migrating config",
|
||||
ServerAddr: ldapArgs.ServerAddr,
|
||||
STSExpiry: ldapArgs.STSExpiryDuration,
|
||||
UsernameFormat: ldapArgs.UsernameFormat,
|
||||
GroupSearchFilter: ldapArgs.GroupSearchFilter,
|
||||
GroupNameAttribute: ldapArgs.GroupNameAttribute,
|
||||
GroupSearchBaseDN: ldapArgs.GroupSearchBaseDN,
|
||||
}
|
||||
}
|
||||
28 cmd/config/identity/openid/help.go (new file)
@@ -0,0 +1,28 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package openid
|
||||
|
||||
import "github.com/minio/minio/cmd/config"
|
||||
|
||||
// Help template for OpenID identity feature.
|
||||
var (
|
||||
Help = config.HelpKV{
|
||||
ConfigURL: `OpenID discovery document endpoint. eg: "https://accounts.google.com/.well-known/openid-configuration"`,
|
||||
config.State: "Indicates if OpenID identity is enabled or not",
|
||||
config.Comment: "A comment to describe the OpenID identity setting",
|
||||
}
|
||||
)
|
||||
137 cmd/config/identity/openid/jwks.go (new file)
@@ -0,0 +1,137 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package openid
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// JWKS - https://tools.ietf.org/html/rfc7517
|
||||
type JWKS struct {
|
||||
Keys []*JWKS `json:"keys,omitempty"`
|
||||
|
||||
Kty string `json:"kty"`
|
||||
Use string `json:"use,omitempty"`
|
||||
Kid string `json:"kid,omitempty"`
|
||||
Alg string `json:"alg,omitempty"`
|
||||
|
||||
Crv string `json:"crv,omitempty"`
|
||||
X string `json:"x,omitempty"`
|
||||
Y string `json:"y,omitempty"`
|
||||
D string `json:"d,omitempty"`
|
||||
N string `json:"n,omitempty"`
|
||||
E string `json:"e,omitempty"`
|
||||
K string `json:"k,omitempty"`
|
||||
}
|
||||
|
||||
func safeDecode(str string) ([]byte, error) {
|
||||
lenMod4 := len(str) % 4
|
||||
if lenMod4 > 0 {
|
||||
str = str + strings.Repeat("=", 4-lenMod4)
|
||||
}
|
||||
|
||||
return base64.URLEncoding.DecodeString(str)
|
||||
}
|
||||
|
||||
var (
|
||||
errMalformedJWKRSAKey = errors.New("malformed JWK RSA key")
|
||||
errMalformedJWKECKey = errors.New("malformed JWK EC key")
|
||||
)
|
||||
|
||||
// DecodePublicKey - decodes JSON Web Key (JWK) as public key
|
||||
func (key *JWKS) DecodePublicKey() (crypto.PublicKey, error) {
|
||||
switch key.Kty {
|
||||
case "RSA":
|
||||
if key.N == "" || key.E == "" {
|
||||
return nil, errMalformedJWKRSAKey
|
||||
}
|
||||
|
||||
// decode exponent
|
||||
data, err := safeDecode(key.E)
|
||||
if err != nil {
|
||||
return nil, errMalformedJWKRSAKey
|
||||
}
|
||||
|
||||
if len(data) < 4 {
|
||||
ndata := make([]byte, 4)
|
||||
copy(ndata[4-len(data):], data)
|
||||
data = ndata
|
||||
}
|
||||
|
||||
pubKey := &rsa.PublicKey{
|
||||
N: &big.Int{},
|
||||
E: int(binary.BigEndian.Uint32(data[:])),
|
||||
}
|
||||
|
||||
data, err = safeDecode(key.N)
|
||||
if err != nil {
|
||||
return nil, errMalformedJWKRSAKey
|
||||
}
|
||||
pubKey.N.SetBytes(data)
|
||||
|
||||
return pubKey, nil
|
||||
case "EC":
|
||||
if key.Crv == "" || key.X == "" || key.Y == "" {
|
||||
return nil, errMalformedJWKECKey
|
||||
}
|
||||
|
||||
var curve elliptic.Curve
|
||||
switch key.Crv {
|
||||
case "P-224":
|
||||
curve = elliptic.P224()
|
||||
case "P-256":
|
||||
curve = elliptic.P256()
|
||||
case "P-384":
|
||||
curve = elliptic.P384()
|
||||
case "P-521":
|
||||
curve = elliptic.P521()
|
||||
default:
|
||||
return nil, fmt.Errorf("Unknown curve type: %s", key.Crv)
|
||||
}
|
||||
|
||||
pubKey := &ecdsa.PublicKey{
|
||||
Curve: curve,
|
||||
X: &big.Int{},
|
||||
Y: &big.Int{},
|
||||
}
|
||||
|
||||
data, err := safeDecode(key.X)
|
||||
if err != nil {
|
||||
return nil, errMalformedJWKECKey
|
||||
}
|
||||
pubKey.X.SetBytes(data)
|
||||
|
||||
data, err = safeDecode(key.Y)
|
||||
if err != nil {
|
||||
return nil, errMalformedJWKECKey
|
||||
}
|
||||
pubKey.Y.SetBytes(data)
|
||||
|
||||
return pubKey, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("Unknown JWK key type %s", key.Kty)
|
||||
}
|
||||
}
|
||||
103 cmd/config/identity/openid/jwks_test.go (new file)
@@ -0,0 +1,103 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package openid
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// A.1 - Example public keys
|
||||
func TestPublicKey(t *testing.T) {
|
||||
const jsonkey = `{"keys":
|
||||
[
|
||||
{"kty":"EC",
|
||||
"crv":"P-256",
|
||||
"x":"MKBCTNIcKUSDii11ySs3526iDZ8AiTo7Tu6KPAqv7D4",
|
||||
"y":"4Etl6SRW2YiLUrN5vfvVHuhp7x8PxltmWWlbbM4IFyM",
|
||||
"use":"enc",
|
||||
"kid":"1"},
|
||||
|
||||
{"kty":"RSA",
|
||||
"n": "0vx7agoebGcQSuuPiLJXZptN9nndrQmbXEps2aiAFbWhM78LhWx4cbbfAAtVT86zwu1RK7aPFFxuhDR1L6tSoc_BJECPebWKRXjBZCiFV4n3oknjhMstn64tZ_2W-5JsGY4Hc5n9yBXArwl93lqt7_RN5w6Cf0h4QyQ5v-65YGjQR0_FDW2QvzqY368QQMicAtaSqzs8KJZgnYb9c7d0zgdAZHzu6qMQvRL5hajrn1n91CbOpbISD08qNLyrdkt-bFTWhAI4vMQFh6WeZu0fM4lFd2NcRwr3XPksINHaQ-G_xBniIqbw0Ls1jF44-csFCur-kEgU8awapJzKnqDKgw",
|
||||
"e":"AQAB",
|
||||
"alg":"RS256",
|
||||
"kid":"2011-04-29"}
|
||||
]
|
||||
}`
|
||||
|
||||
var jk JWKS
|
||||
if err := json.Unmarshal([]byte(jsonkey), &jk); err != nil {
|
||||
t.Fatal("Unmarshal: ", err)
|
||||
} else if len(jk.Keys) != 2 {
|
||||
t.Fatalf("Expected 2 keys, got %d", len(jk.Keys))
|
||||
}
|
||||
|
||||
keys := make([]crypto.PublicKey, len(jk.Keys))
|
||||
for ii, jks := range jk.Keys {
|
||||
var err error
|
||||
keys[ii], err = jks.DecodePublicKey()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to decode key %d: %v", ii, err)
|
||||
}
|
||||
}
|
||||
|
||||
if key0, ok := keys[0].(*ecdsa.PublicKey); !ok {
|
||||
t.Fatalf("Expected ECDSA key[0], got %T", keys[0])
|
||||
} else if key1, ok := keys[1].(*rsa.PublicKey); !ok {
|
||||
t.Fatalf("Expected RSA key[1], got %T", keys[1])
|
||||
} else if key0.Curve != elliptic.P256() {
|
||||
t.Fatal("Key[0] is not using P-256 curve")
|
||||
} else if !bytes.Equal(key0.X.Bytes(), []byte{0x30, 0xa0, 0x42, 0x4c, 0xd2,
|
||||
0x1c, 0x29, 0x44, 0x83, 0x8a, 0x2d, 0x75, 0xc9, 0x2b, 0x37, 0xe7, 0x6e, 0xa2,
|
||||
0xd, 0x9f, 0x0, 0x89, 0x3a, 0x3b, 0x4e, 0xee, 0x8a, 0x3c, 0xa, 0xaf, 0xec, 0x3e}) {
|
||||
t.Fatalf("Bad key[0].X, got %v", key0.X.Bytes())
|
||||
} else if !bytes.Equal(key0.Y.Bytes(), []byte{0xe0, 0x4b, 0x65, 0xe9, 0x24,
|
||||
0x56, 0xd9, 0x88, 0x8b, 0x52, 0xb3, 0x79, 0xbd, 0xfb, 0xd5, 0x1e, 0xe8,
|
||||
0x69, 0xef, 0x1f, 0xf, 0xc6, 0x5b, 0x66, 0x59, 0x69, 0x5b, 0x6c, 0xce,
|
||||
0x8, 0x17, 0x23}) {
|
||||
t.Fatalf("Bad key[0].Y, got %v", key0.Y.Bytes())
|
||||
} else if key1.E != 0x10001 {
|
||||
t.Fatalf("Bad key[1].E: %d", key1.E)
|
||||
} else if !bytes.Equal(key1.N.Bytes(), []byte{0xd2, 0xfc, 0x7b, 0x6a, 0xa, 0x1e,
|
||||
0x6c, 0x67, 0x10, 0x4a, 0xeb, 0x8f, 0x88, 0xb2, 0x57, 0x66, 0x9b, 0x4d, 0xf6,
|
||||
0x79, 0xdd, 0xad, 0x9, 0x9b, 0x5c, 0x4a, 0x6c, 0xd9, 0xa8, 0x80, 0x15, 0xb5,
|
||||
0xa1, 0x33, 0xbf, 0xb, 0x85, 0x6c, 0x78, 0x71, 0xb6, 0xdf, 0x0, 0xb, 0x55,
|
||||
0x4f, 0xce, 0xb3, 0xc2, 0xed, 0x51, 0x2b, 0xb6, 0x8f, 0x14, 0x5c, 0x6e, 0x84,
|
||||
0x34, 0x75, 0x2f, 0xab, 0x52, 0xa1, 0xcf, 0xc1, 0x24, 0x40, 0x8f, 0x79, 0xb5,
|
||||
0x8a, 0x45, 0x78, 0xc1, 0x64, 0x28, 0x85, 0x57, 0x89, 0xf7, 0xa2, 0x49, 0xe3,
|
||||
0x84, 0xcb, 0x2d, 0x9f, 0xae, 0x2d, 0x67, 0xfd, 0x96, 0xfb, 0x92, 0x6c, 0x19,
|
||||
0x8e, 0x7, 0x73, 0x99, 0xfd, 0xc8, 0x15, 0xc0, 0xaf, 0x9, 0x7d, 0xde, 0x5a,
|
||||
0xad, 0xef, 0xf4, 0x4d, 0xe7, 0xe, 0x82, 0x7f, 0x48, 0x78, 0x43, 0x24, 0x39,
|
||||
0xbf, 0xee, 0xb9, 0x60, 0x68, 0xd0, 0x47, 0x4f, 0xc5, 0xd, 0x6d, 0x90, 0xbf,
|
||||
0x3a, 0x98, 0xdf, 0xaf, 0x10, 0x40, 0xc8, 0x9c, 0x2, 0xd6, 0x92, 0xab, 0x3b,
|
||||
0x3c, 0x28, 0x96, 0x60, 0x9d, 0x86, 0xfd, 0x73, 0xb7, 0x74, 0xce, 0x7, 0x40,
|
||||
0x64, 0x7c, 0xee, 0xea, 0xa3, 0x10, 0xbd, 0x12, 0xf9, 0x85, 0xa8, 0xeb, 0x9f,
|
||||
0x59, 0xfd, 0xd4, 0x26, 0xce, 0xa5, 0xb2, 0x12, 0xf, 0x4f, 0x2a, 0x34, 0xbc,
|
||||
0xab, 0x76, 0x4b, 0x7e, 0x6c, 0x54, 0xd6, 0x84, 0x2, 0x38, 0xbc, 0xc4, 0x5, 0x87,
|
||||
0xa5, 0x9e, 0x66, 0xed, 0x1f, 0x33, 0x89, 0x45, 0x77, 0x63, 0x5c, 0x47, 0xa,
|
||||
0xf7, 0x5c, 0xf9, 0x2c, 0x20, 0xd1, 0xda, 0x43, 0xe1, 0xbf, 0xc4, 0x19, 0xe2,
|
||||
0x22, 0xa6, 0xf0, 0xd0, 0xbb, 0x35, 0x8c, 0x5e, 0x38, 0xf9, 0xcb, 0x5, 0xa, 0xea,
|
||||
0xfe, 0x90, 0x48, 0x14, 0xf1, 0xac, 0x1a, 0xa4, 0x9c, 0xca, 0x9e, 0xa0, 0xca, 0x83}) {
|
||||
t.Fatalf("Bad key[1].N, got %v", key1.N.Bytes())
|
||||
}
|
||||
}
324
cmd/config/identity/openid/jwt.go
Normal file
@@ -0,0 +1,324 @@
/*
|
||||
* MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package openid
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
jwtgo "github.com/dgrijalva/jwt-go"
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
|
||||
// Config - OpenID Config
|
||||
// RSA authentication target arguments
|
||||
type Config struct {
|
||||
JWKS struct {
|
||||
URL *xnet.URL `json:"url"`
|
||||
} `json:"jwks"`
|
||||
URL *xnet.URL `json:"url,omitempty"`
|
||||
ClaimPrefix string `json:"claimPrefix,omitempty"`
|
||||
DiscoveryDoc DiscoveryDoc
|
||||
publicKeys map[string]crypto.PublicKey
|
||||
transport *http.Transport
|
||||
closeRespFn func(io.ReadCloser)
|
||||
}
|
||||
|
||||
// PopulatePublicKey - populates the public keys from the JWKS URL.
|
||||
func (r *Config) PopulatePublicKey() error {
|
||||
if r.JWKS.URL == nil || r.JWKS.URL.String() == "" {
|
||||
return nil
|
||||
}
|
||||
transport := http.DefaultTransport
|
||||
if r.transport != nil {
|
||||
transport = r.transport
|
||||
}
|
||||
client := &http.Client{
|
||||
Transport: transport,
|
||||
}
|
||||
resp, err := client.Get(r.JWKS.URL.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer r.closeRespFn(resp.Body)
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return errors.New(resp.Status)
|
||||
}
|
||||
|
||||
var jwk JWKS
|
||||
if err = json.NewDecoder(resp.Body).Decode(&jwk); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, key := range jwk.Keys {
|
||||
r.publicKeys[key.Kid], err = key.DecodePublicKey()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON - decodes JSON data.
|
||||
func (r *Config) UnmarshalJSON(data []byte) error {
|
||||
// subtype to avoid recursive call to UnmarshalJSON()
|
||||
type subConfig Config
|
||||
var sr subConfig
|
||||
|
||||
if err := json.Unmarshal(data, &sr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ar := Config(sr)
|
||||
if ar.JWKS.URL == nil || ar.JWKS.URL.String() == "" {
|
||||
*r = ar
|
||||
return nil
|
||||
}
|
||||
|
||||
*r = ar
|
||||
return nil
|
||||
}
|
||||
|
||||
// JWT - rs client grants provider details.
|
||||
type JWT struct {
|
||||
Config
|
||||
}
|
||||
|
||||
func expToInt64(expI interface{}) (expAt int64, err error) {
|
||||
switch exp := expI.(type) {
|
||||
case float64:
|
||||
expAt = int64(exp)
|
||||
case int64:
|
||||
expAt = exp
|
||||
case json.Number:
|
||||
expAt, err = exp.Int64()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
default:
|
||||
return 0, ErrInvalidDuration
|
||||
}
|
||||
return expAt, nil
|
||||
}
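
// Illustrative sketch (not in the original file): the "exp" claim can reach
// expToInt64 as a float64, an int64 or a json.Number depending on how the
// claims were decoded, which is why all three cases are handled above.
// Assumes the surrounding openid package with "encoding/json" and "fmt" imported.
func exampleExpToInt64() {
	for _, v := range []interface{}{float64(1700000000), int64(1700000000), json.Number("1700000000")} {
		if expAt, err := expToInt64(v); err == nil {
			fmt.Println(expAt) // 1700000000 for every shape
		}
	}
}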
|
||||
|
||||
// GetDefaultExpiration - returns the expiration seconds expected.
|
||||
func GetDefaultExpiration(dsecs string) (time.Duration, error) {
|
||||
defaultExpiryDuration := time.Duration(60) * time.Minute // Defaults to 1hr.
|
||||
if dsecs != "" {
|
||||
expirySecs, err := strconv.ParseInt(dsecs, 10, 64)
|
||||
if err != nil {
|
||||
return 0, ErrInvalidDuration
|
||||
}
|
||||
// The duration, in seconds, of the role session.
|
||||
// The value can range from 900 seconds (15 minutes)
|
||||
// to 12 hours.
|
||||
if expirySecs < 900 || expirySecs > 43200 {
|
||||
return 0, ErrInvalidDuration
|
||||
}
|
||||
|
||||
defaultExpiryDuration = time.Duration(expirySecs) * time.Second
|
||||
}
|
||||
return defaultExpiryDuration, nil
|
||||
}

// Validate - validates the access token.
func (p *JWT) Validate(token, dsecs string) (map[string]interface{}, error) {
	jp := new(jwtgo.Parser)
	jp.ValidMethods = []string{"RS256", "RS384", "RS512", "ES256", "ES384", "ES512"}

	keyFuncCallback := func(jwtToken *jwtgo.Token) (interface{}, error) {
		kid, ok := jwtToken.Header["kid"].(string)
		if !ok {
			return nil, fmt.Errorf("Invalid kid value %v", jwtToken.Header["kid"])
		}
		return p.publicKeys[kid], nil
	}

	var claims jwtgo.MapClaims
	jwtToken, err := jp.ParseWithClaims(token, &claims, keyFuncCallback)
	if err != nil {
		// On parse failure, refresh the public keys from the JWKS endpoint
		// once and retry before giving up.
		if err = p.PopulatePublicKey(); err != nil {
			return nil, err
		}
		jwtToken, err = jwtgo.ParseWithClaims(token, &claims, keyFuncCallback)
		if err != nil {
			return nil, err
		}
	}

	if !jwtToken.Valid {
		return nil, ErrTokenExpired
	}

	expAt, err := expToInt64(claims["exp"])
	if err != nil {
		return nil, err
	}

	defaultExpiryDuration, err := GetDefaultExpiration(dsecs)
	if err != nil {
		return nil, err
	}

	// Never allow the requested/default expiry to exceed the token's own expiry.
	if time.Unix(expAt, 0).UTC().Sub(time.Now().UTC()) < defaultExpiryDuration {
		defaultExpiryDuration = time.Unix(expAt, 0).UTC().Sub(time.Now().UTC())
	}

	expiry := time.Now().UTC().Add(defaultExpiryDuration).Unix()
	if expAt < expiry {
		// Keep the token's own (earlier) expiry in the returned claims.
		claims["exp"] = strconv.FormatInt(expAt, 10)
	}

	return claims, nil
}
|
||||
|
||||
// ID returns the provider name and authentication type.
|
||||
func (p *JWT) ID() ID {
|
||||
return "jwt"
|
||||
}
|
||||
|
||||
// OpenID keys and envs.
|
||||
const (
|
||||
JwksURL = "jwks_url"
|
||||
ConfigURL = "config_url"
|
||||
ClaimPrefix = "claim_prefix"
|
||||
|
||||
EnvIdentityOpenIDJWKSURL = "MINIO_IDENTITY_OPENID_JWKS_URL"
|
||||
EnvIdentityOpenIDURL = "MINIO_IDENTITY_OPENID_CONFIG_URL"
|
||||
EnvIdentityOpenIDClaimPrefix = "MINIO_IDENTITY_OPENID_CLAIM_PREFIX"
|
||||
)
|
||||
|
||||
// DiscoveryDoc - parses the output from openid-configuration
|
||||
// for example https://accounts.google.com/.well-known/openid-configuration
|
||||
type DiscoveryDoc struct {
|
||||
Issuer string `json:"issuer,omitempty"`
|
||||
AuthEndpoint string `json:"authorization_endpoint,omitempty"`
|
||||
TokenEndpoint string `json:"token_endpoint,omitempty"`
|
||||
UserInfoEndpoint string `json:"userinfo_endpoint,omitempty"`
|
||||
RevocationEndpoint string `json:"revocation_endpoint,omitempty"`
|
||||
JwksURI string `json:"jwks_uri,omitempty"`
|
||||
ResponseTypesSupported []string `json:"response_types_supported,omitempty"`
|
||||
SubjectTypesSupported []string `json:"subject_types_supported,omitempty"`
|
||||
IDTokenSigningAlgValuesSupported []string `json:"id_token_signing_alg_values_supported,omitempty"`
|
||||
ScopesSupported []string `json:"scopes_supported,omitempty"`
|
||||
TokenEndpointAuthMethods []string `json:"token_endpoint_auth_methods_supported,omitempty"`
|
||||
ClaimsSupported []string `json:"claims_supported,omitempty"`
|
||||
CodeChallengeMethodsSupported []string `json:"code_challenge_methods_supported,omitempty"`
|
||||
}
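
// Illustrative sketch (not in the original file): a trimmed-down
// openid-configuration payload decodes into DiscoveryDoc through the JSON tags
// declared above; the issuer URL used here is hypothetical.
func exampleDecodeDiscoveryDoc() {
	raw := `{"issuer":"https://accounts.example.com","jwks_uri":"https://accounts.example.com/jwks","response_types_supported":["code"]}`
	var d DiscoveryDoc
	if err := json.Unmarshal([]byte(raw), &d); err == nil {
		fmt.Println(d.Issuer, d.JwksURI, d.ResponseTypesSupported)
	}
}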
|
||||
|
||||
func parseDiscoveryDoc(u *xnet.URL, transport *http.Transport, closeRespFn func(io.ReadCloser)) (DiscoveryDoc, error) {
|
||||
d := DiscoveryDoc{}
|
||||
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return d, err
|
||||
}
|
||||
clnt := http.Client{
|
||||
Transport: transport,
|
||||
}
|
||||
resp, err := clnt.Do(req)
|
||||
if err != nil {
|
||||
return d, err
|
||||
}
|
||||
defer closeRespFn(resp.Body)
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return d, errors.New(resp.Status)
|
||||
}
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
if err = dec.Decode(&d); err != nil {
|
||||
return d, err
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// DefaultKVS - default config for OpenID config
|
||||
var (
|
||||
DefaultKVS = config.KVS{
|
||||
config.State: config.StateOff,
|
||||
config.Comment: "This is a default OpenID configuration",
|
||||
JwksURL: "",
|
||||
ConfigURL: "",
|
||||
ClaimPrefix: "",
|
||||
}
|
||||
)
|
||||
|
||||
// LookupConfig lookup jwks from config, override with any ENVs.
|
||||
func LookupConfig(kv config.KVS, transport *http.Transport, closeRespFn func(io.ReadCloser)) (c Config, err error) {
|
||||
if err = config.CheckValidKeys(config.IdentityOpenIDSubSys, kv, DefaultKVS); err != nil {
|
||||
return c, err
|
||||
}
|
||||
|
||||
stateBool, err := config.ParseBool(kv.Get(config.State))
|
||||
if err != nil {
|
||||
return c, err
|
||||
}
|
||||
if !stateBool {
|
||||
return c, nil
|
||||
}
|
||||
|
||||
c = Config{
|
||||
ClaimPrefix: env.Get(EnvIdentityOpenIDClaimPrefix, kv.Get(ClaimPrefix)),
|
||||
publicKeys: make(map[string]crypto.PublicKey),
|
||||
transport: transport,
|
||||
closeRespFn: closeRespFn,
|
||||
}
|
||||
|
||||
jwksURL := env.Get(EnvIamJwksURL, "") // Legacy
|
||||
if jwksURL == "" {
|
||||
jwksURL = env.Get(EnvIdentityOpenIDJWKSURL, kv.Get(JwksURL))
|
||||
}
|
||||
|
||||
configURL := env.Get(EnvIdentityOpenIDURL, kv.Get(ConfigURL))
|
||||
if configURL != "" {
|
||||
c.URL, err = xnet.ParseURL(configURL)
|
||||
if err != nil {
|
||||
return c, err
|
||||
}
|
||||
c.DiscoveryDoc, err = parseDiscoveryDoc(c.URL, transport, closeRespFn)
|
||||
if err != nil {
|
||||
return c, err
|
||||
}
|
||||
}
|
||||
if jwksURL == "" {
|
||||
// Fallback to discovery document jwksURL
|
||||
jwksURL = c.DiscoveryDoc.JwksURI
|
||||
}
|
||||
if jwksURL != "" {
|
||||
c.JWKS.URL, err = xnet.ParseURL(jwksURL)
|
||||
if err != nil {
|
||||
return c, err
|
||||
}
|
||||
if err = c.PopulatePublicKey(); err != nil {
|
||||
return c, err
|
||||
}
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// NewJWT - initialize new jwt authenticator.
|
||||
func NewJWT(c Config) *JWT {
|
||||
return &JWT{c}
|
||||
}
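
// Illustrative sketch (not in the original file): how a caller might wire the
// pieces above together using this file's existing imports. The kvs and
// rawIDToken values are hypothetical.
func exampleOpenIDSetup(kvs config.KVS, rawIDToken string) (map[string]interface{}, error) {
	closeFn := func(rc io.ReadCloser) { rc.Close() }
	cfg, err := LookupConfig(kvs, http.DefaultTransport.(*http.Transport), closeFn)
	if err != nil {
		return nil, err
	}
	provider := NewJWT(cfg)                  // satisfies the Validator interface
	return provider.Validate(rawIDToken, "") // "" falls back to the 1h default expiry
}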
120
cmd/config/identity/openid/jwt_test.go
Normal file
@@ -0,0 +1,120 @@
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package openid
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
|
||||
func TestJWT(t *testing.T) {
|
||||
const jsonkey = `{"keys":
|
||||
[
|
||||
{"kty":"RSA",
|
||||
"n": "0vx7agoebGcQSuuPiLJXZptN9nndrQmbXEps2aiAFbWhM78LhWx4cbbfAAtVT86zwu1RK7aPFFxuhDR1L6tSoc_BJECPebWKRXjBZCiFV4n3oknjhMstn64tZ_2W-5JsGY4Hc5n9yBXArwl93lqt7_RN5w6Cf0h4QyQ5v-65YGjQR0_FDW2QvzqY368QQMicAtaSqzs8KJZgnYb9c7d0zgdAZHzu6qMQvRL5hajrn1n91CbOpbISD08qNLyrdkt-bFTWhAI4vMQFh6WeZu0fM4lFd2NcRwr3XPksINHaQ-G_xBniIqbw0Ls1jF44-csFCur-kEgU8awapJzKnqDKgw",
|
||||
"e":"AQAB",
|
||||
"alg":"RS256",
|
||||
"kid":"2011-04-29"}
|
||||
]
|
||||
}`
|
||||
|
||||
var jk JWKS
|
||||
if err := json.Unmarshal([]byte(jsonkey), &jk); err != nil {
|
||||
t.Fatal("Unmarshal: ", err)
|
||||
} else if len(jk.Keys) != 1 {
|
||||
t.Fatalf("Expected 1 keys, got %d", len(jk.Keys))
|
||||
}
|
||||
|
||||
keys := make(map[string]crypto.PublicKey, len(jk.Keys))
|
||||
for ii, jks := range jk.Keys {
|
||||
var err error
|
||||
keys[jks.Kid], err = jks.DecodePublicKey()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to decode key %d: %v", ii, err)
|
||||
}
|
||||
}
|
||||
|
||||
u1, err := xnet.ParseURL("http://localhost:8443")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cfg := Config{}
|
||||
cfg.JWKS.URL = u1
|
||||
cfg.publicKeys = keys
|
||||
jwt := NewJWT(cfg)
|
||||
if jwt.ID() != "jwt" {
|
||||
t.Fatalf("Uexpected id %s for the validator", jwt.ID())
|
||||
}
|
||||
|
||||
u, err := url.Parse("http://localhost:8443/?Token=invalid")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := jwt.Validate(u.Query().Get("Token"), ""); err == nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefaultExpiryDuration(t *testing.T) {
|
||||
testCases := []struct {
|
||||
reqURL string
|
||||
duration time.Duration
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
reqURL: "http://localhost:8443/?Token=xxxxx",
|
||||
duration: time.Duration(60) * time.Minute,
|
||||
},
|
||||
{
|
||||
reqURL: "http://localhost:8443/?DurationSeconds=9s",
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
reqURL: "http://localhost:8443/?DurationSeconds=43201",
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
reqURL: "http://localhost:8443/?DurationSeconds=800",
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
reqURL: "http://localhost:8443/?DurationSeconds=901",
|
||||
duration: time.Duration(901) * time.Second,
|
||||
},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
u, err := url.Parse(testCase.reqURL)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
d, err := GetDefaultExpiration(u.Query().Get("DurationSeconds"))
|
||||
gotErr := (err != nil)
|
||||
if testCase.expectErr != gotErr {
|
||||
t.Errorf("Test %d: Expected %v, got %v with error %s", i+1, testCase.expectErr, gotErr, err)
|
||||
}
|
||||
if d != testCase.duration {
|
||||
t.Errorf("Test %d: Expected duration %d, got %d", i+1, testCase.duration, d)
|
||||
}
|
||||
}
|
||||
}
48
cmd/config/identity/openid/legacy.go
Normal file
@@ -0,0 +1,48 @@
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package openid
|
||||
|
||||
import "github.com/minio/minio/cmd/config"
|
||||
|
||||
// Legacy envs
|
||||
const (
|
||||
EnvIamJwksURL = "MINIO_IAM_JWKS_URL"
|
||||
)
|
||||
|
||||
// SetIdentityOpenID - One time migration code needed, for migrating from older config to new for OpenIDConfig.
|
||||
func SetIdentityOpenID(s config.Config, cfg Config) {
|
||||
s[config.IdentityOpenIDSubSys][config.Default] = config.KVS{
|
||||
config.State: func() string {
|
||||
if cfg.JWKS.URL == nil {
|
||||
return config.StateOff
|
||||
}
|
||||
if cfg.JWKS.URL.String() == "" {
|
||||
return config.StateOff
|
||||
}
|
||||
return config.StateOn
|
||||
}(),
|
||||
config.Comment: "Settings for OpenID, after migrating config",
|
||||
JwksURL: func() string {
|
||||
if cfg.JWKS.URL != nil {
|
||||
return cfg.JWKS.URL.String()
|
||||
}
|
||||
return ""
|
||||
}(),
|
||||
ConfigURL: "",
|
||||
ClaimPrefix: "",
|
||||
}
|
||||
}
92
cmd/config/identity/openid/validators.go
Normal file
@@ -0,0 +1,92 @@
/*
|
||||
* MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package openid
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ID - holds identification name authentication validator target.
|
||||
type ID string
|
||||
|
||||
// Validator interface describes basic implementation
|
||||
// requirements of various authentication providers.
|
||||
type Validator interface {
|
||||
// Validate is a custom validator function for this provider,
|
||||
// each validation is authenticationType or provider specific.
|
||||
Validate(token string, duration string) (map[string]interface{}, error)
|
||||
|
||||
// ID returns provider name of this provider.
|
||||
ID() ID
|
||||
}
|
||||
|
||||
// ErrTokenExpired - error token expired
|
||||
var (
|
||||
ErrTokenExpired = errors.New("token expired")
|
||||
ErrInvalidDuration = errors.New("duration higher than token expiry")
|
||||
)
|
||||
|
||||
// Validators - holds list of providers indexed by provider id.
|
||||
type Validators struct {
|
||||
sync.RWMutex
|
||||
providers map[ID]Validator
|
||||
}
|
||||
|
||||
// Add - adds unique provider to provider list.
|
||||
func (list *Validators) Add(provider Validator) error {
|
||||
list.Lock()
|
||||
defer list.Unlock()
|
||||
|
||||
if _, ok := list.providers[provider.ID()]; ok {
|
||||
return fmt.Errorf("provider %v already exists", provider.ID())
|
||||
}
|
||||
|
||||
list.providers[provider.ID()] = provider
|
||||
return nil
|
||||
}
|
||||
|
||||
// List - returns available provider IDs.
|
||||
func (list *Validators) List() []ID {
|
||||
list.RLock()
|
||||
defer list.RUnlock()
|
||||
|
||||
keys := []ID{}
|
||||
for k := range list.providers {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
return keys
|
||||
}
|
||||
|
||||
// Get - returns the provider for the given providerID, if not found
|
||||
// returns an error.
|
||||
func (list *Validators) Get(id ID) (p Validator, err error) {
|
||||
list.RLock()
|
||||
defer list.RUnlock()
|
||||
var ok bool
|
||||
if p, ok = list.providers[id]; !ok {
|
||||
return nil, fmt.Errorf("provider %v doesn't exist", id)
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// NewValidators - creates Validators.
|
||||
func NewValidators() *Validators {
|
||||
return &Validators{providers: make(map[ID]Validator)}
|
||||
}
104
cmd/config/identity/openid/validators_test.go
Normal file
@@ -0,0 +1,104 @@
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package openid
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
|
||||
type errorValidator struct{}
|
||||
|
||||
func (e errorValidator) Validate(token, dsecs string) (map[string]interface{}, error) {
|
||||
return nil, ErrTokenExpired
|
||||
}
|
||||
|
||||
func (e errorValidator) ID() ID {
|
||||
return "err"
|
||||
}
|
||||
|
||||
func TestValidators(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("content-type", "application/json")
|
||||
w.Write([]byte(`{
|
||||
"keys" : [ {
|
||||
"kty" : "RSA",
|
||||
"kid" : "1438289820780",
|
||||
"use" : "sig",
|
||||
"alg" : "RS256",
|
||||
"n" : "idWPro_QiAFOdMsJD163lcDIPogOwXogRo3Pct2MMyeE2GAGqV20Sc8QUbuLDfPl-7Hi9IfFOz--JY6QL5l92eV-GJXkTmidUEooZxIZSp3ghRxLCqlyHeF5LuuM5LPRFDeF4YWFQT_D2eNo_w95g6qYSeOwOwGIfaHa2RMPcQAiM6LX4ot-Z7Po9z0_3ztFa02m3xejEFr2rLRqhFl3FZJaNnwTUk6an6XYsunxMk3Ya3lRaKJReeXeFtfTpShgtPiAl7lIfLJH9h26h2OAlww531DpxHSm1gKXn6bjB0NTC55vJKft4wXoc_0xKZhnWmjQE8d9xE8e1Z3Ll1LYbw",
|
||||
"e" : "AQAB"
|
||||
}, {
|
||||
"kty" : "RSA",
|
||||
"kid" : "1438289856256",
|
||||
"use" : "sig",
|
||||
"alg" : "RS256",
|
||||
"n" : "zo5cKcbFECeiH8eGx2D-DsFSpjSKbTVlXD6uL5JAy9rYIv7eYEP6vrKeX-x1z70yEdvgk9xbf9alc8siDfAz3rLCknqlqL7XGVAQL0ZP63UceDmD60LHOzMrx4eR6p49B3rxFfjvX2SWSV3-1H6XNyLk_ALbG6bGCFGuWBQzPJB4LMKCrOFq-6jtRKOKWBXYgkYkaYs5dG-3e2ULbq-y2RdgxYh464y_-MuxDQfvUgP787XKfcXP_XjJZvyuOEANjVyJYZSOyhHUlSGJapQ8ztHdF-swsnf7YkePJ2eR9fynWV2ZoMaXOdidgZtGTa4R1Z4BgH2C0hKJiqRy9fB7Gw",
|
||||
"e" : "AQAB"
|
||||
} ]
|
||||
}
|
||||
`))
|
||||
w.(http.Flusher).Flush()
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
vrs := NewValidators()
|
||||
|
||||
if err := vrs.Add(&errorValidator{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := vrs.Add(&errorValidator{}); err == nil {
|
||||
t.Fatal("Unexpected should return error for double inserts")
|
||||
}
|
||||
|
||||
if _, err := vrs.Get("unknown"); err == nil {
|
||||
t.Fatal("Unexpected should return error for unknown validators")
|
||||
}
|
||||
|
||||
v, err := vrs.Get("err")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err = v.Validate("", ""); err != ErrTokenExpired {
|
||||
t.Fatalf("Expected error %s, got %s", ErrTokenExpired, err)
|
||||
}
|
||||
|
||||
vids := vrs.List()
|
||||
if len(vids) == 0 || len(vids) > 1 {
|
||||
t.Fatalf("Unexpected number of vids %v", vids)
|
||||
}
|
||||
|
||||
u, err := xnet.ParseURL(ts.URL)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cfg := Config{}
|
||||
cfg.JWKS.URL = u
|
||||
if err = vrs.Add(NewJWT(cfg)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err = vrs.Get("jwt"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
55
cmd/config/legacy.go
Normal file
@@ -0,0 +1,55 @@
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
import "github.com/minio/minio/pkg/auth"
|
||||
|
||||
//// One time migration code section
|
||||
|
||||
// SetCredentials - One time migration code needed, for migrating from older config to new for server credentials.
|
||||
func SetCredentials(c Config, cred auth.Credentials) {
|
||||
c[CredentialsSubSys][Default] = KVS{
|
||||
State: StateOn,
|
||||
Comment: "Settings for credentials, after migrating config",
|
||||
AccessKey: cred.AccessKey,
|
||||
SecretKey: cred.SecretKey,
|
||||
}
|
||||
}
|
||||
|
||||
// SetRegion - One time migration code needed, for migrating from older config to new for server Region.
|
||||
func SetRegion(c Config, name string) {
|
||||
c[RegionSubSys][Default] = KVS{
|
||||
RegionName: name,
|
||||
State: StateOn,
|
||||
Comment: "Settings for Region, after migrating config",
|
||||
}
|
||||
}
|
||||
|
||||
// SetWorm - One time migration code needed, for migrating from older config to new for Worm mode.
|
||||
func SetWorm(c Config, b bool) {
|
||||
// Set the new value.
|
||||
c[WormSubSys][Default] = KVS{
|
||||
State: func() string {
|
||||
if b {
|
||||
return StateOn
|
||||
}
|
||||
return StateOff
|
||||
}(),
|
||||
Comment: "Settings for WORM, after migrating config",
|
||||
}
|
||||
}
|
||||
@@ -16,7 +16,9 @@
|
||||
|
||||
package notify
|
||||
|
||||
import "github.com/minio/minio/pkg/event/target"
|
||||
import (
|
||||
"github.com/minio/minio/pkg/event/target"
|
||||
)
|
||||
|
||||
// Config - notification target configuration structure, holds
|
||||
// information about various notification targets.
160
cmd/config/notify/help.go
Normal file
@@ -0,0 +1,160 @@
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package notify
|
||||
|
||||
import (
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/pkg/event/target"
|
||||
)
|
||||
|
||||
// Help template inputs for all notification targets
|
||||
var (
|
||||
HelpAMQP = config.HelpKV{
|
||||
config.State: "(Required) Is this server endpoint configuration active/enabled",
|
||||
config.Comment: "A comment to describe the AMQP target setting",
|
||||
target.AmqpURL: "(Required) AMQP server endpoint, e.g. `amqp://myuser:mypassword@localhost:5672`",
|
||||
target.AmqpExchange: "Name of the AMQP exchange",
|
||||
target.AmqpExchangeType: "Kind of AMQP exchange type",
|
||||
target.AmqpRoutingKey: "Routing key for publishing",
|
||||
target.AmqpMandatory: "Set this to 'on' for server to return an unroutable message with a Return method. If this flag is 'off', the server silently drops the message",
|
||||
target.AmqpDurable: "Set this to 'on' for the queue to survive broker restarts",
|
||||
target.AmqpNoWait: "When no_wait is 'on', declare without waiting for a confirmation from the server",
|
||||
target.AmqpInternal: "Set this to 'on' for the exchange to be used only when bound to other exchanges, and not directly by publishers",
|
||||
target.AmqpAutoDeleted: "Set this to 'on' for a queue that has had at least one consumer to be deleted when its last consumer unsubscribes",
|
||||
target.AmqpDeliveryMode: "Delivery queue implementation use non-persistent (1) or persistent (2)",
|
||||
target.AmqpQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
|
||||
target.AmqpQueueDir: "Local directory where events are stored eg: '/home/events'",
|
||||
}
|
||||
|
||||
HelpKafka = config.HelpKV{
|
||||
config.State: "(Required) Is this server endpoint configuration active/enabled",
|
||||
config.Comment: "A comment to describe the Kafka target setting",
|
||||
target.KafkaTopic: "The Kafka topic for a given message",
|
||||
target.KafkaBrokers: "Comma separated list of Kafka broker addresses",
|
||||
target.KafkaSASLUsername: "Username for SASL/PLAIN or SASL/SCRAM authentication",
|
||||
target.KafkaSASLPassword: "Password for SASL/PLAIN or SASL/SCRAM authentication",
|
||||
target.KafkaTLSClientAuth: "ClientAuth determines the Kafka server's policy for TLS client auth",
|
||||
target.KafkaSASLEnable: "Set this to 'on' to enable SASL authentication",
|
||||
target.KafkaTLSEnable: "Set this to 'on' to enable TLS",
|
||||
target.KafkaTLSSkipVerify: "Set this to 'on' to disable client verification of server certificate chain",
|
||||
target.KafkaQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
|
||||
target.KafkaQueueDir: "Local directory where events are stored eg: '/home/events'",
|
||||
}
|
||||
|
||||
HelpMQTT = config.HelpKV{
|
||||
config.State: "(Required) Is this server endpoint configuration active/enabled",
|
||||
config.Comment: "A comment to describe the MQTT target setting",
|
||||
target.MqttBroker: "(Required) MQTT server endpoint, e.g. `tcp://localhost:1883`",
|
||||
target.MqttTopic: "(Required) Name of the MQTT topic to publish on, e.g. `minio`",
|
||||
target.MqttUsername: "Username to connect to the MQTT server (if required)",
|
||||
target.MqttPassword: "Password to connect to the MQTT server (if required)",
|
||||
target.MqttQoS: "Set the Quality of Service Level for MQTT endpoint",
|
||||
target.MqttKeepAliveInterval: "Optional keep alive interval for MQTT connections",
|
||||
target.MqttReconnectInterval: "Optional reconnect interval for MQTT connections",
|
||||
target.MqttQueueDir: "Local directory where events are stored eg: '/home/events'",
|
||||
target.MqttQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
|
||||
}
|
||||
|
||||
HelpES = config.HelpKV{
|
||||
config.State: "(Required) Is this server endpoint configuration active/enabled",
|
||||
config.Comment: "A comment to describe the Elasticsearch target setting",
|
||||
target.ElasticURL: "(Required) The Elasticsearch server's address, with optional authentication info",
|
||||
target.ElasticFormat: "(Required) Either `namespace` or `access`, defaults to 'namespace'",
|
||||
target.ElasticIndex: "(Required) The name of an Elasticsearch index in which MinIO will store documents",
|
||||
target.ElasticQueueDir: "Local directory where events are stored eg: '/home/events'",
|
||||
target.ElasticQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
|
||||
}
|
||||
|
||||
HelpWebhook = config.HelpKV{
|
||||
config.State: "(Required) Is this server endpoint configuration active/enabled",
|
||||
config.Comment: "A comment to describe the Webhook target setting",
|
||||
target.WebhookEndpoint: "Webhook server endpoint eg: http://localhost:8080/minio/events",
|
||||
target.WebhookAuthToken: "Authorization token used for webhook server endpoint (optional)",
|
||||
target.WebhookQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
|
||||
target.WebhookQueueDir: "Local directory where events are stored eg: '/home/events'",
|
||||
}
|
||||
|
||||
HelpRedis = config.HelpKV{
|
||||
config.State: "(Required) Is this server endpoint configuration active/enabled",
|
||||
config.Comment: "A comment to describe the Redis target setting",
|
||||
target.RedisFormat: "Specify how data is populated, a hash is used in case of `namespace` format and a list in case of `access` format, defaults to 'namespace'",
|
||||
target.RedisAddress: "(Required) The Redis server's address. For example: `localhost:6379`",
|
||||
target.RedisKey: "The name of the redis key under which events are stored",
|
||||
target.RedisPassword: "(Optional) The Redis server's password",
|
||||
target.RedisQueueDir: "Local directory where events are stored eg: '/home/events'",
|
||||
target.RedisQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
|
||||
}
|
||||
|
||||
HelpPostgres = config.HelpKV{
|
||||
config.State: "(Required) Is this server endpoint configuration active/enabled",
|
||||
config.Comment: "A comment to describe the Postgres target setting",
|
||||
target.PostgresFormat: "Specify how data is populated, `namespace` format and `access` format, defaults to 'namespace'",
|
||||
target.PostgresConnectionString: "Connection string parameters for the PostgreSQL server",
|
||||
target.PostgresTable: "(Required) Table name in which events will be stored/updated. If the table does not exist, the MinIO server creates it at start-up",
|
||||
target.PostgresHost: "(Optional) Host name of the PostgreSQL server. Defaults to `localhost`. IPv6 host should be enclosed with `[` and `]`",
|
||||
target.PostgresPort: "(Optional) Port on which to connect to PostgreSQL server, defaults to `5432`",
|
||||
target.PostgresUsername: "Database username, defaults to user running the MinIO process if not specified",
|
||||
target.PostgresPassword: "Database password",
|
||||
target.PostgresDatabase: "Database name",
|
||||
target.PostgresQueueDir: "Local directory where events are stored eg: '/home/events'",
|
||||
target.PostgresQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
|
||||
}
|
||||
|
||||
HelpMySQL = config.HelpKV{
|
||||
config.State: "(Required) Is this server endpoint configuration active/enabled",
|
||||
config.Comment: "A comment to describe the MySQL target setting",
|
||||
target.MySQLFormat: "Specify how data is populated, `namespace` format and `access` format, defaults to 'namespace'",
|
||||
target.MySQLHost: "Host name of the MySQL server (used only if `dsnString` is empty)",
|
||||
target.MySQLPort: "Port on which to connect to the MySQL server (used only if `dsn_string` is empty)",
|
||||
target.MySQLUsername: "Database user-name (used only if `dsnString` is empty)",
|
||||
target.MySQLPassword: "Database password (used only if `dsnString` is empty)",
|
||||
target.MySQLDatabase: "Database name (used only if `dsnString` is empty)",
|
||||
target.MySQLDSNString: "Data-Source-Name connection string for the MySQL server",
|
||||
target.MySQLTable: "(Required) Table name in which events will be stored/updated. If the table does not exist, the MinIO server creates it at start-up",
|
||||
target.MySQLQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
|
||||
target.MySQLQueueDir: "Local directory where events are stored eg: '/home/events'",
|
||||
}
|
||||
|
||||
HelpNATS = config.HelpKV{
|
||||
config.State: "(Required) Is this server endpoint configuration active/enabled",
|
||||
config.Comment: "A comment to describe the NATS target setting",
|
||||
target.NATSAddress: "NATS server address eg: '0.0.0.0:4222'",
|
||||
target.NATSSubject: "NATS subject that represents this subscription",
|
||||
target.NATSUsername: "Username to be used when connecting to the server",
|
||||
target.NATSPassword: "Password to be used when connecting to a server",
|
||||
target.NATSToken: "Token to be used when connecting to a server",
|
||||
target.NATSSecure: "Set this to 'on' to enable TLS connections that skip server certificate verification (not recommended)",
|
||||
target.NATSPingInterval: "Client ping commands interval to the server, disabled by default",
|
||||
target.NATSStreamingEnable: "Set this to 'on', to use streaming NATS server",
|
||||
target.NATSStreamingAsync: "Set this to 'on', to enable asynchronous publish, process the ACK or error state",
|
||||
target.NATSStreamingMaxPubAcksInFlight: "Specifies how many messages can be published without getting ACKs back from NATS streaming server",
|
||||
target.NATSStreamingClusterID: "Unique ID for the NATS streaming cluster",
|
||||
target.NATSQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
|
||||
target.NATSQueueDir: "Local directory where events are stored eg: '/home/events'",
|
||||
}
|
||||
|
||||
HelpNSQ = config.HelpKV{
|
||||
config.State: "(Required) Is this server endpoint configuration active/enabled",
|
||||
config.Comment: "A comment to describe the NSQ target setting",
|
||||
target.NSQAddress: "NSQ server address eg: '127.0.0.1:4150'",
|
||||
target.NSQTopic: "NSQ topic unique per target",
|
||||
target.NSQTLSEnable: "Set this to 'on', to enable TLS negotiation",
|
||||
target.NSQTLSSkipVerify: "Set this to 'on', to disable client verification of server certificates",
|
||||
target.NSQQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
|
||||
target.NSQQueueDir: "Local directory where events are stored eg: '/home/events'",
|
||||
}
|
||||
)
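
// Illustrative sketch (not in the original file), assuming config.HelpKV is a
// plain key-to-description map as the literals above suggest, and that "fmt"
// is imported: tooling can render any target's help entries generically.
func printWebhookHelp() {
	for k, v := range HelpWebhook {
		fmt.Printf("%-25s %s\n", k, v)
	}
}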
295
cmd/config/notify/legacy.go
Normal file
@@ -0,0 +1,295 @@
package notify
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/pkg/event/target"
|
||||
)
|
||||
|
||||
// SetNotifyKafka - helper for config migration from older config.
|
||||
func SetNotifyKafka(s config.Config, kName string, cfg target.KafkaArgs) error {
|
||||
if err := cfg.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s[config.NotifyKafkaSubSys][kName] = config.KVS{
|
||||
config.State: func() string {
|
||||
if cfg.Enable {
|
||||
return config.StateOn
|
||||
}
|
||||
return config.StateOff
|
||||
}(),
|
||||
target.KafkaBrokers: func() string {
|
||||
var brokers []string
|
||||
for _, broker := range cfg.Brokers {
|
||||
brokers = append(brokers, broker.String())
|
||||
}
|
||||
return strings.Join(brokers, ",")
|
||||
}(),
|
||||
config.Comment: "Settings for Kafka notification, after migrating config",
|
||||
target.KafkaTopic: cfg.Topic,
|
||||
target.KafkaQueueDir: cfg.QueueDir,
|
||||
target.KafkaQueueLimit: strconv.Itoa(int(cfg.QueueLimit)),
|
||||
target.KafkaTLSEnable: config.FormatBool(cfg.TLS.Enable),
|
||||
target.KafkaTLSSkipVerify: config.FormatBool(cfg.TLS.SkipVerify),
|
||||
target.KafkaTLSClientAuth: strconv.Itoa(int(cfg.TLS.ClientAuth)),
|
||||
target.KafkaSASLEnable: config.FormatBool(cfg.SASL.Enable),
|
||||
target.KafkaSASLUsername: cfg.SASL.User,
|
||||
target.KafkaSASLPassword: cfg.SASL.Password,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetNotifyAMQP - helper for config migration from older config.
|
||||
func SetNotifyAMQP(s config.Config, amqpName string, cfg target.AMQPArgs) error {
|
||||
if err := cfg.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s[config.NotifyAMQPSubSys][amqpName] = config.KVS{
|
||||
config.State: func() string {
|
||||
if cfg.Enable {
|
||||
return config.StateOn
|
||||
}
|
||||
return config.StateOff
|
||||
}(),
|
||||
config.Comment: "Settings for AMQP notification, after migrating config",
|
||||
target.AmqpURL: cfg.URL.String(),
|
||||
target.AmqpExchange: cfg.Exchange,
|
||||
target.AmqpRoutingKey: cfg.RoutingKey,
|
||||
target.AmqpExchangeType: cfg.ExchangeType,
|
||||
target.AmqpDeliveryMode: strconv.Itoa(int(cfg.DeliveryMode)),
|
||||
target.AmqpMandatory: config.FormatBool(cfg.Mandatory),
|
||||
target.AmqpInternal: config.FormatBool(cfg.Immediate),
|
||||
target.AmqpDurable: config.FormatBool(cfg.Durable),
|
||||
target.AmqpNoWait: config.FormatBool(cfg.NoWait),
|
||||
target.AmqpAutoDeleted: config.FormatBool(cfg.AutoDeleted),
|
||||
target.AmqpQueueDir: cfg.QueueDir,
|
||||
target.AmqpQueueLimit: strconv.Itoa(int(cfg.QueueLimit)),
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetNotifyES - helper for config migration from older config.
|
||||
func SetNotifyES(s config.Config, esName string, cfg target.ElasticsearchArgs) error {
|
||||
if err := cfg.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s[config.NotifyESSubSys][esName] = config.KVS{
|
||||
config.State: func() string {
|
||||
if cfg.Enable {
|
||||
return config.StateOn
|
||||
}
|
||||
return config.StateOff
|
||||
}(),
|
||||
config.Comment: "Settings for Elasticsearch notification, after migrating config",
|
||||
target.ElasticFormat: cfg.Format,
|
||||
target.ElasticURL: cfg.URL.String(),
|
||||
target.ElasticIndex: cfg.Index,
|
||||
target.ElasticQueueDir: cfg.QueueDir,
|
||||
target.ElasticQueueLimit: strconv.Itoa(int(cfg.QueueLimit)),
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetNotifyRedis - helper for config migration from older config.
|
||||
func SetNotifyRedis(s config.Config, redisName string, cfg target.RedisArgs) error {
|
||||
if err := cfg.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s[config.NotifyRedisSubSys][redisName] = config.KVS{
|
||||
config.State: func() string {
|
||||
if cfg.Enable {
|
||||
return config.StateOn
|
||||
}
|
||||
return config.StateOff
|
||||
}(),
|
||||
config.Comment: "Settings for Redis notification, after migrating config",
|
||||
target.RedisFormat: cfg.Format,
|
||||
target.RedisAddress: cfg.Addr.String(),
|
||||
target.RedisPassword: cfg.Password,
|
||||
target.RedisKey: cfg.Key,
|
||||
target.RedisQueueDir: cfg.QueueDir,
|
||||
target.RedisQueueLimit: strconv.Itoa(int(cfg.QueueLimit)),
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetNotifyWebhook - helper for config migration from older config.
|
||||
func SetNotifyWebhook(s config.Config, whName string, cfg target.WebhookArgs) error {
|
||||
if err := cfg.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s[config.NotifyWebhookSubSys][whName] = config.KVS{
|
||||
config.State: func() string {
|
||||
if cfg.Enable {
|
||||
return config.StateOn
|
||||
}
|
||||
return config.StateOff
|
||||
}(),
|
||||
config.Comment: "Settings for Webhook notification, after migrating config",
|
||||
target.WebhookEndpoint: cfg.Endpoint.String(),
|
||||
target.WebhookAuthToken: cfg.AuthToken,
|
||||
target.WebhookQueueDir: cfg.QueueDir,
|
||||
target.WebhookQueueLimit: strconv.Itoa(int(cfg.QueueLimit)),
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetNotifyPostgres - helper for config migration from older config.
|
||||
func SetNotifyPostgres(s config.Config, psqName string, cfg target.PostgreSQLArgs) error {
|
||||
if err := cfg.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s[config.NotifyPostgresSubSys][psqName] = config.KVS{
|
||||
config.State: func() string {
|
||||
if cfg.Enable {
|
||||
return config.StateOn
|
||||
}
|
||||
return config.StateOff
|
||||
}(),
|
||||
config.Comment: "Settings for Postgres notification, after migrating config",
|
||||
target.PostgresFormat: cfg.Format,
|
||||
target.PostgresConnectionString: cfg.ConnectionString,
|
||||
target.PostgresTable: cfg.Table,
|
||||
target.PostgresHost: cfg.Host.String(),
|
||||
target.PostgresPort: cfg.Port,
|
||||
target.PostgresUsername: cfg.User,
|
||||
target.PostgresPassword: cfg.Password,
|
||||
target.PostgresDatabase: cfg.Database,
|
||||
target.PostgresQueueDir: cfg.QueueDir,
|
||||
target.PostgresQueueLimit: strconv.Itoa(int(cfg.QueueLimit)),
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetNotifyNSQ - helper for config migration from older config.
|
||||
func SetNotifyNSQ(s config.Config, nsqName string, cfg target.NSQArgs) error {
|
||||
if err := cfg.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s[config.NotifyNSQSubSys][nsqName] = config.KVS{
|
||||
config.State: func() string {
|
||||
if cfg.Enable {
|
||||
return config.StateOn
|
||||
}
|
||||
return config.StateOff
|
||||
}(),
|
||||
config.Comment: "Settings for NSQ notification, after migrating config",
|
||||
target.NSQAddress: cfg.NSQDAddress.String(),
|
||||
target.NSQTopic: cfg.Topic,
|
||||
target.NSQTLSEnable: config.FormatBool(cfg.TLS.Enable),
|
||||
target.NSQTLSSkipVerify: config.FormatBool(cfg.TLS.SkipVerify),
|
||||
target.NSQQueueDir: cfg.QueueDir,
|
||||
target.NSQQueueLimit: strconv.Itoa(int(cfg.QueueLimit)),
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetNotifyNATS - helper for config migration from older config.
|
||||
func SetNotifyNATS(s config.Config, natsName string, cfg target.NATSArgs) error {
|
||||
if err := cfg.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s[config.NotifyNATSSubSys][natsName] = config.KVS{
|
||||
config.State: func() string {
|
||||
if cfg.Enable {
|
||||
return config.StateOn
|
||||
}
|
||||
return config.StateOff
|
||||
}(),
|
||||
config.Comment: "Settings for NATS notification, after migrating config",
|
||||
target.NATSAddress: cfg.Address.String(),
|
||||
target.NATSSubject: cfg.Subject,
|
||||
target.NATSUsername: cfg.Username,
|
||||
target.NATSPassword: cfg.Password,
|
||||
target.NATSToken: cfg.Token,
|
||||
target.NATSSecure: config.FormatBool(cfg.Secure),
|
||||
target.NATSPingInterval: strconv.FormatInt(cfg.PingInterval, 10),
|
||||
target.NATSQueueDir: cfg.QueueDir,
|
||||
target.NATSQueueLimit: strconv.Itoa(int(cfg.QueueLimit)),
|
||||
target.NATSStreamingEnable: func() string {
|
||||
if cfg.Streaming.Enable {
|
||||
return config.StateOn
|
||||
}
|
||||
return config.StateOff
|
||||
}(),
|
||||
target.NATSStreamingClusterID: cfg.Streaming.ClusterID,
|
||||
target.NATSStreamingAsync: config.FormatBool(cfg.Streaming.Async),
|
||||
target.NATSStreamingMaxPubAcksInFlight: strconv.Itoa(cfg.Streaming.MaxPubAcksInflight),
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetNotifyMySQL - helper for config migration from older config.
|
||||
func SetNotifyMySQL(s config.Config, sqlName string, cfg target.MySQLArgs) error {
|
||||
if err := cfg.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s[config.NotifyMySQLSubSys][sqlName] = config.KVS{
|
||||
config.State: func() string {
|
||||
if cfg.Enable {
|
||||
return config.StateOn
|
||||
}
|
||||
return config.StateOff
|
||||
}(),
|
||||
config.Comment: "Settings for MySQL notification, after migrating config",
|
||||
target.MySQLFormat: cfg.Format,
|
||||
target.MySQLDSNString: cfg.DSN,
|
||||
target.MySQLTable: cfg.Table,
|
||||
target.MySQLHost: cfg.Host.String(),
|
||||
target.MySQLPort: cfg.Port,
|
||||
target.MySQLUsername: cfg.User,
|
||||
target.MySQLPassword: cfg.Password,
|
||||
target.MySQLDatabase: cfg.Database,
|
||||
target.MySQLQueueDir: cfg.QueueDir,
|
||||
target.MySQLQueueLimit: strconv.Itoa(int(cfg.QueueLimit)),
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetNotifyMQTT - helper for config migration from older config.
|
||||
func SetNotifyMQTT(s config.Config, mqttName string, cfg target.MQTTArgs) error {
|
||||
if err := cfg.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s[config.NotifyMQTTSubSys][mqttName] = config.KVS{
|
||||
config.State: func() string {
|
||||
if cfg.Enable {
|
||||
return config.StateOn
|
||||
}
|
||||
return config.StateOff
|
||||
}(),
|
||||
config.Comment: "Settings for MQTT notification, after migrating config",
|
||||
target.MqttBroker: cfg.Broker.String(),
|
||||
target.MqttTopic: cfg.Topic,
|
||||
target.MqttQoS: fmt.Sprintf("%d", cfg.QoS),
|
||||
target.MqttUsername: cfg.User,
|
||||
target.MqttPassword: cfg.Password,
|
||||
target.MqttReconnectInterval: cfg.MaxReconnectInterval.String(),
|
||||
target.MqttKeepAliveInterval: cfg.KeepAlive.String(),
|
||||
target.MqttQueueDir: cfg.QueueDir,
|
||||
target.MqttQueueLimit: strconv.Itoa(int(cfg.QueueLimit)),
|
||||
}
|
||||
|
||||
return nil
|
||||
}
1396
cmd/config/notify/parse.go
Normal file
File diff suppressed because it is too large
218
cmd/config/policy/opa/config.go
Normal file
@@ -0,0 +1,218 @@
/*
|
||||
* MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package opa
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
iampolicy "github.com/minio/minio/pkg/iam/policy"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
|
||||
// Env IAM OPA URL
|
||||
const (
|
||||
URL = "url"
|
||||
AuthToken = "auth_token"
|
||||
|
||||
EnvPolicyOpaURL = "MINIO_POLICY_OPA_URL"
|
||||
EnvPolicyOpaAuthToken = "MINIO_POLICY_OPA_AUTH_TOKEN"
|
||||
)
|
||||
|
||||
// DefaultKVS - default config for OPA config
|
||||
var (
|
||||
DefaultKVS = config.KVS{
|
||||
config.State: config.StateOff,
|
||||
config.Comment: "This is a default OPA configuration",
|
||||
URL: "",
|
||||
AuthToken: "",
|
||||
}
|
||||
)
|
||||
|
||||
// Args opa general purpose policy engine configuration.
|
||||
type Args struct {
|
||||
URL *xnet.URL `json:"url"`
|
||||
AuthToken string `json:"authToken"`
|
||||
Transport http.RoundTripper `json:"-"`
|
||||
CloseRespFn func(r io.ReadCloser) `json:"-"`
|
||||
}
|
||||
|
||||
// Validate - validate opa configuration params.
|
||||
func (a *Args) Validate() error {
|
||||
req, err := http.NewRequest(http.MethodPost, a.URL.String(), bytes.NewReader([]byte("")))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
if a.AuthToken != "" {
|
||||
req.Header.Set("Authorization", a.AuthToken)
|
||||
}
|
||||
|
||||
client := &http.Client{Transport: a.Transport}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer a.CloseRespFn(resp.Body)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON - decodes JSON data.
|
||||
func (a *Args) UnmarshalJSON(data []byte) error {
|
||||
// subtype to avoid recursive call to UnmarshalJSON()
|
||||
type subArgs Args
|
||||
var so subArgs
|
||||
|
||||
if err := json.Unmarshal(data, &so); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
oa := Args(so)
|
||||
if oa.URL == nil || oa.URL.String() == "" {
|
||||
*a = oa
|
||||
return nil
|
||||
}
|
||||
|
||||
*a = oa
|
||||
return nil
|
||||
}
|
||||
|
||||
// Opa - implements opa policy agent calls.
|
||||
type Opa struct {
|
||||
args Args
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
// LookupConfig lookup Opa from config, override with any ENVs.
|
||||
func LookupConfig(kv config.KVS, transport *http.Transport, closeRespFn func(io.ReadCloser)) (Args, error) {
|
||||
args := Args{}
|
||||
|
||||
if err := config.CheckValidKeys(config.PolicyOPASubSys, kv, DefaultKVS); err != nil {
|
||||
return args, err
|
||||
}
|
||||
|
||||
opaURL := env.Get(EnvIamOpaURL, "")
|
||||
if opaURL == "" {
|
||||
opaURL = env.Get(EnvPolicyOpaURL, kv.Get(URL))
|
||||
if opaURL == "" {
|
||||
return args, nil
|
||||
}
|
||||
}
|
||||
authToken := env.Get(EnvIamOpaAuthToken, "")
|
||||
if authToken == "" {
|
||||
authToken = env.Get(EnvPolicyOpaAuthToken, kv.Get(AuthToken))
|
||||
}
|
||||
|
||||
u, err := xnet.ParseURL(opaURL)
|
||||
if err != nil {
|
||||
return args, err
|
||||
}
|
||||
args = Args{
|
||||
URL: u,
|
||||
AuthToken: authToken,
|
||||
Transport: transport,
|
||||
CloseRespFn: closeRespFn,
|
||||
}
|
||||
if err = args.Validate(); err != nil {
|
||||
return args, err
|
||||
}
|
||||
return args, nil
|
||||
}
|
||||
|
||||
// New - initializes opa policy engine connector.
|
||||
func New(args Args) *Opa {
|
||||
// No opa args.
|
||||
if args.URL == nil || args.URL.Scheme == "" && args.AuthToken == "" {
|
||||
return nil
|
||||
}
|
||||
return &Opa{
|
||||
args: args,
|
||||
client: &http.Client{Transport: args.Transport},
|
||||
}
|
||||
}
|
||||
|
||||
// IsAllowed - checks given policy args is allowed to continue the REST API.
|
||||
func (o *Opa) IsAllowed(args iampolicy.Args) (bool, error) {
|
||||
if o == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// OPA input
|
||||
body := make(map[string]interface{})
|
||||
body["input"] = args
|
||||
|
||||
inputBytes, err := json.Marshal(body)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(http.MethodPost, o.args.URL.String(), bytes.NewReader(inputBytes))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
if o.args.AuthToken != "" {
|
||||
req.Header.Set("Authorization", o.args.AuthToken)
|
||||
}
|
||||
|
||||
resp, err := o.client.Do(req)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer o.args.CloseRespFn(resp.Body)
|
||||
|
||||
// Read the body to be saved later.
|
||||
opaRespBytes, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Handle large OPA responses when OPA URL is of
|
||||
// form http://localhost:8181/v1/data/httpapi/authz
|
||||
type opaResultAllow struct {
|
||||
Result struct {
|
||||
Allow bool `json:"allow"`
|
||||
} `json:"result"`
|
||||
}
|
||||
|
||||
// Handle simpler OPA responses when OPA URL is of
|
||||
// form http://localhost:8181/v1/data/httpapi/authz/allow
|
||||
type opaResult struct {
|
||||
Result bool `json:"result"`
|
||||
}
|
||||
|
||||
respBody := bytes.NewReader(opaRespBytes)
|
||||
|
||||
var result opaResult
|
||||
if err = json.NewDecoder(respBody).Decode(&result); err != nil {
|
||||
respBody.Seek(0, 0)
|
||||
var resultAllow opaResultAllow
|
||||
if err = json.NewDecoder(respBody).Decode(&resultAllow); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return resultAllow.Result.Allow, nil
|
||||
}
|
||||
return result.Result, nil
|
||||
}
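
// Illustrative sketch (not in the original file): the two OPA response shapes
// handled by IsAllowed above, depending on whether the configured URL points
// at the policy document (".../v1/data/httpapi/authz") or directly at the
// allow rule (".../v1/data/httpapi/authz/allow").
func exampleOpaResponses() {
	nested := []byte(`{"result": {"allow": true}}`) // rule document form
	plain := []byte(`{"result": true}`)             // allow rule form

	var a struct {
		Result struct {
			Allow bool `json:"allow"`
		} `json:"result"`
	}
	var b struct {
		Result bool `json:"result"`
	}
	_ = json.Unmarshal(nested, &a) // a.Result.Allow == true
	_ = json.Unmarshal(plain, &b)  // b.Result == true
}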
29
cmd/config/policy/opa/help.go
Normal file
@@ -0,0 +1,29 @@
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package opa
|
||||
|
||||
import "github.com/minio/minio/cmd/config"
|
||||
|
||||
// Help template for OPA policy feature.
|
||||
var (
|
||||
Help = config.HelpKV{
|
||||
URL: `Points to URL for OPA HTTP API endpoint. eg: "http://localhost:8181/v1/data/httpapi/authz/allow"`,
|
||||
AuthToken: "Authorization token for the OPA HTTP API endpoint (optional)",
|
||||
config.State: "Indicates if OPA policy is enabled or not",
|
||||
config.Comment: "A comment to describe the OPA policy setting",
|
||||
}
|
||||
)
|
||||
50
cmd/config/policy/opa/legacy.go
Normal file
@@ -0,0 +1,50 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package opa
|
||||
|
||||
import (
|
||||
"github.com/minio/minio/cmd/config"
|
||||
)
|
||||
|
||||
// Legacy OPA envs
|
||||
const (
|
||||
EnvIamOpaURL = "MINIO_IAM_OPA_URL"
|
||||
EnvIamOpaAuthToken = "MINIO_IAM_OPA_AUTHTOKEN"
|
||||
)
|
||||
|
||||
// SetPolicyOPAConfig - One time migration code needed, for migrating from older config to new for PolicyOPAConfig.
|
||||
func SetPolicyOPAConfig(s config.Config, opaArgs Args) {
|
||||
s[config.PolicyOPASubSys][config.Default] = config.KVS{
|
||||
config.State: func() string {
|
||||
if opaArgs.URL == nil {
|
||||
return config.StateOff
|
||||
}
|
||||
if opaArgs.URL.String() == "" {
|
||||
return config.StateOff
|
||||
}
|
||||
return config.StateOn
|
||||
}(),
|
||||
config.Comment: "Settings for OPA, after migrating config",
|
||||
URL: func() string {
|
||||
if opaArgs.URL != nil {
|
||||
return opaArgs.URL.String()
|
||||
}
|
||||
return ""
|
||||
}(),
|
||||
AuthToken: opaArgs.AuthToken,
|
||||
}
|
||||
}
|
||||
29
cmd/config/storageclass/help.go
Normal file
@@ -0,0 +1,29 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package storageclass

import "github.com/minio/minio/cmd/config"

// Help template for storageclass feature.
var (
	Help = config.HelpKV{
		ClassRRS:       "Set reduced redundancy storage class parity ratio. eg: \"EC:2\"",
		ClassStandard:  "Set standard storage class parity ratio. eg: \"EC:4\"",
		config.State:   "Indicates if storageclass is enabled or not",
		config.Comment: "A comment to describe the storageclass setting",
	}
)
|
||||
36
cmd/config/storageclass/legacy.go
Normal file
@@ -0,0 +1,36 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package storageclass
|
||||
|
||||
import (
|
||||
"github.com/minio/minio/cmd/config"
|
||||
)
|
||||
|
||||
// SetStorageClass - One time migration code needed, for migrating from older config to new for StorageClass.
|
||||
func SetStorageClass(s config.Config, cfg Config) {
|
||||
s[config.StorageClassSubSys][config.Default] = config.KVS{
|
||||
ClassStandard: cfg.Standard.String(),
|
||||
ClassRRS: cfg.RRS.String(),
|
||||
config.State: func() string {
|
||||
if len(cfg.Standard.String()) > 0 || len(cfg.RRS.String()) > 0 {
|
||||
return config.StateOn
|
||||
}
|
||||
return config.StateOff
|
||||
}(),
|
||||
config.Comment: "Settings for StorageClass, after migrating config",
|
||||
}
|
||||
}
|
||||
@@ -36,6 +36,11 @@ const (
|
||||
|
||||
// Standard constants for config info storage class
|
||||
const (
|
||||
ClassStandard = "standard"
|
||||
ClassRRS = "rrs"
|
||||
|
||||
// Env to on/off storage class settings.
|
||||
EnvStorageClass = "MINIO_STORAGE_CLASS_STATE"
|
||||
// Reduced redundancy storage class environment variable
|
||||
RRSEnv = "MINIO_STORAGE_CLASS_RRS"
|
||||
// Standard storage class environment variable
|
||||
@@ -51,6 +56,16 @@ const (
|
||||
defaultRRSParity = minParityDisks
|
||||
)
|
||||
|
||||
// DefaultKVS - default storage class config
|
||||
var (
|
||||
DefaultKVS = config.KVS{
|
||||
config.State: config.StateOff,
|
||||
config.Comment: "This is a default StorageClass configuration, only applicable in erasure coded setups",
|
||||
ClassStandard: "",
|
||||
ClassRRS: "EC:2",
|
||||
}
|
||||
)
|
||||
|
||||
// StorageClass - holds storage class information
|
||||
type StorageClass struct {
|
||||
Parity int
|
||||
@@ -196,11 +211,25 @@ func (sCfg Config) GetParityForSC(sc string) (parity int) {
|
||||
}
|
||||
|
||||
// LookupConfig - lookup storage class config and override with valid environment settings if any.
|
||||
func LookupConfig(cfg Config, drivesPerSet int) (Config, error) {
|
||||
var err error
|
||||
func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {
|
||||
cfg = Config{}
|
||||
cfg.Standard.Parity = drivesPerSet / 2
|
||||
cfg.RRS.Parity = defaultRRSParity
|
||||
|
||||
if err = config.CheckValidKeys(config.StorageClassSubSys, kvs, DefaultKVS); err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
|
||||
stateBool, err := config.ParseBool(env.Get(EnvStorageClass, kvs.Get(config.State)))
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
if !stateBool {
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// Check for environment variables and parse into storageClass struct
|
||||
if ssc := env.Get(StandardEnv, cfg.Standard.String()); ssc != "" {
|
||||
if ssc := env.Get(StandardEnv, kvs.Get(ClassStandard)); ssc != "" {
|
||||
cfg.Standard, err = parseStorageClass(ssc)
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
@@ -210,7 +239,7 @@ func LookupConfig(cfg Config, drivesPerSet int) (Config, error) {
|
||||
cfg.Standard.Parity = drivesPerSet / 2
|
||||
}
|
||||
|
||||
if rrsc := env.Get(RRSEnv, cfg.RRS.String()); rrsc != "" {
|
||||
if rrsc := env.Get(RRSEnv, kvs.Get(ClassRRS)); rrsc != "" {
|
||||
cfg.RRS, err = parseStorageClass(rrsc)
|
||||
if err != nil {
|
||||
return cfg, err
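
The storage class values follow the "EC:<parity>" format from the help text, e.g. "EC:4" for the standard class and "EC:2" for reduced redundancy, and when the standard class is left empty its parity defaults to half the drives in a set. Below is a small standalone sketch of that parsing and defaulting; parseParity is a simplified stand-in for the real parseStorageClass, not a copy of it.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseParity accepts the "EC:<parity>" form used for the standard and RRS
// storage class settings.
func parseParity(sc string) (int, error) {
	parts := strings.SplitN(sc, ":", 2)
	if len(parts) != 2 || parts[0] != "EC" {
		return 0, fmt.Errorf("unsupported storage class format %q", sc)
	}
	return strconv.Atoi(parts[1])
}

func main() {
	const drivesPerSet = 16

	standardParity := drivesPerSet / 2 // default when no standard class is configured
	if p, err := parseParity("EC:4"); err == nil {
		standardParity = p
	}
	rrsParity, _ := parseParity("EC:2")

	fmt.Println(standardParity, rrsParity) // 4 2
}
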
|
||||
|
||||
@@ -68,7 +68,7 @@ func (sys *HTTPConsoleLoggerSys) HasLogListeners() bool {
|
||||
// Subscribe starts console logging for this node.
|
||||
func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan interface{}, doneCh chan struct{}, node string, last int, logKind string, filter func(entry interface{}) bool) {
|
||||
// Enable console logging for remote client even if local console logging is disabled in the config.
|
||||
if !globalServerConfig.Logger.Console.Enabled && !sys.pubsub.HasSubscribers() {
|
||||
if !sys.pubsub.HasSubscribers() {
|
||||
logger.AddTarget(globalConsoleSys.Console())
|
||||
}
|
||||
|
||||
@@ -135,8 +135,5 @@ func (sys *HTTPConsoleLoggerSys) Send(e interface{}, logKind string) error {
|
||||
sys.logBuf = sys.logBuf.Next()
|
||||
sys.logBufLk.Unlock()
|
||||
|
||||
if globalServerConfig.Logger.Console.Enabled {
|
||||
return sys.console.Send(e, string(logger.All))
|
||||
}
|
||||
return nil
|
||||
return sys.console.Send(e, string(logger.All))
|
||||
}
|
||||
|
||||
@@ -18,68 +18,99 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
)
|
||||
|
||||
const (
|
||||
// EnvKMSMasterKey is the environment variable used to specify
|
||||
// a KMS master key used to protect SSE-S3 per-object keys.
|
||||
// Valid values must be of the from: "KEY_ID:32_BYTE_HEX_VALUE".
|
||||
EnvKMSMasterKey = "MINIO_SSE_MASTER_KEY"
|
||||
|
||||
// EnvAutoEncryption is the environment variable used to en/disable
|
||||
// SSE-S3 auto-encryption. SSE-S3 auto-encryption, if enabled,
|
||||
// requires a valid KMS configuration and turns any non-SSE-C
|
||||
// request into an SSE-S3 request.
|
||||
// If present EnvAutoEncryption must be either "on" or "off".
|
||||
EnvAutoEncryption = "MINIO_SSE_AUTO_ENCRYPTION"
|
||||
)
|
||||
|
||||
const (
|
||||
// EnvVaultEndpoint is the environment variable used to specify
|
||||
// the vault HTTPS endpoint.
|
||||
EnvVaultEndpoint = "MINIO_SSE_VAULT_ENDPOINT"
|
||||
|
||||
// EnvVaultAuthType is the environment variable used to specify
|
||||
// the authentication type for vault.
|
||||
EnvVaultAuthType = "MINIO_SSE_VAULT_AUTH_TYPE"
|
||||
|
||||
// EnvVaultAppRoleID is the environment variable used to specify
|
||||
// the vault AppRole ID.
|
||||
EnvVaultAppRoleID = "MINIO_SSE_VAULT_APPROLE_ID"
|
||||
|
||||
// EnvVaultAppSecretID is the environment variable used to specify
|
||||
// the vault AppRole secret corresponding to the AppRole ID.
|
||||
EnvVaultAppSecretID = "MINIO_SSE_VAULT_APPROLE_SECRET"
|
||||
|
||||
// EnvVaultKeyVersion is the environment variable used to specify
|
||||
// the vault key version.
|
||||
EnvVaultKeyVersion = "MINIO_SSE_VAULT_KEY_VERSION"
|
||||
|
||||
// EnvVaultKeyName is the environment variable used to specify
|
||||
// the vault named key-ring. In the S3 context it's referred as
|
||||
// customer master key ID (CMK-ID).
|
||||
EnvVaultKeyName = "MINIO_SSE_VAULT_KEY_NAME"
|
||||
|
||||
// EnvVaultCAPath is the environment variable used to specify the
|
||||
// path to a directory of PEM-encoded CA cert files. These CA cert
|
||||
// files are used to authenticate MinIO to Vault over mTLS.
|
||||
EnvVaultCAPath = "MINIO_SSE_VAULT_CAPATH"
|
||||
|
||||
// EnvVaultNamespace is the environment variable used to specify
|
||||
// vault namespace. The vault namespace is used if the enterprise
|
||||
// version of Hashicorp Vault is used.
|
||||
EnvVaultNamespace = "MINIO_SSE_VAULT_NAMESPACE"
|
||||
)
|
||||
|
||||
// KMSConfig has the KMS config for hashicorp vault
|
||||
type KMSConfig struct {
|
||||
AutoEncryption bool `json:"-"`
|
||||
Vault VaultConfig `json:"vault"`
|
||||
}
|
||||
|
||||
// KMS Vault constants.
|
||||
const (
|
||||
KMSVaultEndpoint = "endpoint"
|
||||
KMSVaultCAPath = "capath"
|
||||
KMSVaultKeyName = "key_name"
|
||||
KMSVaultKeyVersion = "key_version"
|
||||
KMSVaultNamespace = "namespace"
|
||||
KMSVaultAuthType = "auth_type"
|
||||
KMSVaultAppRoleID = "auth_approle_id"
|
||||
KMSVaultAppRoleSecret = "auth_approle_secret"
|
||||
)
|
||||
|
||||
// DefaultKVS - default KV crypto config
|
||||
var (
|
||||
DefaultKVS = config.KVS{
|
||||
config.State: config.StateOff,
|
||||
config.Comment: "This is a default Vault configuration",
|
||||
KMSVaultEndpoint: "",
|
||||
KMSVaultCAPath: "",
|
||||
KMSVaultKeyName: "",
|
||||
KMSVaultKeyVersion: "",
|
||||
KMSVaultNamespace: "",
|
||||
KMSVaultAuthType: "approle",
|
||||
KMSVaultAppRoleID: "",
|
||||
KMSVaultAppRoleSecret: "",
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
// EnvKMSMasterKey is the environment variable used to specify
|
||||
// a KMS master key used to protect SSE-S3 per-object keys.
|
||||
// Valid values must be of the from: "KEY_ID:32_BYTE_HEX_VALUE".
|
||||
EnvKMSMasterKey = "MINIO_KMS_MASTER_KEY"
|
||||
|
||||
// EnvKMSAutoEncryption is the environment variable used to en/disable
|
||||
// SSE-S3 auto-encryption. SSE-S3 auto-encryption, if enabled,
|
||||
// requires a valid KMS configuration and turns any non-SSE-C
|
||||
// request into an SSE-S3 request.
|
||||
// If present EnvAutoEncryption must be either "on" or "off".
|
||||
EnvKMSAutoEncryption = "MINIO_KMS_AUTO_ENCRYPTION"
|
||||
)
|
||||
|
||||
const (
|
||||
// EnvKMSVaultState to enable on/off
|
||||
EnvKMSVaultState = "MINIO_KMS_VAULT_STATE"
|
||||
|
||||
// EnvKMSVaultEndpoint is the environment variable used to specify
|
||||
// the vault HTTPS endpoint.
|
||||
EnvKMSVaultEndpoint = "MINIO_KMS_VAULT_ENDPOINT"
|
||||
|
||||
// EnvKMSVaultAuthType is the environment variable used to specify
|
||||
// the authentication type for vault.
|
||||
EnvKMSVaultAuthType = "MINIO_KMS_VAULT_AUTH_TYPE"
|
||||
|
||||
// EnvKMSVaultAppRoleID is the environment variable used to specify
|
||||
// the vault AppRole ID.
|
||||
EnvKMSVaultAppRoleID = "MINIO_KMS_VAULT_APPROLE_ID"
|
||||
|
||||
// EnvKMSVaultAppSecretID is the environment variable used to specify
|
||||
// the vault AppRole secret corresponding to the AppRole ID.
|
||||
EnvKMSVaultAppSecretID = "MINIO_KMS_VAULT_APPROLE_SECRET"
|
||||
|
||||
// EnvKMSVaultKeyVersion is the environment variable used to specify
|
||||
// the vault key version.
|
||||
EnvKMSVaultKeyVersion = "MINIO_KMS_VAULT_KEY_VERSION"
|
||||
|
||||
// EnvKMSVaultKeyName is the environment variable used to specify
|
||||
// the vault named key-ring. In the S3 context it's referred as
|
||||
// customer master key ID (CMK-ID).
|
||||
EnvKMSVaultKeyName = "MINIO_KMS_VAULT_KEY_NAME"
|
||||
|
||||
// EnvKMSVaultCAPath is the environment variable used to specify the
|
||||
// path to a directory of PEM-encoded CA cert files. These CA cert
|
||||
// files are used to authenticate MinIO to Vault over mTLS.
|
||||
EnvKMSVaultCAPath = "MINIO_KMS_VAULT_CAPATH"
|
||||
|
||||
// EnvKMSVaultNamespace is the environment variable used to specify
|
||||
// vault namespace. The vault namespace is used if the enterprise
|
||||
// version of Hashicorp Vault is used.
|
||||
EnvKMSVaultNamespace = "MINIO_KMS_VAULT_NAMESPACE"
|
||||
)
|
||||
|
||||
// LookupConfig extracts the KMS configuration provided by environment
|
||||
// variables and merge them with the provided KMS configuration. The
|
||||
// merging follows the following rules:
|
||||
@@ -93,33 +124,69 @@ type KMSConfig struct {
|
||||
//
|
||||
// It sets the global KMS configuration according to the merged configuration
|
||||
// on success.
|
||||
func LookupConfig(config KMSConfig) (KMSConfig, error) {
|
||||
var err error
|
||||
// Lookup Hashicorp-Vault configuration & overwrite config entry if ENV var is present
|
||||
config.Vault.Endpoint = env.Get(EnvVaultEndpoint, config.Vault.Endpoint)
|
||||
config.Vault.CAPath = env.Get(EnvVaultCAPath, config.Vault.CAPath)
|
||||
config.Vault.Auth.Type = env.Get(EnvVaultAuthType, config.Vault.Auth.Type)
|
||||
config.Vault.Auth.AppRole.ID = env.Get(EnvVaultAppRoleID, config.Vault.Auth.AppRole.ID)
|
||||
config.Vault.Auth.AppRole.Secret = env.Get(EnvVaultAppSecretID, config.Vault.Auth.AppRole.Secret)
|
||||
config.Vault.Key.Name = env.Get(EnvVaultKeyName, config.Vault.Key.Name)
|
||||
config.Vault.Namespace = env.Get(EnvVaultNamespace, config.Vault.Namespace)
|
||||
keyVersion := env.Get(EnvVaultKeyVersion, strconv.Itoa(config.Vault.Key.Version))
|
||||
config.Vault.Key.Version, err = strconv.Atoi(keyVersion)
|
||||
if err != nil {
|
||||
return config, fmt.Errorf("Invalid ENV variable: Unable to parse %s value (`%s`)", EnvVaultKeyVersion, keyVersion)
|
||||
func LookupConfig(kvs config.KVS) (KMSConfig, error) {
|
||||
if err := config.CheckValidKeys(config.KmsVaultSubSys, kvs, DefaultKVS); err != nil {
|
||||
return KMSConfig{}, err
|
||||
}
|
||||
if err = config.Vault.Verify(); err != nil {
|
||||
return config, err
|
||||
kmsCfg, err := lookupConfigLegacy(kvs)
|
||||
if err != nil {
|
||||
return kmsCfg, err
|
||||
}
|
||||
if !kmsCfg.AutoEncryption {
|
||||
kmsCfg.AutoEncryption, err = config.ParseBool(env.Get(EnvKMSAutoEncryption, config.StateOff))
|
||||
if err != nil {
|
||||
return kmsCfg, err
|
||||
}
|
||||
}
|
||||
if !kmsCfg.Vault.IsEmpty() {
|
||||
return kmsCfg, nil
|
||||
}
|
||||
stateBool, err := config.ParseBool(env.Get(EnvKMSVaultState, kvs.Get(config.State)))
|
||||
if err != nil {
|
||||
return kmsCfg, err
|
||||
}
|
||||
if !stateBool {
|
||||
return kmsCfg, nil
|
||||
}
|
||||
vcfg := VaultConfig{}
|
||||
// Lookup Hashicorp-Vault configuration & overwrite config entry if ENV var is present
|
||||
vcfg.Endpoint = env.Get(EnvKMSVaultEndpoint, kvs.Get(KMSVaultEndpoint))
|
||||
vcfg.CAPath = env.Get(EnvKMSVaultCAPath, kvs.Get(KMSVaultCAPath))
|
||||
vcfg.Auth.Type = env.Get(EnvKMSVaultAuthType, kvs.Get(KMSVaultAuthType))
|
||||
vcfg.Auth.AppRole.ID = env.Get(EnvKMSVaultAppRoleID, kvs.Get(KMSVaultAppRoleID))
|
||||
vcfg.Auth.AppRole.Secret = env.Get(EnvKMSVaultAppSecretID, kvs.Get(KMSVaultAppRoleSecret))
|
||||
vcfg.Key.Name = env.Get(EnvKMSVaultKeyName, kvs.Get(KMSVaultKeyName))
|
||||
vcfg.Namespace = env.Get(EnvKMSVaultNamespace, kvs.Get(KMSVaultNamespace))
|
||||
keyVersion := env.Get(EnvKMSVaultKeyVersion, kvs.Get(KMSVaultKeyVersion))
|
||||
|
||||
if keyVersion != "" {
|
||||
vcfg.Key.Version, err = strconv.Atoi(keyVersion)
|
||||
if err != nil {
|
||||
return kmsCfg, fmt.Errorf("Unable to parse VaultKeyVersion value (`%s`)", keyVersion)
|
||||
}
|
||||
}
|
||||
|
||||
return config, nil
|
||||
if err = vcfg.Verify(); err != nil {
|
||||
return kmsCfg, err
|
||||
}
|
||||
|
||||
kmsCfg.Vault = vcfg
|
||||
return kmsCfg, nil
|
||||
}
|
||||
|
||||
// NewKMS - initialize a new KMS.
|
||||
func NewKMS(config KMSConfig) (kms KMS, err error) {
|
||||
func NewKMS(cfg KMSConfig) (kms KMS, err error) {
|
||||
// Lookup KMS master keys - only available through ENV.
|
||||
if masterKey, ok := env.Lookup(EnvKMSMasterKey); ok {
|
||||
if !config.Vault.IsEmpty() { // Vault and KMS master key provided
|
||||
if masterKeyLegacy, ok := env.Lookup(EnvKMSMasterKeyLegacy); ok {
|
||||
if !cfg.Vault.IsEmpty() { // Vault and KMS master key provided
|
||||
return kms, errors.New("Ambiguous KMS configuration: vault configuration and a master key are provided at the same time")
|
||||
}
|
||||
kms, err = ParseMasterKey(masterKeyLegacy)
|
||||
if err != nil {
|
||||
return kms, err
|
||||
}
|
||||
} else if masterKey, ok := env.Lookup(EnvKMSMasterKey); ok {
|
||||
if !cfg.Vault.IsEmpty() { // Vault and KMS master key provided
|
||||
return kms, errors.New("Ambiguous KMS configuration: vault configuration and a master key are provided at the same time")
|
||||
}
|
||||
kms, err = ParseMasterKey(masterKey)
|
||||
@@ -127,15 +194,14 @@ func NewKMS(config KMSConfig) (kms KMS, err error) {
|
||||
return kms, err
|
||||
}
|
||||
}
|
||||
if !config.Vault.IsEmpty() {
|
||||
kms, err = NewVault(config.Vault)
|
||||
if !cfg.Vault.IsEmpty() {
|
||||
kms, err = NewVault(cfg.Vault)
|
||||
if err != nil {
|
||||
return kms, err
|
||||
}
|
||||
}
|
||||
|
||||
autoEncryption := strings.EqualFold(env.Get(EnvAutoEncryption, "off"), "on")
|
||||
if autoEncryption && kms == nil {
|
||||
if cfg.AutoEncryption && kms == nil {
|
||||
return kms, errors.New("Invalid KMS configuration: auto-encryption is enabled but no valid KMS configuration is present")
|
||||
}
|
||||
return kms, nil
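
The master key passed through MINIO_KMS_MASTER_KEY (or the legacy MINIO_SSE_MASTER_KEY) must be of the form "KEY_ID:32_BYTE_HEX_VALUE", as the comments above state. The sketch below validates and splits that format; it is a simplified stand-in for ParseMasterKey, and the sample key value is made up.

package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// splitMasterKey checks the "KEY_ID:32_BYTE_HEX_VALUE" format and returns the
// key ID and the decoded 32-byte key.
func splitMasterKey(s string) (keyID string, key [32]byte, err error) {
	idx := strings.LastIndex(s, ":")
	if idx < 0 {
		return "", key, fmt.Errorf("invalid master key format")
	}
	keyID = s[:idx]
	b, err := hex.DecodeString(s[idx+1:])
	if err != nil {
		return "", key, err
	}
	if len(b) != 32 {
		return "", key, fmt.Errorf("master key must be 32 bytes, got %d", len(b))
	}
	copy(key[:], b)
	return keyID, key, nil
}

func main() {
	// Illustrative key: "my-minio-key" plus 64 hex characters (32 bytes).
	id, _, err := splitMasterKey("my-minio-key:6368616e676520746869732070617373776f726420746f206120736563726574")
	fmt.Println(id, err) // my-minio-key <nil>
}
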
|
||||
|
||||
35
cmd/crypto/help.go
Normal file
@@ -0,0 +1,35 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package crypto
|
||||
|
||||
import "github.com/minio/minio/cmd/config"
|
||||
|
||||
// Help template for KMS vault
|
||||
var (
|
||||
Help = config.HelpKV{
|
||||
KMSVaultEndpoint: `Points to Vault API endpoint eg: "http://vault-endpoint-ip:8200"`,
|
||||
KMSVaultKeyName: `Transit key name used in vault policy, must be unique name eg: "my-minio-key"`,
|
||||
KMSVaultAuthType: `Authentication type to Vault API endpoint eg: "approle"`,
|
||||
KMSVaultAppRoleID: `Unique role ID created for AppRole`,
|
||||
KMSVaultAppRoleSecret: `Unique secret ID created for AppRole`,
|
||||
KMSVaultNamespace: `Only needed if AppRole engine is scoped to Vault Namespace eg: "ns1"`,
|
||||
KMSVaultKeyVersion: `Key version (optional)`,
|
||||
KMSVaultCAPath: `Path to PEM-encoded CA cert files to use mTLS authentication (optional) eg: "/home/user/custom-certs"`,
|
||||
config.State: "Indicates if KMS Vault is enabled or not",
|
||||
config.Comment: "A comment to describe the KMS Vault setting",
|
||||
}
|
||||
)
|
||||
157
cmd/crypto/legacy.go
Normal file
@@ -0,0 +1,157 @@
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package crypto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
)
|
||||
|
||||
const (
|
||||
// EnvKMSMasterKeyLegacy is the environment variable used to specify
|
||||
// a KMS master key used to protect SSE-S3 per-object keys.
|
||||
// Valid values must be of the from: "KEY_ID:32_BYTE_HEX_VALUE".
|
||||
EnvKMSMasterKeyLegacy = "MINIO_SSE_MASTER_KEY"
|
||||
|
||||
// EnvAutoEncryptionLegacy is the environment variable used to en/disable
|
||||
// SSE-S3 auto-encryption. SSE-S3 auto-encryption, if enabled,
|
||||
// requires a valid KMS configuration and turns any non-SSE-C
|
||||
// request into an SSE-S3 request.
|
||||
// If present EnvAutoEncryption must be either "on" or "off".
|
||||
EnvAutoEncryptionLegacy = "MINIO_SSE_AUTO_ENCRYPTION"
|
||||
)
|
||||
|
||||
const (
|
||||
// EnvVaultEndpoint is the environment variable used to specify
|
||||
// the vault HTTPS endpoint.
|
||||
EnvVaultEndpoint = "MINIO_SSE_VAULT_ENDPOINT"
|
||||
|
||||
// EnvVaultAuthType is the environment variable used to specify
|
||||
// the authentication type for vault.
|
||||
EnvVaultAuthType = "MINIO_SSE_VAULT_AUTH_TYPE"
|
||||
|
||||
// EnvVaultAppRoleID is the environment variable used to specify
|
||||
// the vault AppRole ID.
|
||||
EnvVaultAppRoleID = "MINIO_SSE_VAULT_APPROLE_ID"
|
||||
|
||||
// EnvVaultAppSecretID is the environment variable used to specify
|
||||
// the vault AppRole secret corresponding to the AppRole ID.
|
||||
EnvVaultAppSecretID = "MINIO_SSE_VAULT_APPROLE_SECRET"
|
||||
|
||||
// EnvVaultKeyVersion is the environment variable used to specify
|
||||
// the vault key version.
|
||||
EnvVaultKeyVersion = "MINIO_SSE_VAULT_KEY_VERSION"
|
||||
|
||||
// EnvVaultKeyName is the environment variable used to specify
|
||||
// the vault named key-ring. In the S3 context it's referred as
|
||||
// customer master key ID (CMK-ID).
|
||||
EnvVaultKeyName = "MINIO_SSE_VAULT_KEY_NAME"
|
||||
|
||||
// EnvVaultCAPath is the environment variable used to specify the
|
||||
// path to a directory of PEM-encoded CA cert files. These CA cert
|
||||
// files are used to authenticate MinIO to Vault over mTLS.
|
||||
EnvVaultCAPath = "MINIO_SSE_VAULT_CAPATH"
|
||||
|
||||
// EnvVaultNamespace is the environment variable used to specify
|
||||
// vault namespace. The vault namespace is used if the enterprise
|
||||
// version of Hashicorp Vault is used.
|
||||
EnvVaultNamespace = "MINIO_SSE_VAULT_NAMESPACE"
|
||||
)
|
||||
|
||||
// SetKMSConfig helper to migrate from older KMSConfig to new KV.
|
||||
func SetKMSConfig(s config.Config, cfg KMSConfig) {
|
||||
s[config.KmsVaultSubSys][config.Default] = config.KVS{
|
||||
KMSVaultEndpoint: cfg.Vault.Endpoint,
|
||||
KMSVaultCAPath: cfg.Vault.CAPath,
|
||||
KMSVaultAuthType: func() string {
|
||||
if cfg.Vault.Auth.Type != "" {
|
||||
return cfg.Vault.Auth.Type
|
||||
}
|
||||
return "approle"
|
||||
}(),
|
||||
KMSVaultAppRoleID: cfg.Vault.Auth.AppRole.ID,
|
||||
KMSVaultAppRoleSecret: cfg.Vault.Auth.AppRole.Secret,
|
||||
KMSVaultKeyName: cfg.Vault.Key.Name,
|
||||
KMSVaultKeyVersion: strconv.Itoa(cfg.Vault.Key.Version),
|
||||
KMSVaultNamespace: cfg.Vault.Namespace,
|
||||
config.State: func() string {
|
||||
if !cfg.Vault.IsEmpty() {
|
||||
return config.StateOn
|
||||
}
|
||||
return config.StateOff
|
||||
}(),
|
||||
config.Comment: "Settings for KMS Vault, after migrating config",
|
||||
}
|
||||
}
|
||||
|
||||
// lookupConfigLegacy extracts the KMS configuration provided by legacy
|
||||
// environment variables and merge them with the provided KMS configuration.
|
||||
// The merging follows the following rules:
|
||||
//
|
||||
// 1. A valid value provided as environment variable has higher priority
|
||||
// than the provided configuration and overwrites the value from the
|
||||
// configuration file.
|
||||
//
|
||||
// 2. A value specified as environment variable never changes the configuration
|
||||
// file. So it is never made a persistent setting.
|
||||
//
|
||||
// It sets the global KMS configuration according to the merged configuration
|
||||
// on success.
|
||||
func lookupConfigLegacy(kvs config.KVS) (KMSConfig, error) {
|
||||
autoBool, err := config.ParseBool(env.Get(EnvAutoEncryptionLegacy, config.StateOff))
|
||||
if err != nil {
|
||||
return KMSConfig{}, err
|
||||
}
|
||||
cfg := KMSConfig{
|
||||
AutoEncryption: autoBool,
|
||||
}
|
||||
stateBool, err := config.ParseBool(kvs.Get(config.State))
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
if !stateBool {
|
||||
return cfg, nil
|
||||
}
|
||||
vcfg := VaultConfig{}
|
||||
// Lookup Hashicorp-Vault configuration & overwrite config entry if ENV var is present
|
||||
vcfg.Endpoint = env.Get(EnvVaultEndpoint, kvs.Get(KMSVaultEndpoint))
|
||||
vcfg.CAPath = env.Get(EnvVaultCAPath, kvs.Get(KMSVaultCAPath))
|
||||
vcfg.Auth.Type = env.Get(EnvVaultAuthType, kvs.Get(KMSVaultAuthType))
|
||||
vcfg.Auth.AppRole.ID = env.Get(EnvVaultAppRoleID, kvs.Get(KMSVaultAppRoleID))
|
||||
vcfg.Auth.AppRole.Secret = env.Get(EnvVaultAppSecretID, kvs.Get(KMSVaultAppRoleSecret))
|
||||
vcfg.Key.Name = env.Get(EnvVaultKeyName, kvs.Get(KMSVaultKeyName))
|
||||
vcfg.Namespace = env.Get(EnvVaultNamespace, kvs.Get(KMSVaultNamespace))
|
||||
keyVersion := env.Get(EnvVaultKeyVersion, kvs.Get(KMSVaultKeyVersion))
|
||||
|
||||
if keyVersion != "" {
|
||||
vcfg.Key.Version, err = strconv.Atoi(keyVersion)
|
||||
if err != nil {
|
||||
return cfg, fmt.Errorf("Invalid ENV variable: Unable to parse %s value (`%s`)",
|
||||
EnvVaultKeyVersion, keyVersion)
|
||||
}
|
||||
}
|
||||
|
||||
if err = vcfg.Verify(); err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
|
||||
cfg.Vault = vcfg
|
||||
return cfg, nil
|
||||
}
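
Every sub-system now carries a "state" key, and config.ParseBool turns the stored value or the matching _STATE environment variable into a boolean. The stand-in below accepts the "on"/"off" spellings used throughout this change; the exact set of values the real config.ParseBool accepts, and its treatment of an empty string, are assumptions here.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseState: "on" enables, "off" disables; the usual strconv spellings
// (true/false/1/0) are assumed to work as well. Empty is treated as off here
// purely for convenience in this sketch.
func parseState(v string) (bool, error) {
	switch strings.ToLower(strings.TrimSpace(v)) {
	case "on":
		return true, nil
	case "off", "":
		return false, nil
	}
	return strconv.ParseBool(v)
}

func main() {
	for _, v := range []string{"on", "off", "true", ""} {
		b, err := parseState(v)
		fmt.Printf("%q -> %v (err=%v)\n", v, b, err)
	}
}
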
|
||||
@@ -109,9 +109,9 @@ func (m *cacheMeta) ToObjectInfo(bucket, object string) (o ObjectInfo) {
|
||||
|
||||
// represents disk cache struct
|
||||
type diskCache struct {
|
||||
dir string // caching directory
|
||||
maxDiskUsagePct int // max usage in %
|
||||
expiry int // cache expiry in days
|
||||
dir string // caching directory
|
||||
quotaPct int // max usage in %
|
||||
expiry int // cache expiry in days
|
||||
// mark false if drive is offline
|
||||
online bool
|
||||
// mutex to protect updates to online variable
|
||||
@@ -122,21 +122,17 @@ type diskCache struct {
|
||||
}
|
||||
|
||||
// Inits the disk cache dir if it is not initialized already.
|
||||
func newdiskCache(dir string, expiry int, maxDiskUsagePct int) (*diskCache, error) {
|
||||
func newDiskCache(dir string, expiry int, quotaPct int) (*diskCache, error) {
|
||||
if err := os.MkdirAll(dir, 0777); err != nil {
|
||||
return nil, fmt.Errorf("Unable to initialize '%s' dir, %s", dir, err)
|
||||
}
|
||||
|
||||
if expiry == 0 {
|
||||
expiry = globalCacheExpiry
|
||||
}
|
||||
cache := diskCache{
|
||||
dir: dir,
|
||||
expiry: expiry,
|
||||
maxDiskUsagePct: maxDiskUsagePct,
|
||||
purgeChan: make(chan struct{}),
|
||||
online: true,
|
||||
onlineMutex: &sync.RWMutex{},
|
||||
dir: dir,
|
||||
expiry: expiry,
|
||||
quotaPct: quotaPct,
|
||||
purgeChan: make(chan struct{}),
|
||||
online: true,
|
||||
onlineMutex: &sync.RWMutex{},
|
||||
pool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
b := directio.AlignedBlock(int(cacheBlkSize))
|
||||
@@ -152,7 +148,7 @@ func newdiskCache(dir string, expiry int, maxDiskUsagePct int) (*diskCache, erro
|
||||
// Ex. for a 100GB disk, if maxUsage is configured as 70% then cacheMaxDiskUsagePct is 70G
|
||||
// hence disk usage is low if the disk usage is less than 56G (because 80% of 70G is 56G)
|
||||
func (c *diskCache) diskUsageLow() bool {
|
||||
minUsage := c.maxDiskUsagePct * 80 / 100
|
||||
minUsage := c.quotaPct * 80 / 100
|
||||
di, err := disk.GetInfo(c.dir)
|
||||
if err != nil {
|
||||
reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir)
|
||||
@@ -175,7 +171,7 @@ func (c *diskCache) diskUsageHigh() bool {
|
||||
return true
|
||||
}
|
||||
usedPercent := (di.Total - di.Free) * 100 / di.Total
|
||||
return int(usedPercent) > c.maxDiskUsagePct
|
||||
return int(usedPercent) > c.quotaPct
|
||||
}
|
||||
|
||||
// Returns if size space can be allocated without exceeding
|
||||
@@ -189,7 +185,7 @@ func (c *diskCache) diskAvailable(size int64) bool {
|
||||
return false
|
||||
}
|
||||
usedPercent := (di.Total - (di.Free - uint64(size))) * 100 / di.Total
|
||||
return int(usedPercent) < c.maxDiskUsagePct
|
||||
return int(usedPercent) < c.quotaPct
|
||||
}
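
The rename from maxDiskUsagePct to quotaPct keeps the same arithmetic: the cache treats usage as "low" below 80% of the quota and refuses a write when the projected usage would cross the quota. A tiny worked example of the two checks with made-up disk numbers in place of disk.GetInfo:

package main

import "fmt"

// Both checks roughly mirror diskUsageLow/diskAvailable above; the values are
// illustrative only.
func main() {
	const quotaPct = 70 // cache quota, percent of the drive

	var (
		total uint64 = 100 << 30 // 100 GiB drive
		free  uint64 = 50 << 30  // 50 GiB free
		size  uint64 = 10 << 30  // incoming object of 10 GiB
	)

	// diskUsageLow: usage is "low" while used% stays under 80% of the quota.
	minUsage := quotaPct * 80 / 100
	usedPercent := (total - free) * 100 / total
	fmt.Println("usage low:", int(usedPercent) < minUsage) // 50% < 56% -> true

	// diskAvailable: a write of `size` fits while projected used% stays under
	// the quota.
	projected := (total - (free - size)) * 100 / total
	fmt.Println("write fits:", int(projected) < quotaPct) // 60% < 70% -> true
}
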
|
||||
|
||||
// Purge cache entries that were not accessed.
|
||||
|
||||
@@ -409,7 +409,12 @@ func newCache(config cache.Config) ([]*diskCache, bool, error) {
|
||||
return nil, false, errors.New("Atime support required for disk caching")
|
||||
}
|
||||
|
||||
cache, err := newdiskCache(dir, config.Expiry, config.MaxUse)
|
||||
quota := config.MaxUse
|
||||
if quota == 0 {
|
||||
quota = config.Quota
|
||||
}
|
||||
|
||||
cache, err := newDiskCache(dir, config.Expiry, quota)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@ import (
|
||||
|
||||
// Initialize cache objects.
|
||||
func initCacheObjects(disk string, cacheMaxUse int) (*diskCache, error) {
|
||||
return newdiskCache(disk, globalCacheExpiry, cacheMaxUse)
|
||||
return newDiskCache(disk, 80, cacheMaxUse)
|
||||
}
|
||||
|
||||
// inits diskCache struct for nDisks
|
||||
@@ -109,7 +109,7 @@ func TestGetCacheMaxUse(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
d, err := initDiskCaches(fsDirs, globalCacheMaxUse, t)
|
||||
d, err := initDiskCaches(fsDirs, 80, t)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -258,7 +258,7 @@ func TestDiskCacheMaxUse(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
d, err := initDiskCaches(fsDirs, globalCacheMaxUse, t)
|
||||
d, err := initDiskCaches(fsDirs, 80, t)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"github.com/minio/minio/cmd/config"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
@@ -381,4 +382,13 @@ func handleGatewayEnvVars() {
|
||||
logger.Fatal(err, "Unable to parse MINIO_GATEWAY_SSE value (`%s`)", gwsseVal)
|
||||
}
|
||||
}
|
||||
|
||||
accessKey := env.Get(config.EnvAccessKey, "")
|
||||
secretKey := env.Get(config.EnvSecretKey, "")
|
||||
cred, err := auth.CreateCredentials(accessKey, secretKey)
|
||||
if err != nil {
|
||||
logger.Fatal(config.ErrInvalidCredentials(err),
|
||||
"Unable to validate credentials inherited from the shell environment")
|
||||
}
|
||||
globalActiveCred = cred
|
||||
}
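
handleGatewayEnvVars now seeds globalActiveCred directly from the access/secret key environment variables instead of relying on the old globalIsEnvCreds flag (removed from StartGateway below). A minimal sketch of the same validation step; the MINIO_ACCESS_KEY / MINIO_SECRET_KEY names are an assumption, since the config.EnvAccessKey / config.EnvSecretKey constants are not expanded in this diff.

package main

import (
	"fmt"
	"os"

	"github.com/minio/minio/pkg/auth"
)

func main() {
	// Assumed names; the server reads these via config.EnvAccessKey/EnvSecretKey.
	accessKey := os.Getenv("MINIO_ACCESS_KEY")
	secretKey := os.Getenv("MINIO_SECRET_KEY")

	cred, err := auth.CreateCredentials(accessKey, secretKey)
	if err != nil {
		fmt.Println("invalid credentials from environment:", err)
		return
	}
	fmt.Println("gateway will run as:", cred.AccessKey)
}
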
|
||||
|
||||
@@ -142,11 +142,6 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
|
||||
// Handle gateway specific env
|
||||
handleGatewayEnvVars()
|
||||
|
||||
// Validate if we have access, secret set through environment.
|
||||
if !globalIsEnvCreds {
|
||||
logger.Fatal(config.ErrEnvCredentialsMissingGateway(nil), "Unable to start gateway")
|
||||
}
|
||||
|
||||
// Set system resources to maximum.
|
||||
logger.LogIf(context.Background(), setMaxResources())
|
||||
|
||||
@@ -179,7 +174,7 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
|
||||
registerMetricsRouter(router)
|
||||
|
||||
// Register web router when its enabled.
|
||||
if globalIsBrowserEnabled {
|
||||
if globalBrowserEnabled {
|
||||
logger.FatalIf(registerWebRouter(router), "Unable to configure web browser")
|
||||
}
|
||||
|
||||
@@ -202,15 +197,16 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
|
||||
|
||||
signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM)
|
||||
|
||||
// !!! Do not move this block !!!
|
||||
// For all gateways, the config needs to be loaded from env
|
||||
// prior to initializing the gateway layer
|
||||
{
|
||||
if !enableConfigOps {
|
||||
// TODO: We need to move this code with globalConfigSys.Init()
|
||||
// for now keep it here such that "s3" gateway layer initializes
|
||||
// itself properly when KMS is set.
|
||||
|
||||
// Initialize server config.
|
||||
srvCfg := newServerConfig()
|
||||
|
||||
// Override any values from ENVs.
|
||||
srvCfg.lookupConfigs()
|
||||
lookupConfigs(srvCfg)
|
||||
|
||||
// hold the mutex lock before a new config is assigned.
|
||||
globalServerConfigMu.Lock()
|
||||
@@ -218,7 +214,7 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
|
||||
globalServerConfigMu.Unlock()
|
||||
}
|
||||
|
||||
newObject, err := gw.NewGatewayLayer(globalServerConfig.GetCredential())
|
||||
newObject, err := gw.NewGatewayLayer(globalActiveCred)
|
||||
if err != nil {
|
||||
// Stop watching for any certificate changes.
|
||||
globalTLSCerts.Stop()
|
||||
@@ -248,11 +244,9 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
|
||||
globalDeploymentID = env.Get("MINIO_GATEWAY_DEPLOYMENT_ID", mustGetUUID())
|
||||
logger.SetDeploymentID(globalDeploymentID)
|
||||
|
||||
var cacheConfig = globalServerConfig.GetCacheConfig()
|
||||
if len(cacheConfig.Drives) > 0 {
|
||||
var err error
|
||||
if globalCacheConfig.Enabled {
|
||||
// initialize the new disk cache objects.
|
||||
globalCacheObjectAPI, err = newServerCacheObjects(context.Background(), cacheConfig)
|
||||
globalCacheObjectAPI, err = newServerCacheObjects(context.Background(), globalCacheConfig)
|
||||
logger.FatalIf(err, "Unable to initialize disk caching")
|
||||
}
|
||||
|
||||
|
||||
@@ -52,7 +52,7 @@ func printGatewayStartupMessage(apiEndPoints []string, backendType string) {
|
||||
// Prints common server startup message. Prints credential, region and browser access.
|
||||
func printGatewayCommonMsg(apiEndpoints []string) {
|
||||
// Get saved credentials.
|
||||
cred := globalServerConfig.GetCredential()
|
||||
cred := globalActiveCred
|
||||
|
||||
apiEndpointStr := strings.Join(apiEndpoints, " ")
|
||||
|
||||
@@ -64,7 +64,7 @@ func printGatewayCommonMsg(apiEndpoints []string) {
|
||||
}
|
||||
printEventNotifiers()
|
||||
|
||||
if globalIsBrowserEnabled {
|
||||
if globalBrowserEnabled {
|
||||
logStartupMessage(color.Blue("\nBrowser Access:"))
|
||||
logStartupMessage(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 3), apiEndpointStr))
|
||||
}
|
||||
|
||||
@@ -86,7 +86,7 @@ ENVIRONMENT VARIABLES:
|
||||
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ";".
|
||||
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";".
|
||||
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
|
||||
MINIO_CACHE_MAXUSE: Maximum permitted usage of the cache in percentage (0-100).
|
||||
MINIO_CACHE_QUOTA: Maximum permitted usage of the cache in percentage (0-100).
|
||||
|
||||
EXAMPLES:
|
||||
1. Start minio gateway server for Azure Blob Storage backend.
|
||||
@@ -105,7 +105,7 @@ EXAMPLES:
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1;/mnt/drive2;/mnt/drive3;/mnt/drive4"
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*;*.png"
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXPIRY{{.AssignmentOperator}}40
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_MAXUSE{{.AssignmentOperator}}80
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}80
|
||||
{{.Prompt}} {{.HelpName}}
|
||||
`
|
||||
|
||||
|
||||
@@ -72,7 +72,7 @@ ENVIRONMENT VARIABLES:
|
||||
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ";".
|
||||
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";".
|
||||
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
|
||||
MINIO_CACHE_MAXUSE: Maximum permitted usage of the cache in percentage (0-100).
|
||||
MINIO_CACHE_QUOTA: Maximum permitted usage of the cache in percentage (0-100).
|
||||
|
||||
EXAMPLES:
|
||||
1. Start minio gateway server for B2 backend.
|
||||
@@ -86,7 +86,7 @@ EXAMPLES:
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1;/mnt/drive2;/mnt/drive3;/mnt/drive4"
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*;*.png"
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXPIRY{{.AssignmentOperator}}40
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_MAXUSE{{.AssignmentOperator}}80
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}80
|
||||
{{.Prompt}} {{.HelpName}}
|
||||
`
|
||||
minio.RegisterGatewayCommand(cli.Command{
|
||||
|
||||
@@ -123,7 +123,7 @@ ENVIRONMENT VARIABLES:
|
||||
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ";".
|
||||
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";".
|
||||
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
|
||||
MINIO_CACHE_MAXUSE: Maximum permitted usage of the cache in percentage (0-100).
|
||||
MINIO_CACHE_QUOTA: Maximum permitted usage of the cache in percentage (0-100).
|
||||
|
||||
GCS credentials file:
|
||||
GOOGLE_APPLICATION_CREDENTIALS: Path to credentials.json
|
||||
@@ -143,7 +143,7 @@ EXAMPLES:
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1;/mnt/drive2;/mnt/drive3;/mnt/drive4"
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*;*.png"
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXPIRY{{.AssignmentOperator}}40
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_MAXUSE{{.AssignmentOperator}}80
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}80
|
||||
{{.Prompt}} {{.HelpName}} mygcsprojectid
|
||||
`
|
||||
|
||||
|
||||
@@ -79,7 +79,7 @@ ENVIRONMENT VARIABLES:
|
||||
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ";".
|
||||
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";".
|
||||
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
|
||||
MINIO_CACHE_MAXUSE: Maximum permitted usage of the cache in percentage (0-100).
|
||||
MINIO_CACHE_QUOTA: Maximum permitted usage of the cache in percentage (0-100).
|
||||
|
||||
EXAMPLES:
|
||||
1. Start minio gateway server for HDFS backend.
|
||||
@@ -93,7 +93,7 @@ EXAMPLES:
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1;/mnt/drive2;/mnt/drive3;/mnt/drive4"
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*;*.png"
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXPIRY{{.AssignmentOperator}}40
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_MAXUSE{{.AssignmentOperator}}80
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}80
|
||||
{{.Prompt}} {{.HelpName}} hdfs://namenode:8200
|
||||
`
|
||||
|
||||
|
||||
@@ -56,7 +56,7 @@ ENVIRONMENT VARIABLES:
|
||||
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ";".
|
||||
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";".
|
||||
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
|
||||
MINIO_CACHE_MAXUSE: Maximum permitted usage of the cache in percentage (0-100).
|
||||
MINIO_CACHE_QUOTA: Maximum permitted usage of the cache in percentage (0-100).
|
||||
|
||||
EXAMPLES:
|
||||
1. Start minio gateway server for NAS backend.
|
||||
@@ -70,7 +70,7 @@ EXAMPLES:
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1;/mnt/drive2;/mnt/drive3;/mnt/drive4"
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*;*.png"
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXPIRY{{.AssignmentOperator}}40
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_MAXUSE{{.AssignmentOperator}}80
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}80
|
||||
{{.Prompt}} {{.HelpName}} /shared/nasvol
|
||||
`
|
||||
|
||||
|
||||
@@ -75,7 +75,7 @@ ENVIRONMENT VARIABLES:
|
||||
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ";".
|
||||
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";".
|
||||
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
|
||||
MINIO_CACHE_MAXUSE: Maximum permitted usage of the cache in percentage (0-100).
|
||||
MINIO_CACHE_QUOTA: Maximum permitted usage of the cache in percentage (0-100).
|
||||
|
||||
EXAMPLES:
|
||||
1. Start minio gateway server for Aliyun OSS backend.
|
||||
@@ -94,7 +94,7 @@ EXAMPLES:
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1;/mnt/drive2;/mnt/drive3;/mnt/drive4"
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*;*.png"
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXPIRY{{.AssignmentOperator}}40
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_MAXUSE{{.AssignmentOperator}}80
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}80
|
||||
{{.Prompt}} {{.HelpName}}
|
||||
`
|
||||
|
||||
|
||||
@@ -70,7 +70,7 @@ ENVIRONMENT VARIABLES:
|
||||
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ";".
|
||||
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";".
|
||||
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
|
||||
MINIO_CACHE_MAXUSE: Maximum permitted usage of the cache in percentage (0-100).
|
||||
MINIO_CACHE_QUOTA: Maximum permitted usage of the cache in percentage (0-100).
|
||||
|
||||
LOGGER:
|
||||
MINIO_LOGGER_HTTP_ENDPOINT: HTTP endpoint URL to log all incoming requests.
|
||||
@@ -98,7 +98,7 @@ EXAMPLES:
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1;/mnt/drive2;/mnt/drive3;/mnt/drive4"
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*;*.png"
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXPIRY{{.AssignmentOperator}}40
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_MAXUSE{{.AssignmentOperator}}80
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}80
|
||||
{{.Prompt}} {{.HelpName}}
|
||||
|
||||
4. Start minio gateway server for AWS S3 backend using AWS environment variables.
|
||||
@@ -262,7 +262,7 @@ func (g *S3) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error)
|
||||
s := s3Objects{
|
||||
Client: clnt,
|
||||
}
|
||||
// Enables single encyption of KMS is configured.
|
||||
// Enables single encryption of KMS is configured.
|
||||
if minio.GlobalKMS != nil {
|
||||
encS := s3EncObjects{s}
|
||||
|
||||
|
||||
@@ -196,7 +196,7 @@ func guessIsBrowserReq(req *http.Request) bool {
|
||||
return false
|
||||
}
|
||||
aType := getRequestAuthType(req)
|
||||
return strings.Contains(req.Header.Get("User-Agent"), "Mozilla") && globalIsBrowserEnabled &&
|
||||
return strings.Contains(req.Header.Get("User-Agent"), "Mozilla") && globalBrowserEnabled &&
|
||||
(aType == authTypeJWT || aType == authTypeAnonymous)
|
||||
}
|
||||
|
||||
|
||||
@@ -113,7 +113,7 @@ func TestGuessIsRPC(t *testing.T) {
|
||||
|
||||
// Tests browser request guess function.
|
||||
func TestGuessIsBrowser(t *testing.T) {
|
||||
globalIsBrowserEnabled = true
|
||||
globalBrowserEnabled = true
|
||||
if guessIsBrowserReq(nil) {
|
||||
t.Fatal("Unexpected return for nil request")
|
||||
}
|
||||
|
||||
@@ -25,13 +25,17 @@ import (
|
||||
|
||||
etcd "github.com/coreos/etcd/clientv3"
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
"github.com/minio/minio/cmd/config/cache"
|
||||
"github.com/minio/minio/cmd/config/compress"
|
||||
xldap "github.com/minio/minio/cmd/config/identity/ldap"
|
||||
"github.com/minio/minio/cmd/config/identity/openid"
|
||||
"github.com/minio/minio/cmd/config/policy/opa"
|
||||
"github.com/minio/minio/cmd/config/storageclass"
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/certs"
|
||||
"github.com/minio/minio/pkg/dns"
|
||||
"github.com/minio/minio/pkg/iam/openid"
|
||||
iampolicy "github.com/minio/minio/pkg/iam/policy"
|
||||
"github.com/minio/minio/pkg/pubsub"
|
||||
)
|
||||
|
||||
@@ -118,16 +122,7 @@ var (
|
||||
globalGatewayName = ""
|
||||
|
||||
// This flag is set to 'true' by default
|
||||
globalIsBrowserEnabled = true
|
||||
|
||||
// This flag is set to 'true' when MINIO_BROWSER env is set.
|
||||
globalIsEnvBrowser = false
|
||||
|
||||
// Set to true if credentials were passed from env, default is false.
|
||||
globalIsEnvCreds = false
|
||||
|
||||
// This flag is set to 'true' when MINIO_REGION env is set.
|
||||
globalIsEnvRegion = false
|
||||
globalBrowserEnabled = true
|
||||
|
||||
// This flag is set to 'true' when MINIO_UPDATE env is set to 'off'. Default is false.
|
||||
globalInplaceUpdateDisabled = false
|
||||
@@ -154,6 +149,10 @@ var (
|
||||
|
||||
globalLifecycleSys *LifecycleSys
|
||||
|
||||
globalStorageClass storageclass.Config
|
||||
globalLDAPConfig xldap.Config
|
||||
globalOpenIDConfig openid.Config
|
||||
|
||||
// CA root certificates, a nil value means system certs pool will be used
|
||||
globalRootCAs *x509.CertPool
|
||||
|
||||
@@ -196,25 +195,15 @@ var (
|
||||
globalOperationTimeout = newDynamicTimeout(10*time.Minute /*30*/, 600*time.Second) // default timeout for general ops
|
||||
globalHealingTimeout = newDynamicTimeout(30*time.Minute /*1*/, 30*time.Minute) // timeout for healing related ops
|
||||
|
||||
globalIsEnvWORM bool
|
||||
// Is worm enabled
|
||||
globalWORMEnabled bool
|
||||
|
||||
// Is Disk Caching set up
|
||||
globalIsDiskCacheEnabled bool
|
||||
|
||||
// Disk cache drives
|
||||
globalCacheDrives []string
|
||||
globalCacheConfig cache.Config
|
||||
|
||||
// Disk cache excludes
|
||||
globalCacheExcludes []string
|
||||
|
||||
// Disk cache expiry
|
||||
globalCacheExpiry = 90
|
||||
// Max allowed disk cache percentage
|
||||
globalCacheMaxUse = 80
|
||||
// Initialized KMS configuration for disk cache
|
||||
globalCacheKMS crypto.KMS
|
||||
|
||||
// Allocated etcd endpoint for config and bucket DNS.
|
||||
globalEtcdClient *etcd.Client
|
||||
|
||||
@@ -235,11 +224,7 @@ var (
|
||||
globalAutoEncryption bool
|
||||
|
||||
// Is compression enabled?
|
||||
globalIsCompressionEnabled = false
|
||||
|
||||
// Include-list for compression.
|
||||
globalCompressExtensions = []string{".txt", ".log", ".csv", ".json", ".tar", ".xml", ".bin"}
|
||||
globalCompressMimeTypes = []string{"text/*", "application/json", "application/xml"}
|
||||
globalCompressConfig compress.Config
|
||||
|
||||
// Some standard object extensions which we strictly dis-allow for compression.
|
||||
standardExcludeCompressExtensions = []string{".gz", ".bz2", ".rar", ".zip", ".7z", ".xz", ".mp4", ".mkv", ".mov"}
|
||||
@@ -251,7 +236,7 @@ var (
|
||||
globalOpenIDValidators *openid.Validators
|
||||
|
||||
// OPA policy system.
|
||||
globalPolicyOPA *iampolicy.Opa
|
||||
globalPolicyOPA *opa.Opa
|
||||
|
||||
// Deployment ID - unique per deployment
|
||||
globalDeploymentID string
|
||||
@@ -272,15 +257,8 @@ var (
|
||||
// list. Feel free to add new relevant fields.
|
||||
func getGlobalInfo() (globalInfo map[string]interface{}) {
|
||||
globalInfo = map[string]interface{}{
|
||||
"isDistXL": globalIsDistXL,
|
||||
"isXL": globalIsXL,
|
||||
"isBrowserEnabled": globalIsBrowserEnabled,
|
||||
"isWorm": globalWORMEnabled,
|
||||
"isEnvBrowser": globalIsEnvBrowser,
|
||||
"isEnvCreds": globalIsEnvCreds,
|
||||
"isEnvRegion": globalIsEnvRegion,
|
||||
"isSSL": globalIsSSL,
|
||||
"serverRegion": globalServerRegion,
|
||||
"isWorm": globalWORMEnabled,
|
||||
"serverRegion": globalServerRegion,
|
||||
// Add more relevant global settings here.
|
||||
}
|
||||
|
||||
|
||||
@@ -47,7 +47,7 @@ func parseLocationConstraint(r *http.Request) (location string, s3Error APIError
|
||||
} // else for both err as nil or io.EOF
|
||||
location = locationConstraint.Location
|
||||
if location == "" {
|
||||
location = globalServerConfig.GetRegion()
|
||||
location = globalServerRegion
|
||||
}
|
||||
return location, ErrNone
|
||||
}
|
||||
@@ -55,7 +55,7 @@ func parseLocationConstraint(r *http.Request) (location string, s3Error APIError
|
||||
// Validates input location is same as configured region
|
||||
// of MinIO server.
|
||||
func isValidLocation(location string) bool {
|
||||
return globalServerConfig.GetRegion() == "" || globalServerConfig.GetRegion() == location
|
||||
return globalServerRegion == "" || globalServerRegion == location
|
||||
}
|
||||
|
||||
// Supported headers that needs to be extracted.
|
||||
@@ -191,9 +191,9 @@ func getReqAccessCred(r *http.Request, region string) (cred auth.Credentials) {
|
||||
if cred.AccessKey == "" {
|
||||
claims, owner, _ := webRequestAuthenticate(r)
|
||||
if owner {
|
||||
return globalServerConfig.GetCredential()
|
||||
return globalActiveCred
|
||||
}
|
||||
cred, _ = globalIAMSys.GetUser(claims.Subject)
|
||||
cred, _ = globalIAMSys.GetUser(claims.AccessKey())
|
||||
}
|
||||
return cred
|
||||
}
|
||||
@@ -204,7 +204,7 @@ func extractReqParams(r *http.Request) map[string]string {
|
||||
return nil
|
||||
}
|
||||
|
||||
region := globalServerConfig.GetRegion()
|
||||
region := globalServerRegion
|
||||
cred := getReqAccessCred(r, region)
|
||||
|
||||
// Success.
|
||||
|
||||
@@ -26,6 +26,8 @@ import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
)
|
||||
|
||||
// Tests validate bucket LocationConstraint.
|
||||
@@ -81,7 +83,7 @@ func TestIsValidLocationContraint(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
globalServerConfig.SetRegion(testCase.serverConfigRegion)
|
||||
config.SetRegion(globalServerConfig, testCase.serverConfigRegion)
|
||||
_, actualCode := parseLocationConstraint(testCase.request)
|
||||
if testCase.expectedCode != actualCode {
|
||||
t.Errorf("Test %d: Expected the APIErrCode to be %d, but instead found %d", i+1, testCase.expectedCode, actualCode)
|
||||
|
||||
15
cmd/iam.go
@@ -1125,7 +1125,7 @@ func (sys *IAMSys) IsAllowedSTS(args iampolicy.Args) bool {
|
||||
return combinedPolicy.IsAllowed(args)
|
||||
}
|
||||
|
||||
pname, ok := args.Claims[iampolicy.PolicyName]
|
||||
pname, ok := args.Claims[iamPolicyName()]
|
||||
if !ok {
|
||||
// When claims are set, it should have a "policy" field.
|
||||
return false
|
||||
@@ -1199,16 +1199,16 @@ func (sys *IAMSys) IsAllowed(args iampolicy.Args) bool {
|
||||
return ok
|
||||
}
|
||||
|
||||
// With claims set, we should do STS related checks and validation.
|
||||
if len(args.Claims) > 0 {
|
||||
return sys.IsAllowedSTS(args)
|
||||
}
|
||||
|
||||
// Policies don't apply to the owner.
|
||||
if args.IsOwner {
|
||||
return true
|
||||
}
|
||||
|
||||
// With claims set, we should do STS related checks and validation.
|
||||
if _, ok := args.Claims["aud"]; ok {
|
||||
return sys.IsAllowedSTS(args)
|
||||
}
|
||||
|
||||
policies, err := sys.PolicyDBGet(args.AccountName, false)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
@@ -1306,8 +1306,7 @@ func NewIAMSys() *IAMSys {
|
||||
// The default users system
|
||||
var utype UsersSysType
|
||||
switch {
|
||||
case globalServerConfig != nil &&
|
||||
globalServerConfig.LDAPServerConfig.ServerAddr != "":
|
||||
case globalLDAPConfig.Enabled:
|
||||
utype = LDAPUsersSysType
|
||||
default:
|
||||
utype = MinIOUsersSysType
|
||||
|
||||
132
cmd/jwt.go
@@ -55,24 +55,30 @@ func authenticateJWTUsers(accessKey, secretKey string, expiry time.Duration) (st
if err != nil {
return "", err
}
expiresAt := UTCNow().Add(expiry)
return authenticateJWTUsersWithCredentials(passedCredential, expiresAt)
}

serverCred := globalServerConfig.GetCredential()
if serverCred.AccessKey != passedCredential.AccessKey {
func authenticateJWTUsersWithCredentials(credentials auth.Credentials, expiresAt time.Time) (string, error) {
serverCred := globalActiveCred
if serverCred.AccessKey != credentials.AccessKey {
var ok bool
serverCred, ok = globalIAMSys.GetUser(accessKey)
serverCred, ok = globalIAMSys.GetUser(credentials.AccessKey)
if !ok {
return "", errInvalidAccessKeyID
}
}

if !serverCred.Equal(passedCredential) {
if !serverCred.Equal(credentials) {
return "", errAuthentication
}

jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.StandardClaims{
ExpiresAt: UTCNow().Add(expiry).Unix(),
Subject: accessKey,
})
claims := jwtgo.MapClaims{}
claims["exp"] = expiresAt.Unix()
claims["sub"] = credentials.AccessKey
claims["accessKey"] = credentials.AccessKey

jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, claims)
return jwt.SignedString([]byte(serverCred.SecretKey))
}

@@ -82,7 +88,7 @@ func authenticateJWTAdmin(accessKey, secretKey string, expiry time.Duration) (st
return "", err
}

serverCred := globalServerConfig.GetCredential()
serverCred := globalActiveCred

if serverCred.AccessKey != passedCredential.AccessKey {
return "", errInvalidAccessKeyID
@@ -92,10 +98,12 @@ func authenticateJWTAdmin(accessKey, secretKey string, expiry time.Duration) (st
return "", errAuthentication
}

jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.StandardClaims{
ExpiresAt: UTCNow().Add(expiry).Unix(),
Subject: accessKey,
})
claims := jwtgo.MapClaims{}
claims["exp"] = UTCNow().Add(expiry).Unix()
claims["sub"] = passedCredential.AccessKey
claims["accessKey"] = passedCredential.AccessKey

jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, claims)
return jwt.SignedString([]byte(serverCred.SecretKey))
}

@@ -121,18 +129,29 @@ func webTokenCallback(jwtToken *jwtgo.Token) (interface{}, error) {
return nil, errAuthentication
}

if claims, ok := jwtToken.Claims.(*jwtgo.StandardClaims); ok {
if claims.Subject == globalServerConfig.GetCredential().AccessKey {
return []byte(globalServerConfig.GetCredential().SecretKey), nil
if claimsPtr, ok := jwtToken.Claims.(*jwtgo.MapClaims); ok {
claims := *claimsPtr
accessKey, ok := claims["accessKey"].(string)
if !ok {
accessKey, ok = claims["sub"].(string)
if !ok {
return nil, errInvalidAccessKeyID
}
}
if accessKey == globalActiveCred.AccessKey {
return []byte(globalActiveCred.SecretKey), nil
}
if globalIAMSys == nil {
return nil, errInvalidAccessKeyID
}
cred, ok := globalIAMSys.GetUser(claims.Subject)
if !ok {
return nil, errInvalidAccessKeyID
if _, ok = claims["aud"].(string); !ok {
cred, ok := globalIAMSys.GetUser(accessKey)
if !ok {
return nil, errInvalidAccessKeyID
}
return []byte(cred.SecretKey), nil
}
return []byte(cred.SecretKey), nil
return []byte(globalActiveCred.SecretKey), nil
}

return nil, errAuthentication
@@ -141,6 +160,10 @@ func webTokenCallback(jwtToken *jwtgo.Token) (interface{}, error) {
func parseJWTWithClaims(tokenString string, claims jwtgo.Claims) (*jwtgo.Token, error) {
p := &jwtgo.Parser{
SkipClaimsValidation: true,
ValidMethods: []string{
jwtgo.SigningMethodHS256.Alg(),
jwtgo.SigningMethodHS512.Alg(),
},
}
jwtToken, err := p.ParseWithClaims(tokenString, claims, webTokenCallback)
if err != nil {
@@ -161,62 +184,77 @@ func isAuthTokenValid(token string) bool {
return err == nil
}

func webTokenAuthenticate(token string) (standardClaims, bool, error) {
var claims = jwtgo.StandardClaims{}
func webTokenAuthenticate(token string) (mapClaims, bool, error) {
var claims = jwtgo.MapClaims{}
if token == "" {
return standardClaims{claims}, false, errNoAuthToken
return mapClaims{claims}, false, errNoAuthToken
}

jwtToken, err := parseJWTWithClaims(token, &claims)
if err != nil {
return standardClaims{claims}, false, err
return mapClaims{claims}, false, err
}
if !jwtToken.Valid {
return standardClaims{claims}, false, errAuthentication
return mapClaims{claims}, false, errAuthentication
}
owner := claims.Subject == globalServerConfig.GetCredential().AccessKey
return standardClaims{claims}, owner, nil
accessKey, ok := claims["accessKey"].(string)
if !ok {
accessKey, ok = claims["sub"].(string)
if !ok {
return mapClaims{claims}, false, errAuthentication
}
}
owner := accessKey == globalActiveCred.AccessKey
return mapClaims{claims}, owner, nil
}

// jwt standardClaims
type standardClaims struct {
jwtgo.StandardClaims
type mapClaims struct {
jwtgo.MapClaims
}

func (s standardClaims) Map() map[string]interface{} {
m := make(map[string]interface{})
m["sub"] = s.Subject
m["iss"] = s.Issuer
m["aud"] = s.Audience
m["jti"] = s.Id
return m
func (m mapClaims) Map() map[string]interface{} {
return m.MapClaims
}

func (m mapClaims) AccessKey() string {
claimSub, ok := m.MapClaims["accessKey"].(string)
if !ok {
claimSub, _ = m.MapClaims["sub"].(string)
}
return claimSub
}

// Check if the request is authenticated.
// Returns nil if the request is authenticated. errNoAuthToken if token missing.
// Returns errAuthentication for all other errors.
func webRequestAuthenticate(req *http.Request) (standardClaims, bool, error) {
var claims = jwtgo.StandardClaims{}
func webRequestAuthenticate(req *http.Request) (mapClaims, bool, error) {
var claims = jwtgo.MapClaims{}
tokStr, err := jwtreq.AuthorizationHeaderExtractor.ExtractToken(req)
if err != nil {
if err == jwtreq.ErrNoTokenInRequest {
return standardClaims{claims}, false, errNoAuthToken
return mapClaims{claims}, false, errNoAuthToken
}
return standardClaims{claims}, false, err
return mapClaims{claims}, false, err
}
jwtToken, err := parseJWTWithClaims(tokStr, &claims)
if err != nil {
return standardClaims{claims}, false, err
return mapClaims{claims}, false, err
}
if !jwtToken.Valid {
return standardClaims{claims}, false, errAuthentication
return mapClaims{claims}, false, errAuthentication
}
owner := claims.Subject == globalServerConfig.GetCredential().AccessKey
return standardClaims{claims}, owner, nil
accessKey, ok := claims["accessKey"].(string)
if !ok {
accessKey, ok = claims["sub"].(string)
if !ok {
return mapClaims{claims}, false, errAuthentication
}
}
owner := accessKey == globalActiveCred.AccessKey
return mapClaims{claims}, owner, nil
}

func newAuthToken() string {
cred := globalServerConfig.GetCredential()
cred := globalActiveCred
token, err := authenticateNode(cred.AccessKey, cred.SecretKey)
logger.CriticalIf(context.Background(), err)
return token
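For reference, the switch from StandardClaims to MapClaims means tokens are now minted and verified through a free-form claim map carrying both "sub" and "accessKey". A compact sketch of that round trip with github.com/dgrijalva/jwt-go (illustrative only; the helper and variable names are not MinIO's, and the real code additionally restricts signing methods and looks keys up through IAM):

package main

import (
	"fmt"
	"time"

	jwtgo "github.com/dgrijalva/jwt-go"
)

var secretKey = []byte("minio-secret-key") // stands in for the credential's secret key

// mintToken builds a MapClaims token the same way the new code does:
// expiry, subject, and an explicit accessKey claim.
func mintToken(accessKey string, expiry time.Duration) (string, error) {
	claims := jwtgo.MapClaims{}
	claims["exp"] = time.Now().UTC().Add(expiry).Unix()
	claims["sub"] = accessKey
	claims["accessKey"] = accessKey
	return jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, claims).SignedString(secretKey)
}

// parseToken verifies the signature and pulls the access key back out,
// falling back from "accessKey" to "sub" just like webTokenCallback above.
func parseToken(token string) (string, error) {
	claims := jwtgo.MapClaims{}
	_, err := jwtgo.ParseWithClaims(token, claims, func(*jwtgo.Token) (interface{}, error) {
		return secretKey, nil
	})
	if err != nil {
		return "", err
	}
	accessKey, ok := claims["accessKey"].(string)
	if !ok {
		accessKey, _ = claims["sub"].(string)
	}
	return accessKey, nil
}

func main() {
	tok, _ := mintToken("minio", time.Hour)
	who, _ := parseToken(tok)
	fmt.Println(who) // minio
}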
@@ -38,7 +38,8 @@ func testAuthenticate(authType string, t *testing.T) {
if err != nil {
t.Fatalf("Error getting new credentials: %s", err)
}
globalServerConfig.SetCredential(cred)

globalActiveCred = cred

// Define test cases.
testCases := []struct {
@@ -105,7 +106,7 @@ func TestWebRequestAuthenticate(t *testing.T) {
t.Fatal(err)
}

creds := globalServerConfig.GetCredential()
creds := globalActiveCred
token, err := getTokenString(creds.AccessKey, creds.SecretKey)
if err != nil {
t.Fatalf("unable get token %s", err)
@@ -159,7 +160,7 @@ func BenchmarkAuthenticateNode(b *testing.B) {
b.Fatal(err)
}

creds := globalServerConfig.GetCredential()
creds := globalActiveCred
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {

@@ -177,7 +178,7 @@ func BenchmarkAuthenticateWeb(b *testing.B) {
b.Fatal(err)
}

creds := globalServerConfig.GetCredential()
creds := globalActiveCred
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
||||
lockMap: make(map[string][]lockRequesterInfo),
|
||||
},
|
||||
}
|
||||
creds := globalServerConfig.GetCredential()
|
||||
creds := globalActiveCred
|
||||
token, err := authenticateNode(creds.AccessKey, creds.SecretKey)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
||||
@@ -19,6 +19,7 @@ package logger
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
)
|
||||
|
||||
@@ -29,8 +30,9 @@ type Console struct {
|
||||
|
||||
// HTTP logger target
|
||||
type HTTP struct {
|
||||
Enabled bool `json:"enabled"`
|
||||
Endpoint string `json:"endpoint"`
|
||||
Enabled bool `json:"enabled"`
|
||||
Endpoint string `json:"endpoint"`
|
||||
AuthToken string `json:"authToken"`
|
||||
}
|
||||
|
||||
// Config console and http logger targets
|
||||
@@ -42,13 +44,30 @@ type Config struct {
|
||||
|
||||
// HTTP endpoint logger
|
||||
const (
|
||||
EnvLoggerHTTPEndpoint = "MINIO_LOGGER_HTTP_ENDPOINT"
|
||||
EnvAuditLoggerHTTPEndpoint = "MINIO_AUDIT_LOGGER_HTTP_ENDPOINT"
|
||||
Endpoint = "endpoint"
|
||||
AuthToken = "auth_token"
|
||||
|
||||
EnvLoggerHTTPEndpoint = "MINIO_LOGGER_HTTP_ENDPOINT"
|
||||
EnvLoggerHTTPAuthToken = "MINIO_LOGGER_HTTP_AUTH_TOKEN"
|
||||
|
||||
EnvLoggerHTTPAuditEndpoint = "MINIO_LOGGER_HTTP_AUDIT_ENDPOINT"
|
||||
EnvLoggerHTTPAuditAuthToken = "MINIO_LOGGER_HTTP_AUDIT_AUTH_TOKEN"
|
||||
)
|
||||
|
||||
// Default target name when no targets are found
|
||||
const (
|
||||
defaultTarget = "_"
|
||||
// Default KVS for loggerHTTP and loggerAuditHTTP
|
||||
var (
|
||||
DefaultKVS = config.KVS{
|
||||
config.State: config.StateOff,
|
||||
config.Comment: "This is a default HTTP logging configuration",
|
||||
Endpoint: "",
|
||||
AuthToken: "",
|
||||
}
|
||||
DefaultAuditKVS = config.KVS{
|
||||
config.State: config.StateOff,
|
||||
config.Comment: "This is a default HTTP Audit logging configuration",
|
||||
Endpoint: "",
|
||||
AuthToken: "",
|
||||
}
|
||||
)
|
||||
|
||||
// NewConfig - initialize new logger config.
|
||||
@@ -63,12 +82,12 @@ func NewConfig() Config {
|
||||
}
|
||||
|
||||
// Create an example HTTP logger
|
||||
cfg.HTTP[defaultTarget] = HTTP{
|
||||
cfg.HTTP[config.Default] = HTTP{
|
||||
Endpoint: "https://username:password@example.com/api",
|
||||
}
|
||||
|
||||
// Create an example Audit logger
|
||||
cfg.Audit[defaultTarget] = HTTP{
|
||||
cfg.Audit[config.Default] = HTTP{
|
||||
Endpoint: "https://username:password@example.com/api/audit",
|
||||
}
|
||||
|
||||
@@ -76,33 +95,148 @@ func NewConfig() Config {
|
||||
}
|
||||
|
||||
// LookupConfig - lookup logger config, override with ENVs if set.
|
||||
func LookupConfig(cfg Config) (Config, error) {
|
||||
if cfg.HTTP == nil {
|
||||
cfg.HTTP = make(map[string]HTTP)
|
||||
}
|
||||
if cfg.Audit == nil {
|
||||
cfg.Audit = make(map[string]HTTP)
|
||||
}
|
||||
func LookupConfig(scfg config.Config) (Config, error) {
|
||||
cfg := NewConfig()
|
||||
|
||||
envs := env.List(EnvLoggerHTTPEndpoint)
|
||||
var loggerTargets []string
|
||||
for _, k := range envs {
|
||||
target := strings.TrimPrefix(k, EnvLoggerHTTPEndpoint+defaultTarget)
|
||||
target := strings.TrimPrefix(k, EnvLoggerHTTPEndpoint+config.Default)
|
||||
if target == EnvLoggerHTTPEndpoint {
|
||||
target = defaultTarget
|
||||
target = config.Default
|
||||
}
|
||||
loggerTargets = append(loggerTargets, target)
|
||||
}
|
||||
|
||||
var loggerAuditTargets []string
|
||||
envs = env.List(EnvLoggerHTTPAuditEndpoint)
|
||||
for _, k := range envs {
|
||||
target := strings.TrimPrefix(k, EnvLoggerHTTPAuditEndpoint+config.Default)
|
||||
if target == EnvLoggerHTTPAuditEndpoint {
|
||||
target = config.Default
|
||||
}
|
||||
loggerAuditTargets = append(loggerAuditTargets, target)
|
||||
}
|
||||
|
||||
// List legacy ENVs if any.
|
||||
envs = env.List(EnvAuditLoggerHTTPEndpoint)
|
||||
for _, k := range envs {
|
||||
target := strings.TrimPrefix(k, EnvAuditLoggerHTTPEndpoint+config.Default)
|
||||
if target == EnvAuditLoggerHTTPEndpoint {
|
||||
target = config.Default
|
||||
}
|
||||
loggerAuditTargets = append(loggerAuditTargets, target)
|
||||
}
|
||||
|
||||
for starget, kv := range scfg[config.LoggerHTTPSubSys] {
|
||||
subSysTarget := config.LoggerHTTPSubSys
|
||||
if starget != config.Default {
|
||||
subSysTarget = config.LoggerHTTPSubSys + ":" + starget
|
||||
}
|
||||
if err := config.CheckValidKeys(subSysTarget, kv, DefaultKVS); err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
|
||||
enabled, err := config.ParseBool(kv.Get(config.State))
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
if !enabled {
|
||||
continue
|
||||
}
|
||||
|
||||
endpointEnv := EnvLoggerHTTPEndpoint
|
||||
if starget != config.Default {
|
||||
endpointEnv = EnvLoggerHTTPEndpoint + config.Default + starget
|
||||
}
|
||||
authTokenEnv := EnvLoggerHTTPAuthToken
|
||||
if starget != config.Default {
|
||||
authTokenEnv = EnvLoggerHTTPAuthToken + config.Default + starget
|
||||
}
|
||||
cfg.HTTP[starget] = HTTP{
|
||||
Enabled: true,
|
||||
Endpoint: env.Get(endpointEnv, kv.Get(Endpoint)),
|
||||
AuthToken: env.Get(authTokenEnv, kv.Get(AuthToken)),
|
||||
}
|
||||
}
|
||||
|
||||
for starget, kv := range scfg[config.LoggerHTTPAuditSubSys] {
|
||||
subSysTarget := config.LoggerHTTPAuditSubSys
|
||||
if starget != config.Default {
|
||||
subSysTarget = config.LoggerHTTPAuditSubSys + config.SubSystemSeparator + starget
|
||||
}
|
||||
if err := config.CheckValidKeys(subSysTarget, kv, DefaultAuditKVS); err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
|
||||
enabled, err := config.ParseBool(kv.Get(config.State))
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
if !enabled {
|
||||
continue
|
||||
}
|
||||
|
||||
endpointEnv := EnvLoggerHTTPAuditEndpoint
|
||||
if starget != config.Default {
|
||||
endpointEnv = EnvLoggerHTTPAuditEndpoint + config.Default + starget
|
||||
}
|
||||
legacyEndpointEnv := EnvAuditLoggerHTTPEndpoint
|
||||
if starget != config.Default {
|
||||
legacyEndpointEnv = EnvAuditLoggerHTTPEndpoint + config.Default + starget
|
||||
}
|
||||
endpoint := env.Get(legacyEndpointEnv, "")
|
||||
if endpoint == "" {
|
||||
endpoint = env.Get(endpointEnv, kv.Get(Endpoint))
|
||||
}
|
||||
authTokenEnv := EnvLoggerHTTPAuditAuthToken
|
||||
if starget != config.Default {
|
||||
authTokenEnv = EnvLoggerHTTPAuditAuthToken + config.Default + starget
|
||||
}
|
||||
cfg.HTTP[starget] = HTTP{
|
||||
Enabled: true,
|
||||
Endpoint: endpoint,
|
||||
AuthToken: env.Get(authTokenEnv, kv.Get(AuthToken)),
|
||||
}
|
||||
}
|
||||
|
||||
for _, target := range loggerTargets {
|
||||
endpointEnv := EnvLoggerHTTPEndpoint
|
||||
if target != config.Default {
|
||||
endpointEnv = EnvLoggerHTTPEndpoint + config.Default + target
|
||||
}
|
||||
authTokenEnv := EnvLoggerHTTPAuthToken
|
||||
if target != config.Default {
|
||||
authTokenEnv = EnvLoggerHTTPAuthToken + config.Default + target
|
||||
}
|
||||
cfg.HTTP[target] = HTTP{
|
||||
Enabled: true,
|
||||
Endpoint: env.Get(k, cfg.HTTP[target].Endpoint),
|
||||
Enabled: true,
|
||||
Endpoint: env.Get(endpointEnv, ""),
|
||||
AuthToken: env.Get(authTokenEnv, ""),
|
||||
}
|
||||
}
|
||||
aenvs := env.List(EnvAuditLoggerHTTPEndpoint)
|
||||
for _, k := range aenvs {
|
||||
target := strings.TrimPrefix(k, EnvAuditLoggerHTTPEndpoint+defaultTarget)
|
||||
if target == EnvAuditLoggerHTTPEndpoint {
|
||||
target = defaultTarget
|
||||
|
||||
for _, target := range loggerAuditTargets {
|
||||
endpointEnv := EnvLoggerHTTPAuditEndpoint
|
||||
if target != config.Default {
|
||||
endpointEnv = EnvLoggerHTTPAuditEndpoint + config.Default + target
|
||||
}
|
||||
cfg.Audit[target] = HTTP{
|
||||
Enabled: true,
|
||||
Endpoint: env.Get(k, cfg.Audit[target].Endpoint),
|
||||
legacyEndpointEnv := EnvAuditLoggerHTTPEndpoint
|
||||
if target != config.Default {
|
||||
legacyEndpointEnv = EnvAuditLoggerHTTPEndpoint + config.Default + target
|
||||
}
|
||||
endpoint := env.Get(legacyEndpointEnv, "")
|
||||
if endpoint == "" {
|
||||
endpoint = env.Get(endpointEnv, "")
|
||||
}
|
||||
authTokenEnv := EnvLoggerHTTPAuditAuthToken
|
||||
if target != config.Default {
|
||||
authTokenEnv = EnvLoggerHTTPAuditAuthToken + config.Default + target
|
||||
}
|
||||
cfg.HTTP[target] = HTTP{
|
||||
Enabled: true,
|
||||
Endpoint: endpoint,
|
||||
AuthToken: env.Get(authTokenEnv, ""),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
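The LookupConfig rewrite above establishes a simple precedence for every logger target: an environment variable, when set, wins over the stored KV value, and for audit targets the legacy MINIO_AUDIT_LOGGER_HTTP_ENDPOINT is still honoured ahead of the new MINIO_LOGGER_HTTP_AUDIT_ENDPOINT. A small standalone sketch of that resolution order (plain os.LookupEnv in place of MinIO's env package):

package main

import (
	"fmt"
	"os"
)

// getenv returns the environment value if set, otherwise the fallback —
// the same role env.Get plays in the hunk above.
func getenv(key, fallback string) string {
	if v, ok := os.LookupEnv(key); ok && v != "" {
		return v
	}
	return fallback
}

// resolveAuditEndpoint applies the precedence used for audit targets:
// legacy env var, then the new env var, then the value stored in the KV config.
func resolveAuditEndpoint(storedKV string) string {
	if v := getenv("MINIO_AUDIT_LOGGER_HTTP_ENDPOINT", ""); v != "" {
		return v // legacy variable still wins for existing deployments
	}
	return getenv("MINIO_LOGGER_HTTP_AUDIT_ENDPOINT", storedKV)
}

func main() {
	os.Setenv("MINIO_LOGGER_HTTP_AUDIT_ENDPOINT", "http://localhost:8080/minio/logs/audit")
	fmt.Println(resolveAuditEndpoint("http://stored.example.com/audit"))
}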
36 cmd/logger/help.go Normal file
@@ -0,0 +1,36 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package logger

import "github.com/minio/minio/cmd/config"

// Help template for logger http and audit
var (
Help = config.HelpKV{
Endpoint: `HTTP logger endpoint eg: "http://localhost:8080/minio/logs/server"`,
AuthToken: "Authorization token for logger endpoint",
config.State: "Indicates if HTTP logger is enabled or not",
config.Comment: "A comment to describe the HTTP logger setting",
}

HelpAudit = config.HelpKV{
Endpoint: `HTTP Audit logger endpoint eg: "http://localhost:8080/minio/logs/audit"`,
AuthToken: "Authorization token for logger endpoint",
config.State: "Indicates if HTTP Audit logger is enabled or not",
config.Comment: "A comment to describe the HTTP Audit logger setting",
}
)
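These HelpKV maps back the new per-parameter help support mentioned in the commit message: each config key is paired with a one-line description. A trivial sketch of rendering such a key/description map (the exact help output format is not part of this diff, so the layout here is illustrative):

package main

import "fmt"

// printHelp walks a key/description map like logger.Help above and prints
// one line per configuration key; map iteration order is not significant here.
func printHelp(subSys string, help map[string]string) {
	fmt.Printf("%s:\n", subSys)
	for key, desc := range help {
		fmt.Printf("  %-12s %s\n", key, desc)
	}
}

func main() {
	printHelp("logger_http", map[string]string{
		"endpoint":   `HTTP logger endpoint eg: "http://localhost:8080/minio/logs/server"`,
		"auth_token": "Authorization token for logger endpoint",
		"state":      "Indicates if HTTP logger is enabled or not",
	})
}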
54 cmd/logger/legacy.go Normal file
@@ -0,0 +1,54 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package logger

import "github.com/minio/minio/cmd/config"

// Legacy envs
const (
EnvAuditLoggerHTTPEndpoint = "MINIO_AUDIT_LOGGER_HTTP_ENDPOINT"
)

// SetLoggerHTTPAudit - helper for migrating older config to newer KV format.
func SetLoggerHTTPAudit(scfg config.Config, k string, args HTTP) {
scfg[config.LoggerHTTPAuditSubSys][k] = config.KVS{
config.State: func() string {
if args.Enabled {
return config.StateOn
}
return config.StateOff
}(),
config.Comment: "Settings for HTTP Audit logging, after migrating config",
Endpoint: args.Endpoint,
AuthToken: args.AuthToken,
}
}

// SetLoggerHTTP helper for migrating older config to newer KV format.
func SetLoggerHTTP(scfg config.Config, k string, args HTTP) {
scfg[config.LoggerHTTPSubSys][k] = config.KVS{
config.State: func() string {
if args.Enabled {
return config.StateOn
}
return config.StateOff
}(),
config.Comment: "Settings for HTTP logging, after migrating config",
Endpoint: args.Endpoint,
AuthToken: args.AuthToken,
}
}
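SetLoggerHTTP and SetLoggerHTTPAudit translate a pre-KV logger target into the new key/value entries during migration. Sketching the shape of that mapping with plain maps (the real config.Config and config.KVS types carry more behaviour, and the key names below assume the "state"/"comment"/"endpoint"/"auth_token" constants used elsewhere in this diff):

package main

import "fmt"

// legacyHTTP is the old-style logger target as it existed in config.json.
type legacyHTTP struct {
	Enabled   bool
	Endpoint  string
	AuthToken string
}

// toKVS mirrors SetLoggerHTTP above: the boolean flag becomes the "state"
// key and the remaining fields become plain key/value pairs.
func toKVS(args legacyHTTP) map[string]string {
	state := "off"
	if args.Enabled {
		state = "on"
	}
	return map[string]string{
		"state":      state,
		"comment":    "Settings for HTTP logging, after migrating config",
		"endpoint":   args.Endpoint,
		"auth_token": args.AuthToken,
	}
}

func main() {
	fmt.Println(toKVS(legacyHTTP{Enabled: true, Endpoint: "http://localhost:8080/minio/logs/server"}))
}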
@@ -31,6 +31,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/klauspost/compress/zip"
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/cmd/config/notify"
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
@@ -53,7 +55,7 @@ type NotificationSys struct {
|
||||
// GetARNList - returns available ARNs.
|
||||
func (sys *NotificationSys) GetARNList() []string {
|
||||
arns := []string{}
|
||||
region := globalServerConfig.GetRegion()
|
||||
region := globalServerRegion
|
||||
for _, targetID := range sys.targetList.List() {
|
||||
// httpclient target is part of ListenBucketNotification
|
||||
// which doesn't need to be listed as part of the ARN list
|
||||
@@ -1096,8 +1098,12 @@ func (sys *NotificationSys) NetworkInfo() []madmin.ServerNetworkHardwareInfo {
|
||||
}
|
||||
|
||||
// NewNotificationSys - creates new notification system object.
|
||||
func NewNotificationSys(config *serverConfig, endpoints EndpointList) *NotificationSys {
|
||||
targetList := getNotificationTargets(config)
|
||||
func NewNotificationSys(cfg config.Config, endpoints EndpointList) *NotificationSys {
|
||||
targetList, err := notify.GetNotificationTargets(cfg, GlobalServiceDoneCh, globalRootCAs)
|
||||
if err != nil {
|
||||
logger.FatalIf(err, "Unable to start notification sub system")
|
||||
}
|
||||
|
||||
remoteHosts := getRemoteHosts(endpoints)
|
||||
remoteClients := getRestClients(remoteHosts)
|
||||
|
||||
@@ -1232,7 +1238,7 @@ func readNotificationConfig(ctx context.Context, objAPI ObjectLayer, bucketName
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config, err := event.ParseConfig(bytes.NewReader(configData), globalServerConfig.GetRegion(), globalNotificationSys.targetList)
|
||||
config, err := event.ParseConfig(bytes.NewReader(configData), globalServerRegion, globalNotificationSys.targetList)
|
||||
logger.LogIf(ctx, err)
|
||||
return config, err
|
||||
}
|
||||
|
||||
@@ -36,6 +36,7 @@ import (
|
||||
"github.com/klauspost/compress/s2"
|
||||
"github.com/klauspost/readahead"
|
||||
"github.com/minio/minio-go/v6/pkg/s3utils"
|
||||
"github.com/minio/minio/cmd/config/compress"
|
||||
"github.com/minio/minio/cmd/config/storageclass"
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
@@ -378,17 +379,17 @@ func (o ObjectInfo) GetActualSize() int64 {
|
||||
// Using compression and encryption together enables room for side channel attacks.
|
||||
// Eliminate non-compressible objects by extensions/content-types.
|
||||
func isCompressible(header http.Header, object string) bool {
|
||||
if crypto.IsRequested(header) || excludeForCompression(header, object) {
|
||||
if crypto.IsRequested(header) || excludeForCompression(header, object, globalCompressConfig) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Eliminate the non-compressible objects.
|
||||
func excludeForCompression(header http.Header, object string) bool {
|
||||
func excludeForCompression(header http.Header, object string, cfg compress.Config) bool {
|
||||
objStr := object
|
||||
contentType := header.Get(xhttp.ContentType)
|
||||
if !globalIsCompressionEnabled {
|
||||
if !cfg.Enabled {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -398,12 +399,12 @@ func excludeForCompression(header http.Header, object string) bool {
|
||||
}
|
||||
|
||||
// Filter compression includes.
|
||||
if len(globalCompressExtensions) == 0 || len(globalCompressMimeTypes) == 0 {
|
||||
if len(cfg.Extensions) == 0 || len(cfg.MimeTypes) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
extensions := globalCompressExtensions
|
||||
mimeTypes := globalCompressMimeTypes
|
||||
extensions := cfg.Extensions
|
||||
mimeTypes := cfg.MimeTypes
|
||||
if hasStringSuffixInSlice(objStr, extensions) || hasPattern(mimeTypes, contentType) {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/klauspost/compress/s2"
|
||||
"github.com/minio/minio/cmd/config/compress"
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
)
|
||||
|
||||
@@ -417,9 +418,9 @@ func TestExcludeForCompression(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for i, test := range testCases {
|
||||
globalIsCompressionEnabled = true
|
||||
got := excludeForCompression(test.header, test.object)
|
||||
globalIsCompressionEnabled = false
|
||||
got := excludeForCompression(test.header, test.object, compress.Config{
|
||||
Enabled: true,
|
||||
})
|
||||
if got != test.result {
|
||||
t.Errorf("Test %d - expected %v but received %v",
|
||||
i+1, test.result, got)
|
||||
|
||||
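The compression check now takes its settings from a compress.Config value instead of package-level globals, as the hunks above show. A self-contained sketch of the same include-list decision (hypothetical type and names; the wildcard MIME match is only approximated here):

package main

import (
	"fmt"
	"strings"
)

type compressConfig struct {
	Enabled    bool
	Extensions []string
	MimeTypes  []string
}

// excludeForCompression reproduces the decision visible in the hunk: a
// disabled config excludes everything; an empty include list compresses
// everything; otherwise only listed extensions or content types qualify.
func excludeForCompression(object, contentType string, cfg compressConfig) bool {
	if !cfg.Enabled {
		return true
	}
	if len(cfg.Extensions) == 0 || len(cfg.MimeTypes) == 0 {
		return false
	}
	for _, ext := range cfg.Extensions {
		if strings.HasSuffix(object, ext) {
			return false
		}
	}
	for _, mt := range cfg.MimeTypes {
		if strings.HasPrefix(contentType, strings.TrimSuffix(mt, "*")) {
			return false
		}
	}
	return true
}

func main() {
	cfg := compressConfig{Enabled: true, Extensions: []string{".txt"}, MimeTypes: []string{"text/*"}}
	fmt.Println(excludeForCompression("notes.txt", "text/plain", cfg)) // false: eligible for compression
	fmt.Println(excludeForCompression("photo.jpg", "image/jpeg", cfg)) // true: excluded
}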
@@ -592,7 +592,7 @@ func getCpObjMetadataFromHeader(ctx context.Context, r *http.Request, userMeta m
|
||||
// Returns a minio-go Client configured to access remote host described by destDNSRecord
|
||||
// Applicable only in a federated deployment
|
||||
var getRemoteInstanceClient = func(r *http.Request, host string) (*miniogo.Core, error) {
|
||||
cred := getReqAccessCred(r, globalServerConfig.GetRegion())
|
||||
cred := getReqAccessCred(r, globalServerRegion)
|
||||
// In a federated deployment, all the instances share config files
|
||||
// and hence expected to have same credentials.
|
||||
core, err := miniogo.NewCore(host, cred.AccessKey, cred.SecretKey, globalIsSSL)
|
||||
@@ -1161,7 +1161,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
|
||||
}
|
||||
|
||||
case authTypePresigned, authTypeSigned:
|
||||
if s3Err = reqSignatureV4Verify(r, globalServerConfig.GetRegion(), serviceS3); s3Err != ErrNone {
|
||||
if s3Err = reqSignatureV4Verify(r, globalServerRegion, serviceS3); s3Err != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
@@ -1840,7 +1840,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
|
||||
return
|
||||
}
|
||||
case authTypePresigned, authTypeSigned:
|
||||
if s3Error = reqSignatureV4Verify(r, globalServerConfig.GetRegion(), serviceS3); s3Error != ErrNone {
|
||||
if s3Error = reqSignatureV4Verify(r, globalServerRegion, serviceS3); s3Error != ErrNone {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -61,7 +61,7 @@ func getServerInfo() (*ServerInfoData, error) {
|
||||
CommitID: CommitID,
|
||||
DeploymentID: globalDeploymentID,
|
||||
SQSARN: globalNotificationSys.GetARNList(),
|
||||
Region: globalServerConfig.GetRegion(),
|
||||
Region: globalServerRegion,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -132,7 +132,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr
|
||||
// Register the API end points with XL/FS object layer.
|
||||
apiRouter := initTestAPIEndPoints(obj, []string{"PostPolicy"})
|
||||
|
||||
credentials := globalServerConfig.GetCredential()
|
||||
credentials := globalActiveCred
|
||||
|
||||
curTime := UTCNow()
|
||||
curTimePlus5Min := curTime.Add(time.Minute * 5)
|
||||
@@ -446,7 +446,7 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t
|
||||
// Register the API end points with XL/FS object layer.
|
||||
apiRouter := initTestAPIEndPoints(obj, []string{"PostPolicy"})
|
||||
|
||||
credentials := globalServerConfig.GetCredential()
|
||||
credentials := globalActiveCred
|
||||
|
||||
curTime := UTCNow()
|
||||
curTimePlus5Min := curTime.Add(time.Minute * 5)
|
||||
|
||||
@@ -113,7 +113,7 @@ func configureServerHandler(endpoints EndpointList) (http.Handler, error) {
|
||||
registerMetricsRouter(router)
|
||||
|
||||
// Register web router when its enabled.
|
||||
if globalIsBrowserEnabled {
|
||||
if globalBrowserEnabled {
|
||||
if err := registerWebRouter(router); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -31,6 +31,7 @@ import (
|
||||
"github.com/minio/minio/cmd/config"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/certs"
|
||||
"github.com/minio/minio/pkg/color"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
@@ -94,10 +95,10 @@ ENVIRONMENT VARIABLES:
|
||||
MINIO_ETCD_ENDPOINTS: To enable bucket DNS requests, set this value to list of etcd endpoints delimited by ",".
|
||||
|
||||
KMS:
|
||||
MINIO_SSE_VAULT_ENDPOINT: To enable Vault as KMS,set this value to Vault endpoint.
|
||||
MINIO_SSE_VAULT_APPROLE_ID: To enable Vault as KMS,set this value to Vault AppRole ID.
|
||||
MINIO_SSE_VAULT_APPROLE_SECRET: To enable Vault as KMS,set this value to Vault AppRole Secret ID.
|
||||
MINIO_SSE_VAULT_KEY_NAME: To enable Vault as KMS,set this value to Vault encryption key-ring name.
|
||||
MINIO_KMS_VAULT_ENDPOINT: To enable Vault as KMS,set this value to Vault endpoint.
|
||||
MINIO_KMS_VAULT_APPROLE_ID: To enable Vault as KMS,set this value to Vault AppRole ID.
|
||||
MINIO_KMS_VAULT_APPROLE_SECRET: To enable Vault as KMS,set this value to Vault AppRole Secret ID.
|
||||
MINIO_KMS_VAULT_KEY_NAME: To enable Vault as KMS,set this value to Vault encryption key-ring name.
|
||||
|
||||
EXAMPLES:
|
||||
1. Start minio server on "/home/shared" directory.
|
||||
@@ -119,10 +120,10 @@ EXAMPLES:
|
||||
{{.Prompt}} {{.HelpName}} http://node{1...32}.example.com/mnt/export/{1...32}
|
||||
|
||||
6. Start minio server with KMS enabled.
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SSE_VAULT_APPROLE_ID{{.AssignmentOperator}}9b56cc08-8258-45d5-24a3-679876769126
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SSE_VAULT_APPROLE_SECRET{{.AssignmentOperator}}4e30c52f-13e4-a6f5-0763-d50e8cb4321f
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SSE_VAULT_ENDPOINT{{.AssignmentOperator}}https://vault-endpoint-ip:8200
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SSE_VAULT_KEY_NAME{{.AssignmentOperator}}my-minio-key
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_KMS_VAULT_APPROLE_ID{{.AssignmentOperator}}9b56cc08-8258-45d5-24a3-679876769126
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_KMS_VAULT_APPROLE_SECRET{{.AssignmentOperator}}4e30c52f-13e4-a6f5-0763-d50e8cb4321f
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_KMS_VAULT_ENDPOINT{{.AssignmentOperator}}https://vault-endpoint-ip:8200
|
||||
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_KMS_VAULT_KEY_NAME{{.AssignmentOperator}}my-minio-key
|
||||
{{.Prompt}} {{.HelpName}} /home/shared
|
||||
`,
|
||||
}
|
||||
@@ -181,12 +182,16 @@ func serverHandleEnvVars() {
|
||||
// Handle common environment variables.
|
||||
handleCommonEnvVars()
|
||||
|
||||
if serverRegion := env.Get("MINIO_REGION", ""); serverRegion != "" {
|
||||
// region Envs are set globally.
|
||||
globalIsEnvRegion = true
|
||||
globalServerRegion = serverRegion
|
||||
accessKey := env.Get(config.EnvAccessKey, "")
|
||||
secretKey := env.Get(config.EnvSecretKey, "")
|
||||
if accessKey != "" && secretKey != "" {
|
||||
cred, err := auth.CreateCredentials(accessKey, secretKey)
|
||||
if err != nil {
|
||||
logger.Fatal(config.ErrInvalidCredentials(err),
|
||||
"Unable to validate credentials inherited from the shell environment")
|
||||
}
|
||||
globalActiveCred = cred
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
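serverHandleEnvVars now seeds the server region and the active credential straight from the environment, as the hunk above shows. A standalone sketch of that startup step (plain os.Getenv in place of MinIO's env and auth packages; the length checks only approximate what auth.CreateCredentials validates):

package main

import (
	"errors"
	"fmt"
	"os"
)

// createCredentials stands in for auth.CreateCredentials: it only enforces
// minimum lengths, which is the gist of the validation performed there.
func createCredentials(accessKey, secretKey string) (string, string, error) {
	if len(accessKey) < 3 || len(secretKey) < 8 {
		return "", "", errors.New("invalid credentials inherited from the shell environment")
	}
	return accessKey, secretKey, nil
}

func main() {
	region := os.Getenv("MINIO_REGION") // empty means the default region is used elsewhere
	accessKey := os.Getenv("MINIO_ACCESS_KEY")
	secretKey := os.Getenv("MINIO_SECRET_KEY")

	if accessKey != "" && secretKey != "" {
		ak, _, err := createCredentials(accessKey, secretKey)
		if err != nil {
			fmt.Println("fatal:", err) // the real server exits here via logger.Fatal
			os.Exit(1)
		}
		fmt.Println("active credential:", ak, "region:", region)
	}
}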
// serverMain handler called for 'minio server' command.
|
||||
@@ -231,38 +236,9 @@ func serverMain(ctx *cli.Context) {
|
||||
checkUpdate(getMinioMode())
|
||||
}
|
||||
|
||||
if globalIsDiskCacheEnabled {
|
||||
logger.StartupMessage(color.Red(color.Bold("Disk caching is allowed only for gateway deployments")))
|
||||
}
|
||||
|
||||
// FIXME: This code should be removed in future releases and we should have mandatory
|
||||
// check for ENVs credentials under distributed setup. Until all users migrate we
|
||||
// are intentionally providing backward compatibility.
|
||||
{
|
||||
// Check for backward compatibility and newer style.
|
||||
if !globalIsEnvCreds && globalIsDistXL {
|
||||
// Try to load old config file if any, for backward compatibility.
|
||||
var cfg = &serverConfig{}
|
||||
if _, err = Load(getConfigFile(), cfg); err == nil {
|
||||
globalActiveCred = cfg.Credential
|
||||
}
|
||||
|
||||
if os.IsNotExist(err) {
|
||||
if _, err = Load(getConfigFile()+".deprecated", cfg); err == nil {
|
||||
globalActiveCred = cfg.Credential
|
||||
}
|
||||
}
|
||||
|
||||
if globalActiveCred.IsValid() {
|
||||
// Credential is valid don't throw an error instead print a message regarding deprecation of 'config.json'
|
||||
// based model and proceed to use it for now in distributed setup.
|
||||
logger.Info(`Supplying credentials from your 'config.json' is **DEPRECATED**, Access key and Secret key in distributed server mode is expected to be specified with environment variables MINIO_ACCESS_KEY and MINIO_SECRET_KEY. This approach will become mandatory in future releases, please migrate to this approach soon.`)
|
||||
} else {
|
||||
// Credential is not available anywhere by both means, we cannot start distributed setup anymore, fail eagerly.
|
||||
logger.Fatal(config.ErrEnvCredentialsMissingDistributed(nil),
|
||||
"Unable to initialize the server in distributed mode")
|
||||
}
|
||||
}
|
||||
if !globalActiveCred.IsValid() && globalIsDistXL {
|
||||
logger.Fatal(config.ErrEnvCredentialsMissingDistributed(nil),
|
||||
"Unable to initialize the server in distributed mode")
|
||||
}
|
||||
|
||||
// Set system resources to maximum.
|
||||
@@ -335,6 +311,14 @@ func serverMain(ctx *cli.Context) {
|
||||
logger.Fatal(err, "Unable to initialize config system")
|
||||
}
|
||||
|
||||
if globalCacheConfig.Enabled {
|
||||
logger.StartupMessage(color.Red(color.Bold("Disk caching is recommended only for gateway deployments")))
|
||||
|
||||
// initialize the new disk cache objects.
|
||||
globalCacheObjectAPI, err = newServerCacheObjects(context.Background(), globalCacheConfig)
|
||||
logger.FatalIf(err, "Unable to initialize disk caching")
|
||||
}
|
||||
|
||||
// Create new IAM system.
|
||||
globalIAMSys = NewIAMSys()
|
||||
if err = globalIAMSys.Init(newObject); err != nil {
|
||||
@@ -390,6 +374,11 @@ func serverMain(ctx *cli.Context) {
|
||||
// Prints the formatted startup message once object layer is initialized.
|
||||
printStartupMessage(getAPIEndpoints())
|
||||
|
||||
if globalActiveCred.Equal(auth.DefaultCredentials) {
|
||||
msg := fmt.Sprintf("Detected default credentials '%s', please change the credentials immediately using 'MINIO_ACCESS_KEY' and 'MINIO_SECRET_KEY'", globalActiveCred)
|
||||
logger.StartupMessage(color.Red(color.Bold(msg)))
|
||||
}
|
||||
|
||||
// Set uptime time after object layer has initialized.
|
||||
globalBootTime = UTCNow()
|
||||
|
||||
|
||||
@@ -114,10 +114,10 @@ func stripStandardPorts(apiEndpoints []string) (newAPIEndpoints []string) {
|
||||
// Prints common server startup message. Prints credential, region and browser access.
|
||||
func printServerCommonMsg(apiEndpoints []string) {
|
||||
// Get saved credentials.
|
||||
cred := globalServerConfig.GetCredential()
|
||||
cred := globalActiveCred
|
||||
|
||||
// Get saved region.
|
||||
region := globalServerConfig.GetRegion()
|
||||
region := globalServerRegion
|
||||
|
||||
apiEndpointStr := strings.Join(apiEndpoints, " ")
|
||||
|
||||
@@ -132,7 +132,7 @@ func printServerCommonMsg(apiEndpoints []string) {
|
||||
}
|
||||
printEventNotifiers()
|
||||
|
||||
if globalIsBrowserEnabled {
|
||||
if globalBrowserEnabled {
|
||||
logStartupMessage(color.Blue("\nBrowser Access:"))
|
||||
logStartupMessage(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 3), apiEndpointStr))
|
||||
}
|
||||
@@ -157,7 +157,7 @@ func printEventNotifiers() {
|
||||
// and custom platform specific message.
|
||||
func printCLIAccessMsg(endPoint string, alias string) {
|
||||
// Get saved credentials.
|
||||
cred := globalServerConfig.GetCredential()
|
||||
cred := globalActiveCred
|
||||
|
||||
// Configure 'mc', following block prints platform specific information for minio client.
|
||||
if color.IsTerminal() {
|
||||
|
||||
@@ -69,7 +69,7 @@ var resourceList = []string{
|
||||
}
|
||||
|
||||
func doesPolicySignatureV2Match(formValues http.Header) APIErrorCode {
|
||||
cred := globalServerConfig.GetCredential()
|
||||
cred := globalActiveCred
|
||||
accessKey := formValues.Get(xhttp.AmzAccessKeyID)
|
||||
cred, _, s3Err := checkKeyValid(accessKey)
|
||||
if s3Err != ErrNone {
|
||||
|
||||
@@ -52,8 +52,8 @@ func TestDoesPresignedV2SignatureMatch(t *testing.T) {
|
||||
now := UTCNow()
|
||||
|
||||
var (
|
||||
accessKey = globalServerConfig.GetCredential().AccessKey
|
||||
secretKey = globalServerConfig.GetCredential().SecretKey
|
||||
accessKey = globalActiveCred.AccessKey
|
||||
secretKey = globalActiveCred.SecretKey
|
||||
)
|
||||
testCases := []struct {
|
||||
queryParams map[string]string
|
||||
@@ -169,7 +169,7 @@ func TestValidateV2AuthHeader(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
accessID := globalServerConfig.GetCredential().AccessKey
|
||||
accessID := globalActiveCred.AccessKey
|
||||
testCases := []struct {
|
||||
authString string
|
||||
expectedError APIErrorCode
|
||||
@@ -248,7 +248,7 @@ func TestDoesPolicySignatureV2Match(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
creds := globalServerConfig.GetCredential()
|
||||
creds := globalActiveCred
|
||||
policy := "policy"
|
||||
testCases := []struct {
|
||||
accessKey string
|
||||
|
||||
@@ -123,7 +123,7 @@ func isValidRegion(reqRegion string, confRegion string) bool {
|
||||
// also returns if the access key is owner/admin.
|
||||
func checkKeyValid(accessKey string) (auth.Credentials, bool, APIErrorCode) {
|
||||
var owner = true
|
||||
var cred = globalServerConfig.GetCredential()
|
||||
var cred = globalActiveCred
|
||||
if cred.AccessKey != accessKey {
|
||||
if globalIAMSys == nil {
|
||||
return cred, false, ErrInvalidAccessKeyID
|
||||
|
||||
@@ -170,7 +170,7 @@ func compareSignatureV4(sig1, sig2 string) bool {
|
||||
// returns ErrNone if the signature matches.
|
||||
func doesPolicySignatureV4Match(formValues http.Header) APIErrorCode {
|
||||
// Server region.
|
||||
region := globalServerConfig.GetRegion()
|
||||
region := globalServerRegion
|
||||
|
||||
// Parse credential tag.
|
||||
credHeader, err := parseCredentialHeader("Credential="+formValues.Get(xhttp.AmzCredential), region, serviceS3)
|
||||
|
||||
@@ -37,7 +37,7 @@ func niceError(code APIErrorCode) string {
|
||||
func TestDoesPolicySignatureMatch(t *testing.T) {
|
||||
credentialTemplate := "%s/%s/%s/s3/aws4_request"
|
||||
now := UTCNow()
|
||||
accessKey := globalServerConfig.GetCredential().AccessKey
|
||||
accessKey := globalActiveCred.AccessKey
|
||||
|
||||
testCases := []struct {
|
||||
form http.Header
|
||||
@@ -73,7 +73,7 @@ func TestDoesPolicySignatureMatch(t *testing.T) {
|
||||
},
|
||||
"X-Amz-Date": []string{now.Format(iso8601Format)},
|
||||
"X-Amz-Signature": []string{
|
||||
getSignature(getSigningKey(globalServerConfig.GetCredential().SecretKey, now,
|
||||
getSignature(getSigningKey(globalActiveCred.SecretKey, now,
|
||||
globalMinioDefaultRegion, serviceS3), "policy"),
|
||||
},
|
||||
"Policy": []string{"policy"},
|
||||
@@ -106,8 +106,8 @@ func TestDoesPresignedSignatureMatch(t *testing.T) {
|
||||
now := UTCNow()
|
||||
credentialTemplate := "%s/%s/%s/s3/aws4_request"
|
||||
|
||||
region := globalServerConfig.GetRegion()
|
||||
accessKeyID := globalServerConfig.GetCredential().AccessKey
|
||||
region := globalServerRegion
|
||||
accessKeyID := globalActiveCred.AccessKey
|
||||
testCases := []struct {
|
||||
queryParams map[string]string
|
||||
headers map[string]string
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/cmd/config"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
|
||||
@@ -488,7 +489,7 @@ func testStorageAPIRenameFile(t *testing.T, storage StorageAPI) {
|
||||
}
|
||||
}
|
||||
|
||||
func newStorageRESTHTTPServerClient(t *testing.T) (*httptest.Server, *storageRESTClient, *serverConfig, string) {
|
||||
func newStorageRESTHTTPServerClient(t *testing.T) (*httptest.Server, *storageRESTClient, config.Config, string) {
|
||||
endpointPath, err := ioutil.TempDir("", ".TestStorageREST.")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %v", err)
|
||||
|
||||
@@ -73,7 +73,7 @@ func calculateSeedSignature(r *http.Request) (cred auth.Credentials, signature s
|
||||
v4Auth := req.Header.Get(xhttp.Authorization)
|
||||
|
||||
// Parse signature version '4' header.
|
||||
signV4Values, errCode := parseSignV4(v4Auth, globalServerConfig.GetRegion(), serviceS3)
|
||||
signV4Values, errCode := parseSignV4(v4Auth, globalServerRegion, serviceS3)
|
||||
if errCode != ErrNone {
|
||||
return cred, "", "", time.Time{}, errCode
|
||||
}