minio/cmd/config-current.go


/*
 * MinIO Cloud Storage, (C) 2016-2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"fmt"
	"strings"
	"sync"

	"github.com/minio/minio/cmd/config"
	"github.com/minio/minio/cmd/config/api"
	"github.com/minio/minio/cmd/config/cache"
	"github.com/minio/minio/cmd/config/compress"
	"github.com/minio/minio/cmd/config/crawler"
	"github.com/minio/minio/cmd/config/dns"
	"github.com/minio/minio/cmd/config/etcd"
	xldap "github.com/minio/minio/cmd/config/identity/ldap"
	"github.com/minio/minio/cmd/config/identity/openid"
	"github.com/minio/minio/cmd/config/notify"
	"github.com/minio/minio/cmd/config/policy/opa"
	"github.com/minio/minio/cmd/config/storageclass"
	"github.com/minio/minio/cmd/crypto"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/cmd/logger/target/http"
	"github.com/minio/minio/pkg/env"
	"github.com/minio/minio/pkg/madmin"
)
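
// initHelp - registers the default KV settings and the help descriptions for all config sub-systems.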
func initHelp() {
	var kvs = map[string]config.KVS{
		config.EtcdSubSys:           etcd.DefaultKVS,
		config.CacheSubSys:          cache.DefaultKVS,
		config.CompressionSubSys:    compress.DefaultKVS,
		config.IdentityLDAPSubSys:   xldap.DefaultKVS,
		config.IdentityOpenIDSubSys: openid.DefaultKVS,
		config.PolicyOPASubSys:      opa.DefaultKVS,
		config.RegionSubSys:         config.DefaultRegionKVS,
		config.APISubSys:            api.DefaultKVS,
		config.CredentialsSubSys:    config.DefaultCredentialKVS,
		config.KmsVaultSubSys:       crypto.DefaultVaultKVS,
		config.KmsKesSubSys:         crypto.DefaultKesKVS,
		config.LoggerWebhookSubSys:  logger.DefaultKVS,
		config.AuditWebhookSubSys:   logger.DefaultAuditKVS,
		config.CrawlerSubSys:        crawler.DefaultKVS,
	}
	for k, v := range notify.DefaultNotificationKVS {
		kvs[k] = v
	}
	if globalIsErasure {
		kvs[config.StorageClassSubSys] = storageclass.DefaultKVS
	}
	config.RegisterDefaultKVS(kvs)

	// Captures help for each sub-system
	var helpSubSys = config.HelpKVS{
		config.HelpKV{
			Key:         config.RegionSubSys,
			Description: "label the location of the server",
		},
		config.HelpKV{
			Key:         config.CacheSubSys,
			Description: "add caching storage tier",
		},
		config.HelpKV{
			Key:         config.CompressionSubSys,
			Description: "enable server side compression of objects",
		},
		config.HelpKV{
			Key:         config.EtcdSubSys,
			Description: "federate multiple clusters for IAM and Bucket DNS",
		},
		config.HelpKV{
			Key:         config.IdentityOpenIDSubSys,
			Description: "enable OpenID SSO support",
		},
		config.HelpKV{
			Key:         config.IdentityLDAPSubSys,
			Description: "enable LDAP SSO support",
		},
		config.HelpKV{
			Key:         config.PolicyOPASubSys,
			Description: "[DEPRECATED] enable external OPA for policy enforcement",
		},
		config.HelpKV{
			Key:         config.KmsVaultSubSys,
			Description: "enable external HashiCorp Vault key management service",
		},
		config.HelpKV{
			Key:         config.KmsKesSubSys,
			Description: "enable external MinIO key encryption service",
		},
		config.HelpKV{
			Key:         config.APISubSys,
			Description: "manage global HTTP API call specific features, such as throttling, authentication types, etc.",
		},
		config.HelpKV{
			Key:         config.CrawlerSubSys,
			Description: "manage continuous disk crawling for bucket disk usage, lifecycle, quota and data integrity checks",
		},
		config.HelpKV{
			Key:             config.LoggerWebhookSubSys,
			Description:     "send server logs to webhook endpoints",
			MultipleTargets: true,
		},
		config.HelpKV{
			Key:             config.AuditWebhookSubSys,
			Description:     "send audit logs to webhook endpoints",
			MultipleTargets: true,
		},
		config.HelpKV{
			Key:             config.NotifyWebhookSubSys,
			Description:     "publish bucket notifications to webhook endpoints",
			MultipleTargets: true,
		},
		config.HelpKV{
			Key:             config.NotifyAMQPSubSys,
			Description:     "publish bucket notifications to AMQP endpoints",
			MultipleTargets: true,
		},
		config.HelpKV{
			Key:             config.NotifyKafkaSubSys,
			Description:     "publish bucket notifications to Kafka endpoints",
			MultipleTargets: true,
		},
		config.HelpKV{
			Key:             config.NotifyMQTTSubSys,
			Description:     "publish bucket notifications to MQTT endpoints",
			MultipleTargets: true,
		},
		config.HelpKV{
			Key:             config.NotifyNATSSubSys,
			Description:     "publish bucket notifications to NATS endpoints",
			MultipleTargets: true,
		},
		config.HelpKV{
			Key:             config.NotifyNSQSubSys,
			Description:     "publish bucket notifications to NSQ endpoints",
			MultipleTargets: true,
		},
		config.HelpKV{
			Key:             config.NotifyMySQLSubSys,
			Description:     "publish bucket notifications to MySQL databases",
			MultipleTargets: true,
		},
		config.HelpKV{
			Key:             config.NotifyPostgresSubSys,
			Description:     "publish bucket notifications to Postgres databases",
			MultipleTargets: true,
		},
		config.HelpKV{
			Key:             config.NotifyESSubSys,
			Description:     "publish bucket notifications to Elasticsearch endpoints",
			MultipleTargets: true,
		},
		config.HelpKV{
			Key:             config.NotifyRedisSubSys,
			Description:     "publish bucket notifications to Redis datastores",
			MultipleTargets: true,
		},
	}

	if globalIsErasure {
		helpSubSys = append(helpSubSys, config.HelpKV{})
		copy(helpSubSys[2:], helpSubSys[1:])
		helpSubSys[1] = config.HelpKV{
			Key:         config.StorageClassSubSys,
			Description: "define object level redundancy",
		}
	}

	var helpMap = map[string]config.HelpKVS{
		"":                          helpSubSys, // Help for all sub-systems.
		config.RegionSubSys:         config.RegionHelp,
		config.APISubSys:            api.Help,
		config.StorageClassSubSys:   storageclass.Help,
		config.EtcdSubSys:           etcd.Help,
		config.CacheSubSys:          cache.Help,
		config.CompressionSubSys:    compress.Help,
		config.CrawlerSubSys:        crawler.Help,
		config.IdentityOpenIDSubSys: openid.Help,
		config.IdentityLDAPSubSys:   xldap.Help,
		config.PolicyOPASubSys:      opa.Help,
		config.KmsVaultSubSys:       crypto.HelpVault,
		config.KmsKesSubSys:         crypto.HelpKes,
		config.LoggerWebhookSubSys:  logger.Help,
		config.AuditWebhookSubSys:   logger.HelpAudit,
		config.NotifyAMQPSubSys:     notify.HelpAMQP,
		config.NotifyKafkaSubSys:    notify.HelpKafka,
		config.NotifyMQTTSubSys:     notify.HelpMQTT,
		config.NotifyNATSSubSys:     notify.HelpNATS,
		config.NotifyNSQSubSys:      notify.HelpNSQ,
		config.NotifyMySQLSubSys:    notify.HelpMySQL,
		config.NotifyPostgresSubSys: notify.HelpPostgres,
		config.NotifyRedisSubSys:    notify.HelpRedis,
		config.NotifyWebhookSubSys:  notify.HelpWebhook,
		config.NotifyESSubSys:       notify.HelpES,
	}

	config.RegisterHelpSubSys(helpMap)
}

var (
	// globalServerConfig server config.
	globalServerConfig   config.Config
	globalServerConfigMu sync.RWMutex
)
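
// validateConfig - validates each sub-system configuration and, where a remote
// service is enabled (etcd, KMS, LDAP, notification targets), verifies that it
// is reachable; returns the first error encountered.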
func validateConfig(s config.Config, setDriveCount int) error {
	// Disable merging env values with config for validation.
	env.SetEnvOff()

	// Enable env values to validate KMS.
	defer env.SetEnvOn()

	if _, err := config.LookupCreds(s[config.CredentialsSubSys][config.Default]); err != nil {
		return err
	}
	if _, err := config.LookupRegion(s[config.RegionSubSys][config.Default]); err != nil {
		return err
	}
	if _, err := api.LookupConfig(s[config.APISubSys][config.Default]); err != nil {
		return err
	}
	if globalIsErasure {
		if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount); err != nil {
			return err
		}
	}
	if _, err := cache.LookupConfig(s[config.CacheSubSys][config.Default]); err != nil {
		return err
	}
	if _, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default]); err != nil {
		return err
	}
	if _, err := crawler.LookupConfig(s[config.CrawlerSubSys][config.Default]); err != nil {
		return err
	}
	{
		etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
		if err != nil {
			return err
		}
		if etcdCfg.Enabled {
			etcdClnt, err := etcd.New(etcdCfg)
			if err != nil {
				return err
			}
			etcdClnt.Close()
		}
	}
	{
		kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
		if err != nil {
			return err
		}

		// Set env to enable master key validation.
		// this is needed only for KMS.
		env.SetEnvOn()

		if _, err = crypto.NewKMS(kmsCfg); err != nil {
			return err
		}

		// Disable merging env values for the rest.
		env.SetEnvOff()
	}
	if _, err := openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
		NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil {
		return err
	}
	{
		cfg, err := xldap.Lookup(s[config.IdentityLDAPSubSys][config.Default],
			globalRootCAs)
		if err != nil {
			return err
		}
		if cfg.Enabled {
			conn, cerr := cfg.Connect()
			if cerr != nil {
				return cerr
			}
			conn.Close()
		}
	}
	if _, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
		NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil {
		return err
	}
	if _, err := logger.LookupConfig(s); err != nil {
		return err
	}
	return notify.TestNotificationTargets(GlobalContext, s, NewGatewayHTTPTransport(), globalNotificationSys.ConfiguredTargetIDs())
}
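
// lookupConfigs - populates the global configuration state for all sub-systems from
// the given config, letting environment variables override stored values. Errors are
// logged and the server falls back to defaults; in gateway mode, failures for etcd,
// DNS and cache are treated as fatal.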
func lookupConfigs(s config.Config, setDriveCount int) {
	ctx := GlobalContext

	var err error
	if !globalActiveCred.IsValid() {
		// Env credentials are not set; fall back to the credentials stored in the config.
		globalActiveCred, err = config.LookupCreds(s[config.CredentialsSubSys][config.Default])
		if err != nil {
			logger.LogIf(ctx, fmt.Errorf("Invalid credentials configuration: %w", err))
		}
	}

	if dnsURL, dnsUser, dnsPass, ok := env.LookupEnv(config.EnvDNSWebhook); ok {
		globalDNSConfig, err = dns.NewOperatorDNS(dnsURL,
			dns.Authentication(dnsUser, dnsPass),
			dns.RootCAs(globalRootCAs))
		if err != nil {
			if globalIsGateway {
				logger.FatalIf(err, "Unable to initialize remote webhook DNS config")
			} else {
				logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err))
			}
		}
	}

	etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
	if err != nil {
		if globalIsGateway {
			logger.FatalIf(err, "Unable to initialize etcd config")
		} else {
			logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
		}
	}

	if etcdCfg.Enabled {
		if globalEtcdClient == nil {
			globalEtcdClient, err = etcd.New(etcdCfg)
			if err != nil {
				if globalIsGateway {
					logger.FatalIf(err, "Unable to initialize etcd config")
				} else {
					logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
				}
			}
		}

		if len(globalDomainNames) != 0 && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil {
			if globalDNSConfig != nil {
				// If a global DNS store is already configured, log a warning
				// in case users are confused.
				logger.LogIf(ctx, fmt.Errorf("DNS store is already configured with %s, not using etcd for DNS store", globalDNSConfig))
			} else {
				globalDNSConfig, err = dns.NewCoreDNS(etcdCfg.Config,
					dns.DomainNames(globalDomainNames),
					dns.DomainIPs(globalDomainIPs),
					dns.DomainPort(globalMinioPort),
					dns.CoreDNSPath(etcdCfg.CoreDNSPath),
				)
				if err != nil {
					if globalIsGateway {
						logger.FatalIf(err, "Unable to initialize DNS config")
					} else {
						logger.LogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w",
							globalDomainNames, err))
					}
				}
			}
		}
	}

	// Bucket federation is 'true' only when IAM assets are not namespaced
	// per tenant and all tenants are interested in globally available users.
	// If a namespace was requested, such as specifying etcdPathPrefix, then
	// we assume that users are interested in global bucket support
	// but not federation.
	globalBucketFederation = etcdCfg.PathPrefix == "" && etcdCfg.Enabled

	globalServerRegion, err = config.LookupRegion(s[config.RegionSubSys][config.Default])
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Invalid region configuration: %w", err))
	}

	apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default])
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
	}

	globalAPIConfig.init(apiConfig, setDriveCount)

	// Initialize remote instance transport once.
	getRemoteInstanceTransportOnce.Do(func() {
		getRemoteInstanceTransport = newGatewayHTTPTransport(apiConfig.RemoteTransportDeadline)
	})

	if globalIsErasure {
		globalStorageClass, err = storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount)
		if err != nil {
			logger.LogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err))
		}
	}

	globalCacheConfig, err = cache.LookupConfig(s[config.CacheSubSys][config.Default])
	if err != nil {
		if globalIsGateway {
			logger.FatalIf(err, "Unable to setup cache")
		} else {
			logger.LogIf(ctx, fmt.Errorf("Unable to setup cache: %w", err))
		}
	}

	if globalCacheConfig.Enabled {
		if cacheEncKey := env.Get(cache.EnvCacheEncryptionMasterKey, ""); cacheEncKey != "" {
			globalCacheKMS, err = crypto.ParseMasterKey(cacheEncKey)
			if err != nil {
				logger.LogIf(ctx, fmt.Errorf("Unable to setup encryption cache: %w", err))
			}
		}
	}

	globalCrawlerConfig, err = crawler.LookupConfig(s[config.CrawlerSubSys][config.Default])
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to read crawler config: %w", err))
	}

	kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to setup KMS config: %w", err))
	}

	GlobalKMS, err = crypto.NewKMS(kmsCfg)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to setup KMS with current KMS config: %w", err))
	}

	// Enable auto-encryption if configured.
	globalAutoEncryption = kmsCfg.AutoEncryption
	if globalAutoEncryption && !globalIsGateway {
		logger.LogIf(ctx, fmt.Errorf("%s env is deprecated please migrate to using `mc encrypt` at bucket level", crypto.EnvKMSAutoEncryption))
	}

	globalCompressConfig, err = compress.LookupConfig(s[config.CompressionSubSys][config.Default])
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to setup Compression: %w", err))
	}

	globalOpenIDConfig, err = openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
		NewGatewayHTTPTransport(), xhttp.DrainBody)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to initialize OpenID: %w", err))
	}

	opaCfg, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
		NewGatewayHTTPTransport(), xhttp.DrainBody)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to initialize OPA: %w", err))
	}

	globalOpenIDValidators = getOpenIDValidators(globalOpenIDConfig)
	globalPolicyOPA = opa.New(opaCfg)

	globalLDAPConfig, err = xldap.Lookup(s[config.IdentityLDAPSubSys][config.Default],
		globalRootCAs)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to parse LDAP configuration: %w", err))
	}

	// Load logger targets based on user's configuration
	loggerUserAgent := getUserAgent(getMinioMode())

	loggerCfg, err := logger.LookupConfig(s)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to initialize logger: %w", err))
	}

	for _, l := range loggerCfg.HTTP {
		if l.Enabled {
			// Enable http logging
			if err = logger.AddTarget(
				http.New(http.WithEndpoint(l.Endpoint),
					http.WithAuthToken(l.AuthToken),
					http.WithUserAgent(loggerUserAgent),
					http.WithLogKind(string(logger.All)),
					http.WithTransport(NewGatewayHTTPTransport()),
				),
			); err != nil {
				logger.LogIf(ctx, fmt.Errorf("Unable to initialize console HTTP target: %w", err))
			}
		}
	}

	for _, l := range loggerCfg.Audit {
		if l.Enabled {
			// Enable http audit logging
			if err = logger.AddAuditTarget(
				http.New(http.WithEndpoint(l.Endpoint),
					http.WithAuthToken(l.AuthToken),
					http.WithUserAgent(loggerUserAgent),
					http.WithLogKind(string(logger.All)),
					http.WithTransport(NewGatewayHTTPTransport()),
				),
			); err != nil {
				logger.LogIf(ctx, fmt.Errorf("Unable to initialize audit HTTP target: %w", err))
			}
		}
	}
	globalConfigTargetList, err = notify.GetNotificationTargets(GlobalContext, s, NewGatewayHTTPTransport(), false)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
	}
	globalEnvTargetList, err = notify.GetNotificationTargets(GlobalContext, newServerConfig(), NewGatewayHTTPTransport(), true)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
	}
}
// Help - sub-system level help.
type Help struct {
	SubSys          string         `json:"subSys"`
	Description     string         `json:"description"`
	MultipleTargets bool           `json:"multipleTargets"`
	KeysHelp        config.HelpKVS `json:"keysHelp"`
}

// GetHelp - returns help for a sub-system, for a specific key within a
// sub-system, or for all sub-systems when subSys is empty.
func GetHelp(subSys, key string, envOnly bool) (Help, error) {
	if len(subSys) == 0 {
		return Help{KeysHelp: config.HelpSubSysMap[subSys]}, nil
	}
	subSystemValue := strings.SplitN(subSys, config.SubSystemSeparator, 2)
	if len(subSystemValue) == 0 {
		return Help{}, config.Errorf("invalid number of arguments %s", subSys)
	}

	subSys = subSystemValue[0]

	subSysHelp, ok := config.HelpSubSysMap[""].Lookup(subSys)
	if !ok {
		return Help{}, config.Errorf("unknown sub-system %s", subSys)
	}

	h, ok := config.HelpSubSysMap[subSys]
	if !ok {
		return Help{}, config.Errorf("unknown sub-system %s", subSys)
	}
	if key != "" {
		value, ok := h.Lookup(key)
		if !ok {
			return Help{}, config.Errorf("unknown key %s for sub-system %s",
				key, subSys)
		}
		h = config.HelpKVS{value}
	}

	envHelp := config.HelpKVS{}
	if envOnly {
		// For sub-systems that support multiple targets, list the enable ENV
		// explicitly; for regular k/v config the EnableKey is implicit, but
		// for ENVs it cannot be made implicit.
		if subSysHelp.MultipleTargets {
			envK := config.EnvPrefix + strings.Join([]string{
				strings.ToTitle(subSys), strings.ToTitle(madmin.EnableKey),
			}, config.EnvWordDelimiter)
			envHelp = append(envHelp, config.HelpKV{
				Key:         envK,
				Description: fmt.Sprintf("enable %s target, default is 'off'", subSys),
				Optional:    false,
				Type:        "on|off",
			})
		}
		for _, hkv := range h {
			envK := config.EnvPrefix + strings.Join([]string{
				strings.ToTitle(subSys), strings.ToTitle(hkv.Key),
			}, config.EnvWordDelimiter)
			envHelp = append(envHelp, config.HelpKV{
				Key:         envK,
				Description: hkv.Description,
				Optional:    hkv.Optional,
				Type:        hkv.Type,
			})
		}
		h = envHelp
	}

	return Help{
		SubSys:          subSys,
		Description:     subSysHelp.Description,
		MultipleTargets: subSysHelp.MultipleTargets,
		KeysHelp:        h,
	}, nil
}
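
// newServerConfig - returns a new config populated with the default values for all sub-systems.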
func newServerConfig() config.Config {
	return config.New()
}

// newSrvConfig - initializes a new server config, saving env parameters if
// found, otherwise using the default parameters.
func newSrvConfig(objAPI ObjectLayer) error {
	// Initialize server config.
	srvCfg := newServerConfig()

	// hold the mutex lock before a new config is assigned.
	globalServerConfigMu.Lock()
	globalServerConfig = srvCfg
	globalServerConfigMu.Unlock()

	// Save config to file.
	return saveServerConfig(GlobalContext, objAPI, globalServerConfig)
}
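
// getValidConfig - returns the currently stored server config as read from the backend.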
func getValidConfig(objAPI ObjectLayer) (config.Config, error) {
	return readServerConfig(GlobalContext, objAPI)
}

// loadConfig - loads a new config from disk, overriding params
// from env if found and valid.
func loadConfig(objAPI ObjectLayer) error {
	srvCfg, err := getValidConfig(objAPI)
	if err != nil {
		return err
	}

	// Override any values from ENVs.
	lookupConfigs(srvCfg, objAPI.SetDriveCount())

	// hold the mutex lock before a new config is assigned.
	globalServerConfigMu.Lock()
	globalServerConfig = srvCfg
	globalServerConfigMu.Unlock()

	return nil
}

// getOpenIDValidators - returns a ValidatorList containing the providers
// enabled in the server config.
// A new authentication provider is added as follows:
//   * Add a new provider in the pkg/iam/openid package.
func getOpenIDValidators(cfg openid.Config) *openid.Validators {
	validators := openid.NewValidators()

	if cfg.JWKS.URL != nil {
		validators.Add(openid.NewJWT(cfg))
	}

	return validators
}