mirror of https://github.com/minio/minio.git
Fix crash in console logger and also handle bucket DNS updates (#8654)
Also fixes listenBucketNotification bugs seen with the minio-js listenBucketNotification API.
This commit is contained in:
parent 842d0241ed
commit c8d82588c2
@@ -36,7 +36,7 @@ matrix:
- make verifiers
- make crosscompile
- make verify
- cd browser && yarn && yarn test && cd ..
- cd browser && npm install && npm run test && cd ..
- bash -c 'shopt -s globstar; shellcheck mint/**/*.sh'

- os: windows
@@ -56,4 +56,4 @@ before_script:
- if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6'; fi

before_install:
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then nvm install 11.10.1 ; fi
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then nvm install stable ; fi
@@ -56,8 +56,12 @@ const (
// - Range over all the available buckets
// - Check if a bucket has an entry in etcd backend
// -- If no, make an entry
// -- If yes, check if the IP of entry matches local IP. This means entry is for this instance.
// -- If IP of the entry doesn't match, this means entry is for another instance. Log an error to console.
// -- If yes, check if the entry matches local IP check if we
// need to update the entry then proceed to update
// -- If yes, check if the IP of entry matches local IP.
// This means entry is for this instance.
// -- If IP of the entry doesn't match, this means entry is
// for another instance. Log an error to console.
func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
if len(buckets) == 0 {
return
@@ -85,11 +89,25 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
}
return gerr
}
if globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
// There is already an entry for this bucket, with all IP addresses different. This indicates a bucket name collision. Log an error and continue.
return fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket. Use one of these IP addresses %v to access the bucket", buckets[index].Name, globalDomainIPs.ToSlice())
if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
if globalDomainIPs.Difference(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
// No difference in terms of domainIPs and nothing
// has changed so we don't change anything on the etcd.
return nil
}
// if domain IPs intersect then it won't be an empty set.
// such an intersection means that bucket exists on etcd.
// but if we do see a difference with local domain IPs with
// hostSlice from etcd then we should update with newer
// domainIPs, we proceed to do that here.
return globalDNSConfig.Put(buckets[index].Name)
}
return nil
// No IPs seem to intersect, this means that bucket exists but has
// different IP addresses perhaps from a different deployment.
// bucket names are globally unique in federation at a given
// path prefix, name collision is not allowed. We simply log
// an error and continue.
return fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket. Use one of these IP addresses %v to access the bucket", buckets[index].Name, globalDomainIPs.ToSlice())
}, index)
}
@@ -115,9 +133,11 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
return nil
}

// We go to here, so we know the bucket no longer exists, but is registered in DNS to this server
// We go to here, so we know the bucket no longer exists,
// but is registered in DNS to this server
if err := globalDNSConfig.DeleteRecord(dnsBuckets[index]); err != nil {
return fmt.Errorf("Failed to remove DNS entry for %s due to %w", dnsBuckets[index].Key, err)
return fmt.Errorf("Failed to remove DNS entry for %s due to %w",
dnsBuckets[index].Key, err)
}

return nil
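The comment block above describes a three-way decision when reconciling a bucket's DNS record in etcd: no overlap with this deployment's IPs means a name collision, full overlap with no difference means nothing to do, and partial overlap means the record is ours but stale and should be rewritten. A minimal standalone sketch of that decision, using the same `github.com/minio/minio/pkg/set` helpers the diff calls (the `chooseDNSAction` helper and its inputs are illustrative, not part of the commit):

```go
package main

import (
	"fmt"

	"github.com/minio/minio/pkg/set"
)

// dnsAction is an illustrative enum for the three outcomes described above.
type dnsAction int

const (
	actionNoop   dnsAction = iota // entry already matches this instance
	actionUpdate                  // entry is ours but the IPs changed, rewrite it
	actionError                   // entry belongs to another deployment, report a collision
)

// chooseDNSAction compares the IPs stored in the etcd record (etcdIPs) with the
// IPs of this deployment (localIPs), mirroring the Intersection/Difference
// checks in initFederatorBackend.
func chooseDNSAction(localIPs, etcdIPs set.StringSet) dnsAction {
	if localIPs.Intersection(etcdIPs).IsEmpty() {
		// No overlap at all: the bucket name is taken by a different deployment.
		return actionError
	}
	if localIPs.Difference(etcdIPs).IsEmpty() {
		// Overlap and no difference: the record is already up to date.
		return actionNoop
	}
	// Overlap but the sets differ: the record is ours and needs refreshing.
	return actionUpdate
}

func main() {
	local := set.CreateStringSet("10.0.0.1", "10.0.0.2")
	stored := set.CreateStringSet("10.0.0.2", "10.0.0.9")
	fmt.Println(chooseDNSAction(local, stored)) // prints 1 (actionUpdate)
}
```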
@@ -31,8 +31,6 @@ import (
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/handlers"
xnet "github.com/minio/minio/pkg/net"
"github.com/minio/minio/pkg/policy"
)
@@ -232,39 +230,39 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
values := r.URL.Query()

var prefix string
if len(values["prefix"]) > 1 {
if len(values[peerRESTListenPrefix]) > 1 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrFilterNamePrefix), r.URL, guessIsBrowserReq(r))
return
}

if len(values["prefix"]) == 1 {
if err := event.ValidateFilterRuleValue(values["prefix"][0]); err != nil {
if len(values[peerRESTListenPrefix]) == 1 {
if err := event.ValidateFilterRuleValue(values[peerRESTListenPrefix][0]); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

prefix = values["prefix"][0]
prefix = values[peerRESTListenPrefix][0]
}

var suffix string
if len(values["suffix"]) > 1 {
if len(values[peerRESTListenSuffix]) > 1 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrFilterNameSuffix), r.URL, guessIsBrowserReq(r))
return
}

if len(values["suffix"]) == 1 {
if err := event.ValidateFilterRuleValue(values["suffix"][0]); err != nil {
if len(values[peerRESTListenSuffix]) == 1 {
if err := event.ValidateFilterRuleValue(values[peerRESTListenSuffix][0]); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

suffix = values["suffix"][0]
suffix = values[peerRESTListenSuffix][0]
}

pattern := event.NewPattern(prefix, suffix)

eventNames := []event.Name{}
for _, s := range values["events"] {
var eventNames []event.Name
for _, s := range values[peerRESTListenEvents] {
eventName, err := event.ParseName(s)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
@@ -279,14 +277,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
return
}

host, err := xnet.ParseHost(handlers.GetSourceIP(r))
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

rulesMap := event.NewRulesMap(eventNames, pattern,
event.TargetID{ID: "listen" + "+" + mustGetUUID() + "+" + host.Name, Name: host.Port.String()})
rulesMap := event.NewRulesMap(eventNames, pattern, event.TargetID{ID: mustGetUUID()})

w.Header().Set(xhttp.ContentType, "text/event-stream")

@@ -306,7 +297,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
}
objectName, uerr := url.QueryUnescape(ev.S3.Object.Key)
if uerr != nil {
return false
objectName = ev.S3.Object.Key
}
return len(rulesMap.Match(ev.EventName, objectName).ToSlice()) != 0
})
@@ -315,7 +306,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
if peer == nil {
continue
}
peer.Listen(listenCh, doneCh)
peer.Listen(listenCh, doneCh, r.URL.Query())
}

keepAliveTicker := time.NewTicker(500 * time.Millisecond)
@@ -326,8 +317,14 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
select {
case evI := <-listenCh:
ev := evI.(event.Event)
if err := enc.Encode(struct{ Records []event.Event }{[]event.Event{ev}}); err != nil {
return
if len(string(ev.EventName)) > 0 {
if err := enc.Encode(struct{ Records []event.Event }{[]event.Event{ev}}); err != nil {
return
}
} else {
if _, err := w.Write([]byte(" ")); err != nil {
return
}
}
w.(http.Flusher).Flush()
case <-keepAliveTicker.C:
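On the wire, the handler above writes each matching notification as a `{"Records": [...]}` JSON object on a long-lived `text/event-stream` response, interleaved with single-space keep-alives. A rough client-side sketch of consuming that stream; the endpoint URL, bucket name, and the trimmed `Records` shape are assumptions here, and authentication is omitted entirely (the minio-js listenBucketNotification API handles signing for real use):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"net/url"
)

// listenEvent approximates the shape written by the handler: one JSON object
// per notification wrapping a Records array, trimmed to the fields printed below.
type listenEvent struct {
	Records []struct {
		EventName string `json:"eventName"`
		S3        struct {
			Object struct {
				Key string `json:"key"`
			} `json:"object"`
		} `json:"s3"`
	} `json:"Records"`
}

func main() {
	// Hypothetical unauthenticated listen request against a local server.
	q := url.Values{}
	q.Set("prefix", "photos/")
	q.Set("suffix", ".jpg")
	q.Add("events", "s3:ObjectCreated:*")

	resp, err := http.Get("http://localhost:9000/mybucket?" + q.Encode())
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// json.Decoder skips the whitespace keep-alives written between events,
	// so it can simply be called in a loop until the stream ends.
	dec := json.NewDecoder(resp.Body)
	for {
		var ev listenEvent
		if err := dec.Decode(&ev); err != nil {
			log.Fatal(err)
		}
		for _, rec := range ev.Records {
			fmt.Println(rec.EventName, rec.S3.Object.Key)
		}
	}
}
```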
@@ -34,31 +34,40 @@ const defaultLogBufferCount = 10000

//HTTPConsoleLoggerSys holds global console logger state
type HTTPConsoleLoggerSys struct {
sync.RWMutex
pubsub *pubsub.PubSub
console *console.Target
nodeName string
// To protect ring buffer.
logBufLk sync.RWMutex
logBuf *ring.Ring
}

// NewConsoleLogger - creates new HTTPConsoleLoggerSys with all nodes subscribed to
// the console logging pub sub system
func NewConsoleLogger(ctx context.Context, endpointZones EndpointZones) *HTTPConsoleLoggerSys {
func mustGetNodeName(endpointZones EndpointZones) (nodeName string) {
host, err := xnet.ParseHost(GetLocalPeer(endpointZones))
if err != nil {
logger.FatalIf(err, "Unable to start console logging subsystem")
}
var nodeName string
if globalIsDistXL {
nodeName = host.Name
}
return nodeName
}

// NewConsoleLogger - creates new HTTPConsoleLoggerSys with all nodes subscribed to
// the console logging pub sub system
func NewConsoleLogger(ctx context.Context) *HTTPConsoleLoggerSys {
ps := pubsub.New()
return &HTTPConsoleLoggerSys{
ps, nil, nodeName, sync.RWMutex{}, ring.New(defaultLogBufferCount),
pubsub: ps,
console: console.New(),
logBuf: ring.New(defaultLogBufferCount),
}
}

// SetNodeName - sets the node name if any after distributed setup has initialized
func (sys *HTTPConsoleLoggerSys) SetNodeName(endpointZones EndpointZones) {
sys.nodeName = mustGetNodeName(endpointZones)
}

// HasLogListeners returns true if console log listeners are registered
// for this node or peers
func (sys *HTTPConsoleLoggerSys) HasLogListeners() bool {
@@ -67,9 +76,9 @@ func (sys *HTTPConsoleLoggerSys) HasLogListeners() bool {

// Subscribe starts console logging for this node.
func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan interface{}, doneCh chan struct{}, node string, last int, logKind string, filter func(entry interface{}) bool) {
// Enable console logging for remote client even if local console logging is disabled in the config.
if !sys.pubsub.HasSubscribers() {
logger.AddTarget(globalConsoleSys.Console())
// Enable console logging for remote client.
if !sys.HasLogListeners() {
logger.AddTarget(sys)
}

cnt := 0
@@ -81,14 +90,14 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan interface{}, doneCh chan s
}

lastN = make([]madmin.LogInfo, last)
sys.logBufLk.RLock()
sys.RLock()
sys.logBuf.Do(func(p interface{}) {
if p != nil && (p.(madmin.LogInfo)).SendLog(node, logKind) {
lastN[cnt%last] = p.(madmin.LogInfo)
cnt++
}
})
sys.logBufLk.RUnlock()
sys.RUnlock()
// send last n console log messages in order filtered by node
if cnt > 0 {
for i := 0; i < last; i++ {
@@ -106,17 +115,6 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan interface{}, doneCh chan s
sys.pubsub.Subscribe(subCh, doneCh, filter)
}

// Console returns a console target
func (sys *HTTPConsoleLoggerSys) Console() *HTTPConsoleLoggerSys {
if sys == nil {
return sys
}
if sys.console == nil {
sys.console = console.New()
}
return sys
}

// Send log message 'e' to console and publish to console
// log pubsub system
func (sys *HTTPConsoleLoggerSys) Send(e interface{}, logKind string) error {
@@ -129,11 +127,11 @@ func (sys *HTTPConsoleLoggerSys) Send(e interface{}, logKind string) error {
}

sys.pubsub.Publish(lg)
sys.logBufLk.Lock()
sys.Lock()
// add log to ring buffer
sys.logBuf.Value = lg
sys.logBuf = sys.logBuf.Next()
sys.logBufLk.Unlock()
sys.Unlock()

return sys.console.Send(e, string(logger.All))
}
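HTTPConsoleLoggerSys keeps the most recent defaultLogBufferCount entries in a `container/ring` guarded by the struct's embedded RWMutex, so a new subscriber can be replayed the recent history before live messages are published. A self-contained sketch of that pattern (the `logRing` type is illustrative, not MinIO code):

```go
package main

import (
	"container/ring"
	"fmt"
	"sync"
)

// logRing keeps the last N entries: writes overwrite the oldest slot, and the
// whole ring is protected by an embedded RWMutex, mirroring the console logger.
type logRing struct {
	sync.RWMutex
	buf *ring.Ring
}

func newLogRing(n int) *logRing {
	return &logRing{buf: ring.New(n)}
}

// add stores an entry in the current slot and advances the ring.
func (l *logRing) add(entry string) {
	l.Lock()
	l.buf.Value = entry
	l.buf = l.buf.Next()
	l.Unlock()
}

// last returns the retained entries in insertion order.
func (l *logRing) last() []string {
	l.RLock()
	defer l.RUnlock()
	var out []string
	l.buf.Do(func(p interface{}) {
		if p != nil {
			out = append(out, p.(string))
		}
	})
	return out
}

func main() {
	r := newLogRing(3)
	for i := 1; i <= 5; i++ {
		r.add(fmt.Sprintf("log %d", i))
	}
	fmt.Println(r.last()) // [log 3 log 4 log 5]
}
```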
@@ -68,7 +68,13 @@ func runDataUsageInfoForFS(ctx context.Context, fsObj *FSObjects, endCh <-chan s
// Save the data usage in the disk
err := storeDataUsageInBackend(ctx, fsObj, usageInfo)
if err != nil {
logger.LogIf(ctx, err)
if globalWORMEnabled {
if _, ok := err.(ObjectAlreadyExists); !ok {
logger.LogIf(ctx, err)
}
} else {
logger.LogIf(ctx, err)
}
}
select {
case <-endCh:
@@ -97,7 +103,13 @@ func runDataUsageInfoForXLZones(ctx context.Context, z *xlZones, endCh <-chan st
usageInfo := z.crawlAndGetDataUsage(ctx, endCh)
err := storeDataUsageInBackend(ctx, z, usageInfo)
if err != nil {
logger.LogIf(ctx, err)
if globalWORMEnabled {
if _, ok := err.(ObjectAlreadyExists); !ok {
logger.LogIf(ctx, err)
}
} else {
logger.LogIf(ctx, err)
}
}
select {
case <-endCh:
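Both crawlers now special-case WORM mode: rewriting the saved usage object is expected to fail with ObjectAlreadyExists when WORM is on, so only other errors are logged. A self-contained restatement of that branch as a helper; the sentinel error and the `logUnlessWORMExists` name are stand-ins for illustration, not MinIO's actual types:

```go
package main

import (
	"errors"
	"fmt"
)

// errObjectAlreadyExists stands in for minio's ObjectAlreadyExists error type.
var errObjectAlreadyExists = errors.New("object already exists")

// logUnlessWORMExists captures the repeated pattern above: with WORM enabled,
// "already exists" errors from rewriting the usage object are expected and
// silently dropped; anything else is still logged.
func logUnlessWORMExists(wormEnabled bool, err error) {
	if err == nil {
		return
	}
	if wormEnabled && errors.Is(err, errObjectAlreadyExists) {
		return // expected under WORM, ignore
	}
	fmt.Println("error:", err) // stands in for logger.LogIf(ctx, err)
}

func main() {
	logUnlessWORMExists(true, errObjectAlreadyExists)     // dropped
	logUnlessWORMExists(true, errors.New("disk failure")) // logged
	logUnlessWORMExists(false, errObjectAlreadyExists)    // logged, WORM is off
}
```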
@@ -38,7 +38,10 @@ import (
func init() {
logger.Init(GOPATH, GOROOT)
logger.RegisterError(config.FmtError)
logger.AddTarget(globalConsoleSys.Console())

// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(context.Background())
logger.AddTarget(globalConsoleSys)
}

var (
@@ -145,9 +148,6 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
// Set when gateway is enabled
globalIsGateway = true

// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(context.Background(), globalEndpoints)

enableConfigOps := gatewayName == "nas"

// TODO: We need to move this code with globalConfigSys.Init()
@@ -83,23 +83,6 @@ FLAGS:
ENDPOINT:
Azure server endpoint. Default ENDPOINT is https://core.windows.net

ENVIRONMENT VARIABLES:
ACCESS:
MINIO_ACCESS_KEY: Username or access key of Azure storage.
MINIO_SECRET_KEY: Password or secret key of Azure storage.

BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".

DOMAIN:
MINIO_DOMAIN: To enable virtual-host-style requests, set this value to MinIO host domain name.

CACHE:
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ",".
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ",".
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
MINIO_CACHE_QUOTA: Maximum permitted usage of the cache in percentage (0-100).

EXAMPLES:
1. Start minio gateway server for Azure Blob Storage backend on custom endpoint.
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}azureaccountname
@@ -57,30 +57,14 @@ USAGE:
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
ENVIRONMENT VARIABLES:
ACCESS:
MINIO_ACCESS_KEY: B2 account id.
MINIO_SECRET_KEY: B2 application key.

BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".

DOMAIN:
MINIO_DOMAIN: To enable virtual-host-style requests, set this value to MinIO host domain name.

CACHE:
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ",".
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ",".
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
MINIO_CACHE_QUOTA: Maximum permitted usage of the cache in percentage (0-100).

EXAMPLES:
1. Start minio gateway server for B2 backend.
1. Start minio gateway server for B2 backend
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accountID
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}applicationKey
{{.Prompt}} {{.HelpName}}

2. Start minio gateway server for B2 backend with edge caching enabled.
2. Start minio gateway server for B2 backend with edge caching enabled
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accountID
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}applicationKey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4"
@@ -106,37 +106,19 @@ FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
PROJECTID:
GCS project-id should be provided if GOOGLE_APPLICATION_CREDENTIALS environmental variable is not set.
optional GCS project-id expected GOOGLE_APPLICATION_CREDENTIALS env is not set

ENVIRONMENT VARIABLES:
ACCESS:
MINIO_ACCESS_KEY: Username or access key of GCS.
MINIO_SECRET_KEY: Password or secret key of GCS.

BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".

DOMAIN:
MINIO_DOMAIN: To enable virtual-host-style requests, set this value to MinIO host domain name.

CACHE:
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ",".
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ",".
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
MINIO_CACHE_QUOTA: Maximum permitted usage of the cache in percentage (0-100).

GCS credentials file:
GOOGLE_APPLICATION_CREDENTIALS: Path to credentials.json
GOOGLE_APPLICATION_CREDENTIALS:
path to credentials.json, generated it from here https://developers.google.com/identity/protocols/application-default-credentials

EXAMPLES:
1. Start minio gateway server for GCS backend.
1. Start minio gateway server for GCS backend
{{.Prompt}} {{.EnvVarSetCommand}} GOOGLE_APPLICATION_CREDENTIALS{{.AssignmentOperator}}/path/to/credentials.json
(Instructions to generate credentials : https://developers.google.com/identity/protocols/application-default-credentials)
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
{{.Prompt}} {{.HelpName}} mygcsprojectid

2. Start minio gateway server for GCS backend with edge caching enabled.
2. Start minio gateway server for GCS backend with edge caching enabled
{{.Prompt}} {{.EnvVarSetCommand}} GOOGLE_APPLICATION_CREDENTIALS{{.AssignmentOperator}}/path/to/credentials.json
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
@@ -64,30 +64,13 @@ FLAGS:
HDFS-NAMENODE:
HDFS namenode URI

ENVIRONMENT VARIABLES:
ACCESS:
MINIO_ACCESS_KEY: Username or access key of minimum 3 characters in length.
MINIO_SECRET_KEY: Password or secret key of minimum 8 characters in length.

BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".

DOMAIN:
MINIO_DOMAIN: To enable virtual-host-style requests, set this value to Minio host domain name.

CACHE:
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ",".
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ",".
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
MINIO_CACHE_QUOTA: Maximum permitted usage of the cache in percentage (0-100).

EXAMPLES:
1. Start minio gateway server for HDFS backend.
1. Start minio gateway server for HDFS backend
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
{{.Prompt}} {{.HelpName}} hdfs://namenode:8200

2. Start minio gateway server for HDFS with edge caching enabled.
2. Start minio gateway server for HDFS with edge caching enabled
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4"
@@ -39,32 +39,15 @@ FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
PATH:
Path to NAS mount point.

ENVIRONMENT VARIABLES:
ACCESS:
MINIO_ACCESS_KEY: Username or access key of minimum 3 characters in length.
MINIO_SECRET_KEY: Password or secret key of minimum 8 characters in length.

BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".

DOMAIN:
MINIO_DOMAIN: To enable virtual-host-style requests, set this value to MinIO host domain name.

CACHE:
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ",".
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ",".
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
MINIO_CACHE_QUOTA: Maximum permitted usage of the cache in percentage (0-100).
path to NAS mount point

EXAMPLES:
1. Start minio gateway server for NAS backend.
1. Start minio gateway server for NAS backend
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
{{.Prompt}} {{.HelpName}} /shared/nasvol

2. Start minio gateway server for NAS with edge caching enabled.
2. Start minio gateway server for NAS with edge caching enabled
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4"
@@ -58,37 +58,15 @@ FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
ENDPOINT:
OSS server endpoint. Default ENDPOINT is https://oss.aliyuncs.com

ENVIRONMENT VARIABLES:
ACCESS:
MINIO_ACCESS_KEY: Username or access key of OSS storage.
MINIO_SECRET_KEY: Password or secret key of OSS storage.

BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".

DOMAIN:
MINIO_DOMAIN: To enable virtual-host-style requests, set this value to MinIO host domain name.

CACHE:
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ",".
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ",".
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
MINIO_CACHE_QUOTA: Maximum permitted usage of the cache in percentage (0-100).
oss server endpoint. Default ENDPOINT is https://oss.aliyuncs.com

EXAMPLES:
1. Start minio gateway server for Aliyun OSS backend.
1. Start minio gateway server for Aliyun OSS backend
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
{{.Prompt}} {{.HelpName}}

2. Start minio gateway server for Aliyun OSS backend on custom endpoint.
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}Q3AM3UQ867SPQQA43P2F
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
{{.Prompt}} {{.HelpName}} https://oss.example.com

3. Start minio gateway server for Aliyun OSS backend with edge caching enabled.
2. Start minio gateway server for Aliyun OSS backend with edge caching enabled
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4"
@@ -53,32 +53,15 @@ FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
ENDPOINT:
S3 server endpoint. Default ENDPOINT is https://s3.amazonaws.com

ENVIRONMENT VARIABLES:
ACCESS:
MINIO_ACCESS_KEY: Username or access key of S3 storage.
MINIO_SECRET_KEY: Password or secret key of S3 storage.

BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".

DOMAIN:
MINIO_DOMAIN: To enable virtual-host-style requests, set this value to MinIO host domain name.

CACHE:
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ",".
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ",".
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
MINIO_CACHE_QUOTA: Maximum permitted usage of the cache in percentage (0-100).
s3 server endpoint. Default ENDPOINT is https://s3.amazonaws.com

EXAMPLES:
1. Start minio gateway server for AWS S3 backend.
1. Start minio gateway server for AWS S3 backend
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
{{.Prompt}} {{.HelpName}}

4. Start minio gateway server for AWS S3 backend with edge caching enabled.
2. Start minio gateway server for AWS S3 backend with edge caching enabled
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4"
@@ -86,15 +69,6 @@ EXAMPLES:
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXPIRY{{.AssignmentOperator}}40
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}80
{{.Prompt}} {{.HelpName}}

4. Start minio gateway server for AWS S3 backend using AWS environment variables.
NOTE: The access and secret key in this case will authenticate with MinIO instead
of AWS and AWS envs will be used to authenticate to AWS S3.
{{.Prompt}} {{.EnvVarSetCommand}} AWS_ACCESS_KEY_ID{{.AssignmentOperator}}aws_access_key
{{.Prompt}} {{.EnvVarSetCommand}} AWS_SECRET_ACCESS_KEY{{.AssignmentOperator}}aws_secret_key
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
{{.Prompt}} {{.HelpName}}
`

minio.RegisterGatewayCommand(cli.Command{
@@ -649,7 +649,7 @@ func (client *peerRESTClient) doTrace(traceCh chan interface{}, doneCh chan stru
}
}

func (client *peerRESTClient) doListen(listenCh chan interface{}, doneCh chan struct{}) {
func (client *peerRESTClient) doListen(listenCh chan interface{}, doneCh chan struct{}, v url.Values) {
// To cancel the REST request in case doneCh gets closed.
ctx, cancel := context.WithCancel(context.Background())
@@ -664,7 +664,7 @@ func (client *peerRESTClient) doListen(listenCh chan interface{}, doneCh chan st
cancel()
}()

respBody, err := client.callWithContext(ctx, peerRESTMethodListen, nil, nil, -1)
respBody, err := client.callWithContext(ctx, peerRESTMethodListen, v, nil, -1)
defer http.DrainBody(respBody)

if err != nil {
@@ -688,10 +688,10 @@ func (client *peerRESTClient) doListen(listenCh chan interface{}, doneCh chan st
}

// Listen - listen on peers.
func (client *peerRESTClient) Listen(listenCh chan interface{}, doneCh chan struct{}) {
func (client *peerRESTClient) Listen(listenCh chan interface{}, doneCh chan struct{}, v url.Values) {
go func() {
for {
client.doListen(listenCh, doneCh)
client.doListen(listenCh, doneCh, v)
select {
case <-doneCh:
return
@@ -81,4 +81,8 @@ const (
peerRESTDryRun = "dry-run"
peerRESTTraceAll = "all"
peerRESTTraceErr = "err"

peerRESTListenPrefix = "prefix"
peerRESTListenSuffix = "suffix"
peerRESTListenEvents = "events"
)
@@ -24,6 +24,7 @@ import (
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
@@ -915,6 +916,53 @@ func (s *peerRESTServer) ListenHandler(w http.ResponseWriter, r *http.Request) {
return
}

values := r.URL.Query()

var prefix string
if len(values[peerRESTListenPrefix]) > 1 {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}

if len(values[peerRESTListenPrefix]) == 1 {
if err := event.ValidateFilterRuleValue(values[peerRESTListenPrefix][0]); err != nil {
s.writeErrorResponse(w, err)
return
}

prefix = values[peerRESTListenPrefix][0]
}

var suffix string
if len(values[peerRESTListenSuffix]) > 1 {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}

if len(values[peerRESTListenSuffix]) == 1 {
if err := event.ValidateFilterRuleValue(values[peerRESTListenSuffix][0]); err != nil {
s.writeErrorResponse(w, err)
return
}

suffix = values[peerRESTListenSuffix][0]
}

pattern := event.NewPattern(prefix, suffix)

var eventNames []event.Name
for _, ev := range values[peerRESTListenEvents] {
eventName, err := event.ParseName(ev)
if err != nil {
s.writeErrorResponse(w, err)
return
}

eventNames = append(eventNames, eventName)
}

rulesMap := event.NewRulesMap(eventNames, pattern, event.TargetID{ID: mustGetUUID()})

w.WriteHeader(http.StatusOK)
w.(http.Flusher).Flush()

@@ -925,8 +973,16 @@ func (s *peerRESTServer) ListenHandler(w http.ResponseWriter, r *http.Request) {
// Use buffered channel to take care of burst sends or slow w.Write()
ch := make(chan interface{}, 2000)

globalHTTPListen.Subscribe(ch, doneCh, func(entry interface{}) bool {
return true
globalHTTPListen.Subscribe(ch, doneCh, func(evI interface{}) bool {
ev, ok := evI.(event.Event)
if !ok {
return false
}
objectName, uerr := url.QueryUnescape(ev.S3.Object.Key)
if uerr != nil {
objectName = ev.S3.Object.Key
}
return len(rulesMap.Match(ev.EventName, objectName).ToSlice()) != 0
})

keepAliveTicker := time.NewTicker(500 * time.Millisecond)
@@ -1108,7 +1164,7 @@ func registerPeerRESTHandlers(router *mux.Router) {
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodBackgroundOpsStatus).HandlerFunc(server.BackgroundOpsStatusHandler)

subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodTrace).HandlerFunc(server.TraceHandler)
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodListen).HandlerFunc(server.ListenHandler)
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodListen).HandlerFunc(httpTraceHdrs(server.ListenHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodBackgroundHealStatus).HandlerFunc(server.BackgroundHealStatusHandler)
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodLog).HandlerFunc(server.ConsoleLogHandler)
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodPutBucketObjectLockConfig).HandlerFunc(httpTraceHdrs(server.PutBucketObjectLockConfigHandler)).Queries(restQueries(peerRESTBucket)...)
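The filter closure handed to globalHTTPListen.Subscribe above is what narrows the node-wide event stream down to the caller's bucket, prefix, suffix, and event-name selection. A toy publish/subscribe with per-subscriber filters illustrates the mechanism; this is a stand-in sketch, not minio's pkg/pubsub implementation:

```go
package main

import (
	"fmt"
	"strings"
	"sync"
)

// pubSub fans out published items only to subscribers whose filter accepts them.
type pubSub struct {
	sync.Mutex
	subs []subscriber
}

type subscriber struct {
	ch     chan interface{}
	filter func(interface{}) bool
}

func (p *pubSub) Subscribe(ch chan interface{}, filter func(interface{}) bool) {
	p.Lock()
	defer p.Unlock()
	p.subs = append(p.subs, subscriber{ch: ch, filter: filter})
}

func (p *pubSub) Publish(item interface{}) {
	p.Lock()
	defer p.Unlock()
	for _, s := range p.subs {
		if s.filter(item) {
			select {
			case s.ch <- item: // buffered channel absorbs bursts
			default: // drop when the subscriber is too slow
			}
		}
	}
}

func main() {
	ps := &pubSub{}
	ch := make(chan interface{}, 10)
	// Filter comparable in spirit to the handler's: keep only keys under "photos/".
	ps.Subscribe(ch, func(v interface{}) bool {
		key, ok := v.(string)
		return ok && strings.HasPrefix(key, "photos/")
	})
	ps.Publish("photos/cat.jpg")
	ps.Publish("docs/readme.md")
	close(ch)
	for v := range ch {
		fmt.Println(v) // photos/cat.jpg
	}
}
```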
@@ -40,7 +40,11 @@ import (
func init() {
logger.Init(GOPATH, GOROOT)
logger.RegisterError(config.FmtError)
logger.AddTarget(globalConsoleSys.Console())

// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(context.Background())
logger.AddTarget(globalConsoleSys)

gob.Register(VerifyFileError(""))
gob.Register(DeleteFileError(""))
}
@@ -65,6 +69,7 @@ var serverCmd = cli.Command{
USAGE:
{{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR1 [DIR2..]
{{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR{1...64}
{{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR{1...64} DIR{65...128}

DIR:
DIR points to a directory on a filesystem. When you want to combine
@@ -76,56 +81,21 @@ DIR:
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
ENVIRONMENT VARIABLES:
ACCESS:
MINIO_ACCESS_KEY: Custom username or access key of minimum 3 characters in length.
MINIO_SECRET_KEY: Custom password or secret key of minimum 8 characters in length.

BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".

DOMAIN:
MINIO_DOMAIN: To enable virtual-host-style requests, set this value to MinIO host domain name.

WORM:
MINIO_WORM: To turn on Write-Once-Read-Many in server, set this value to "on".

BUCKET-DNS:
MINIO_DOMAIN: To enable bucket DNS requests, set this value to MinIO host domain name.
MINIO_PUBLIC_IPS: To enable bucket DNS requests, set this value to list of MinIO host public IP(s) delimited by ",".
MINIO_ETCD_ENDPOINTS: To enable bucket DNS requests, set this value to list of etcd endpoints delimited by ",".

KMS:
MINIO_KMS_VAULT_ENDPOINT: To enable Vault as KMS,set this value to Vault endpoint.
MINIO_KMS_VAULT_APPROLE_ID: To enable Vault as KMS,set this value to Vault AppRole ID.
MINIO_KMS_VAULT_APPROLE_SECRET: To enable Vault as KMS,set this value to Vault AppRole Secret ID.
MINIO_KMS_VAULT_KEY_NAME: To enable Vault as KMS,set this value to Vault encryption key-ring name.

EXAMPLES:
1. Start minio server on "/home/shared" directory.
{{.Prompt}} {{.HelpName}} /home/shared

2. Start minio server bound to a specific ADDRESS:PORT.
{{.Prompt}} {{.HelpName}} --address 192.168.1.101:9000 /home/shared

3. Start minio server and enable virtual-host-style requests.
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_DOMAIN{{.AssignmentOperator}}mydomain.com
{{.Prompt}} {{.HelpName}} --address mydomain.com:9000 /mnt/export

4. Start erasure coded minio server on a node with 64 drives.
{{.Prompt}} {{.HelpName}} /mnt/export{1...64}

5. Start distributed minio server on an 32 node setup with 32 drives each. Run following command on all the 32 nodes.
2. Start distributed minio server on an 32 node setup with 32 drives each, run following command on all the nodes
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}minio
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}miniostorage
{{.Prompt}} {{.HelpName}} http://node{1...32}.example.com/mnt/export/{1...32}

6. Start minio server with KMS enabled.
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_KMS_VAULT_APPROLE_ID{{.AssignmentOperator}}9b56cc08-8258-45d5-24a3-679876769126
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_KMS_VAULT_APPROLE_SECRET{{.AssignmentOperator}}4e30c52f-13e4-a6f5-0763-d50e8cb4321f
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_KMS_VAULT_ENDPOINT{{.AssignmentOperator}}https://vault-endpoint-ip:8200
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_KMS_VAULT_KEY_NAME{{.AssignmentOperator}}my-minio-key
{{.Prompt}} {{.HelpName}} /home/shared
3. Start distributed minio server in an expanded setup, run the following command on all the nodes
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}minio
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}miniostorage
{{.Prompt}} {{.HelpName}} http://node{1...16}.example.com/mnt/export/{1...32} \
http://node{17...64}.example.com/mnt/export/{1...64}
`,
}

@@ -236,7 +206,15 @@ func initSafeMode(buckets []BucketInfo) (err error) {
// Migrate all backend configs to encrypted backend configs, optionally
// handles rotating keys for encryption.
if err = handleEncryptedConfigBackend(newObject, true); err != nil {
return fmt.Errorf("Unable to handle encrypted backend for config, iam and policies: %w", err)
if globalWORMEnabled {
if _, ok := err.(ObjectAlreadyExists); !ok {
return fmt.Errorf("Unable to handle encrypted backend for config, iam and policies: %w",
err)
}
// Ignore ObjectAlreadyExists if globalWORMEnabled is true.
} else {
return fmt.Errorf("Unable to handle encrypted backend for config, iam and policies: %w", err)
}
}

// **** WARNING ****
@@ -298,6 +276,9 @@ func serverMain(ctx *cli.Context) {
cli.ShowCommandHelpAndExit(ctx, "server", 1)
}

// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(context.Background())

signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM)

// Handle all server command args.
@@ -306,6 +287,9 @@ func serverMain(ctx *cli.Context) {
// Handle all server environment vars.
serverHandleEnvVars()

// Set node name, only set for distributed setup.
globalConsoleSys.SetNodeName(globalEndpoints)

// Initialize all help
initHelp()

@@ -349,9 +333,6 @@ func serverMain(ctx *cli.Context) {
globalBackgroundHealState = initHealState()
}

// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(context.Background(), globalEndpoints)

// Configure server.
var handler http.Handler
handler, err = configureServerHandler(globalEndpoints)
@@ -74,6 +74,9 @@ func init() {
// Set system resources to maximum.
setMaxResources()

// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(context.Background())

logger.Disable = true

initHelp()
@@ -497,9 +500,6 @@ func resetTestGlobals() {

// Configure the server for the test run.
func newTestConfig(bucketLocation string, obj ObjectLayer) (err error) {
// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(context.Background(), globalEndpoints)

// Initialize server config.
if err = newSrvConfig(obj); err != nil {
return err
@@ -66,7 +66,7 @@ export MINIO_SECRET_KEY=<SECRET_KEY>
minio server http://host{1...32}/export{1...32} http://host{33...64}/export{1...32}
```

Now the server has expanded storage of *1024* more disks in total of *2048* disks, new object upload requests automatically start using the least used cluster. This expansion strategy works endlessly, so you can perpetually expand your clusters as needed.
Now the server has expanded storage of *1024* more disks in total of *2048* disks, new object upload requests automatically start using the least used cluster. This expansion strategy works endlessly, so you can perpetually expand your clusters as needed. When you restart, it is immediate and non-disruptive to the applications. Each group of servers in the command-line is called a zone. There are 2 zones in this example. Objects are placed in zones based on which zone has the *most free* space. Within each zone, the location of the erasure-set of drives is determined based on a deterministic hashing algorithm.

## 3. Test your setup
To test this setup, access the MinIO server via browser or [`mc`](https://docs.min.io/docs/minio-client-quickstart-guide).
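The README line above says drive placement within a zone is decided by a deterministic hash of the object name. MinIO's actual hashing scheme is not shown in this commit; the sketch below uses crc32 purely as a stand-in to illustrate the property that matters, that the name alone (no lookup table) decides the erasure set:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

// erasureSetIndex illustrates deterministic placement: hash the object name
// and reduce it modulo the number of erasure sets in the zone. The same name
// always maps to the same set, so no placement metadata has to be stored.
func erasureSetIndex(object string, setCount int) int {
	return int(crc32.ChecksumIEEE([]byte(object)) % uint32(setCount))
}

func main() {
	for _, obj := range []string{"photos/cat.jpg", "photos/dog.jpg", "backup.tar"} {
		fmt.Printf("%-16s -> set %d\n", obj, erasureSetIndex(obj, 16))
	}
}
```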