replication: centralize healthcheck for remote targets (#15516)
This PR moves the health check from the minio-go client to being managed on the server, and additionally integrates the health check into site replication.
parent 48640b1de2
commit 21fe14201f
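At a high level, the change replaces the per-client health check that minio-go's TargetClient used to run with a single endpoint health map owned by BucketTargetSys: a heartBeat goroutine probes every known remote endpoint with madmin's anonymous Alive check, and replication paths ask globalBucketTargetSys.isOffline(endpoint) instead of tgt.IsOffline(). The standalone sketch below only illustrates that lookup pattern; healthMap and the sample endpoint are illustrative stand-ins, not the actual server types (see the bucket-targets.go hunk further down for the real BucketTargetSys fields).

package main

import (
	"fmt"
	"net/url"
	"sync"
)

// epHealth mirrors the struct added in bucket-targets.go: the last observed
// liveness of one remote endpoint.
type epHealth struct {
	Endpoint string
	Scheme   string
	Online   bool
}

// healthMap is a simplified, hypothetical stand-in for the health-check state
// that BucketTargetSys now owns; in the server a heartbeat goroutine refreshes
// this map from madmin's anonymous Alive results.
type healthMap struct {
	mu sync.RWMutex
	hc map[string]epHealth // keyed by endpoint host
}

// isOffline reports the last known liveness of an endpoint, defaulting to
// online for endpoints that have not been probed yet.
func (h *healthMap) isOffline(ep *url.URL) bool {
	h.mu.RLock()
	defer h.mu.RUnlock()
	if st, ok := h.hc[ep.Host]; ok {
		return !st.Online
	}
	return false
}

func main() {
	hm := &healthMap{hc: map[string]epHealth{
		"replica.example.com:9000": {Endpoint: "replica.example.com:9000", Scheme: "https", Online: false},
	}}
	u, _ := url.Parse("https://replica.example.com:9000") // error ignored in this sketch
	// true: replication callers would skip this target instead of dialing it
	fmt.Println("offline:", hm.isOffline(u))
}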
@@ -249,7 +249,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
     }
     if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, update); err != nil {
         switch err.(type) {
-        case BucketRemoteConnectionErr:
+        case RemoteTargetConnectionErr:
             writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrReplicationRemoteConnectionError, err), r.URL)
         default:
             writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
@@ -888,7 +888,7 @@ var errorCodes = errorCodeMap{
     },
     ErrReplicationRemoteConnectionError: {
         Code:           "XMinioAdminReplicationRemoteConnectionError",
-        Description:    "Remote service connection error - please check remote service credentials and target bucket",
+        Description:    "Remote service connection error",
         HTTPStatusCode: http.StatusNotFound,
     },
     ErrReplicationBandwidthLimitError: {
@@ -2074,7 +2074,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
         apiErr = ErrRemoteDestinationNotFoundError
     case BucketRemoteTargetNotFound:
         apiErr = ErrRemoteTargetNotFoundError
-    case BucketRemoteConnectionErr:
+    case RemoteTargetConnectionErr:
         apiErr = ErrReplicationRemoteConnectionError
     case BucketRemoteAlreadyExists:
         apiErr = ErrBucketRemoteAlreadyExists
@@ -310,7 +310,7 @@ func (api objectAPIHandlers) ResetBucketReplicationStartHandler(w http.ResponseW
     rinfo.Targets = append(rinfo.Targets, ResyncTarget{Arn: tgtArns[0], ResetID: target.ResetID})
     if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, true); err != nil {
         switch err.(type) {
-        case BucketRemoteConnectionErr:
+        case RemoteTargetConnectionErr:
             writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrReplicationRemoteConnectionError, err), r.URL)
         default:
             writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
@@ -534,7 +534,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
     if dobj.VersionID != "" && rinfo.VersionPurgeStatus == Complete {
         return
     }
-    if tgt.IsOffline() {
+    if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
         logger.LogIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN))
         sendEvent(eventArgs{
             BucketName: dobj.Bucket,
@@ -1039,7 +1039,7 @@ func replicateObjectToTarget(ctx context.Context, ri ReplicateObjectInfo, object
         rinfo.ReplicationResynced = true
         return
     }
-    if tgt.IsOffline() {
+    if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
         logger.LogIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", bucket, tgt.ARN))
         sendEvent(eventArgs{
             EventName: event.ObjectReplicationNotTracked,
@@ -1659,7 +1659,7 @@ func proxyHeadToRepTarget(ctx context.Context, bucket, object string, rs *HTTPRa
     }
     for _, t := range proxyTargets.Targets {
         tgt = globalBucketTargetSys.GetRemoteTargetClient(ctx, t.Arn)
-        if tgt == nil || tgt.IsOffline() {
+        if tgt == nil || globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
             continue
         }
         // if proxying explicitly disabled on remote target
@@ -19,6 +19,8 @@ package cmd
 
 import (
     "context"
+    "fmt"
+    "net/url"
     "sync"
     "time"
 
@@ -35,7 +37,9 @@ import (
 )
 
 const (
-    defaultHealthCheckDuration = 30 * time.Second
+    defaultHealthCheckDuration = 5 * time.Second
+    // default interval for reload of all remote target endpoints
+    defaultHealthCheckReloadDuration = 30 * time.Minute
 )
 
 // BucketTargetSys represents bucket targets subsystem
@@ -43,6 +47,113 @@ type BucketTargetSys struct {
     sync.RWMutex
     arnRemotesMap map[string]*TargetClient
     targetsMap    map[string][]madmin.BucketTarget
+    hMutex        sync.RWMutex
+    hc            map[string]epHealth
+    hcClient      *madmin.AnonymousClient
+}
+
+// epHealth struct represents health of a replication target endpoint.
+type epHealth struct {
+    Endpoint string
+    Scheme   string
+    Online   bool
+}
+
+// isOffline returns current liveness result of remote target. Add endpoint to
+// healthcheck map if missing and default to online status
+func (sys *BucketTargetSys) isOffline(ep *url.URL) bool {
+    sys.hMutex.RLock()
+    defer sys.hMutex.RUnlock()
+    if h, ok := sys.hc[ep.Host]; ok {
+        return !h.Online
+    }
+    go sys.initHC(ep)
+    return false
+}
+
+func (sys *BucketTargetSys) initHC(ep *url.URL) {
+    sys.hMutex.Lock()
+    sys.hc[ep.Host] = epHealth{
+        Endpoint: ep.Host,
+        Scheme:   ep.Scheme,
+        Online:   true,
+    }
+    sys.hMutex.Unlock()
+}
+
+// newHCClient initializes an anonymous client for performing health check on the remote endpoints
+func newHCClient() *madmin.AnonymousClient {
+    clnt, e := madmin.NewAnonymousClientNoEndpoint()
+    if e != nil {
+        logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to initialize health check client"), string(replicationSubsystem))
+        return nil
+    }
+    clnt.SetCustomTransport(globalRemoteTargetTransport)
+    return clnt
+}
+
+// heartBeat performs liveness check on remote endpoints.
+func (sys *BucketTargetSys) heartBeat(ctx context.Context) {
+    hcTimer := time.NewTimer(defaultHealthCheckDuration)
+    defer hcTimer.Stop()
+    for {
+        select {
+        case <-hcTimer.C:
+            sys.hMutex.RLock()
+            var eps []madmin.ServerProperties
+            for _, ep := range sys.hc {
+                eps = append(eps, madmin.ServerProperties{Endpoint: ep.Endpoint, Scheme: ep.Scheme})
+            }
+            sys.hMutex.RUnlock()
+
+            if len(eps) > 0 {
+                cctx, cancel := context.WithTimeout(ctx, 30*time.Second)
+                defer cancel()
+                m := map[string]epHealth{}
+                for result := range sys.hcClient.Alive(cctx, madmin.AliveOpts{}, eps...) {
+                    var online bool
+                    if result.Error == nil {
+                        online = result.Online
+                    }
+                    m[result.Endpoint.Host] = epHealth{
+                        Endpoint: result.Endpoint.Host,
+                        Scheme:   result.Endpoint.Scheme,
+                        Online:   online,
+                    }
+                }
+                sys.hMutex.Lock()
+                sys.hc = m
+                sys.hMutex.Unlock()
+            }
+            hcTimer.Reset(defaultHealthCheckDuration)
+        case <-ctx.Done():
+            return
+        }
+    }
+}
+
+// periodically rebuild the healthcheck map from list of targets to clear
+// out stale endpoints
+func (sys *BucketTargetSys) reloadHealthCheckers(ctx context.Context) {
+    m := make(map[string]epHealth)
+    tgts := sys.ListTargets(ctx, "", "")
+    for _, t := range tgts {
+        if _, ok := m[t.Endpoint]; !ok {
+            scheme := "http"
+            if t.Secure {
+                scheme = "https"
+            }
+            m[t.Endpoint] = epHealth{
+                Online:   true,
+                Endpoint: t.Endpoint,
+                Scheme:   scheme,
+            }
+        }
+    }
+    sys.hMutex.Lock()
+    // swap out the map
+    sys.hc = m
+    sys.hMutex.Unlock()
 }
 
 // ListTargets lists bucket targets across tenant or for individual bucket, and returns
@@ -91,9 +202,6 @@ func (sys *BucketTargetSys) Delete(bucket string) {
         return
     }
     for _, t := range tgts {
-        if tgt, ok := sys.arnRemotesMap[t.Arn]; ok && tgt.healthCancelFn != nil {
-            tgt.healthCancelFn()
-        }
         delete(sys.arnRemotesMap, t.Arn)
     }
     delete(sys.targetsMap, bucket)
@@ -116,7 +224,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
         if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
             return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
         }
-        return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
+        return RemoteTargetConnectionErr{Bucket: tgt.TargetBucket, Err: err}
     }
     if tgt.Type == madmin.ReplicationService {
         if !globalBucketVersioningSys.Enabled(bucket) {
@@ -124,7 +232,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
         }
         vcfg, err := clnt.GetBucketVersioning(ctx, tgt.TargetBucket)
         if err != nil {
-            return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
+            return RemoteTargetConnectionErr{Bucket: tgt.TargetBucket, Err: err}
         }
         if vcfg.Status != string(versioning.Enabled) {
             return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
@@ -152,10 +260,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
     if !found && !update {
         newtgts = append(newtgts, *tgt)
     }
-    // cancel health check for previous target client to avoid leak.
-    if prevClnt, ok := sys.arnRemotesMap[tgt.Arn]; ok && prevClnt.healthCancelFn != nil {
-        prevClnt.healthCancelFn()
-    }
+
     sys.targetsMap[bucket] = newtgts
     sys.arnRemotesMap[tgt.Arn] = clnt
     sys.updateBandwidthLimit(bucket, tgt.BandwidthLimit)
@@ -227,9 +332,6 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
         return BucketRemoteTargetNotFound{Bucket: bucket}
     }
     sys.targetsMap[bucket] = targets
-    if tgt, ok := sys.arnRemotesMap[arnStr]; ok && tgt.healthCancelFn != nil {
-        tgt.healthCancelFn()
-    }
     delete(sys.arnRemotesMap, arnStr)
     sys.updateBandwidthLimit(bucket, 0)
     return nil
@@ -258,11 +360,29 @@ func (sys *BucketTargetSys) GetRemoteBucketTargetByArn(ctx context.Context, buck
 }
 
 // NewBucketTargetSys - creates new replication system.
-func NewBucketTargetSys() *BucketTargetSys {
-    return &BucketTargetSys{
+func NewBucketTargetSys(ctx context.Context) *BucketTargetSys {
+    sys := &BucketTargetSys{
         arnRemotesMap: make(map[string]*TargetClient),
         targetsMap:    make(map[string][]madmin.BucketTarget),
+        hc:            make(map[string]epHealth),
+        hcClient:      newHCClient(),
     }
+    // reload healthcheck endpoints map periodically to remove stale endpoints from the map.
+    go func() {
+        rTimer := time.NewTimer(defaultHealthCheckReloadDuration)
+        defer rTimer.Stop()
+        for {
+            select {
+            case <-rTimer.C:
+                sys.reloadHealthCheckers(ctx)
+                rTimer.Reset(defaultHealthCheckReloadDuration)
+            case <-ctx.Done():
+                return
+            }
+        }
+    }()
+    go sys.heartBeat(ctx)
+    return sys
 }
 
 // UpdateAllTargets updates target to reflect metadata updates
@@ -276,9 +396,6 @@ func (sys *BucketTargetSys) UpdateAllTargets(bucket string, tgts *madmin.BucketT
     // Remove existingtarget and arn association
     if tgts, ok := sys.targetsMap[bucket]; ok {
         for _, t := range tgts {
-            if tgt, ok := sys.arnRemotesMap[t.Arn]; ok && tgt.healthCancelFn != nil {
-                tgt.healthCancelFn()
-            }
             delete(sys.arnRemotesMap, t.Arn)
         }
         delete(sys.targetsMap, bucket)
@@ -345,10 +462,6 @@ func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*T
     if tcfg.HealthCheckDuration >= 1 { // require minimum health check duration of 1 sec.
         hcDuration = tcfg.HealthCheckDuration
     }
-    cancelFn, err := api.HealthCheck(hcDuration)
-    if err != nil {
-        return nil, err
-    }
     tc := &TargetClient{
         Client:              api,
         healthCheckDuration: hcDuration,
@@ -356,9 +469,10 @@ func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*T
         Bucket:              tcfg.TargetBucket,
         StorageClass:        tcfg.StorageClass,
         disableProxy:        tcfg.DisableProxy,
-        healthCancelFn:      cancelFn,
         ARN:                 tcfg.Arn,
         ResetID:             tcfg.ResetID,
+        Endpoint:            tcfg.Endpoint,
+        Secure:              tcfg.Secure,
     }
     return tc, nil
 }
@@ -432,7 +546,8 @@ type TargetClient struct {
     replicateSync bool
     StorageClass  string // storage class on remote
     disableProxy  bool
-    healthCancelFn context.CancelFunc // cancellation function for client healthcheck
     ARN           string // ARN to uniquely identify remote target
     ResetID       string
+    Endpoint      string
+    Secure        bool
 }
@@ -422,11 +422,18 @@ func (e BucketRemoteTargetNotFound) Error() string {
     return "Remote target not found: " + e.Bucket
 }
 
-// BucketRemoteConnectionErr remote target connection failure.
-type BucketRemoteConnectionErr GenericError
+// RemoteTargetConnectionErr remote target connection failure.
+type RemoteTargetConnectionErr struct {
+    Err      error
+    Bucket   string
+    Endpoint string
+}
 
-func (e BucketRemoteConnectionErr) Error() string {
-    return fmt.Sprintf("Remote service endpoint or target bucket not available: %s \n\t%s", e.Bucket, e.Err.Error())
+func (e RemoteTargetConnectionErr) Error() string {
+    if e.Bucket != "" {
+        return fmt.Sprintf("Remote service endpoint offline or target bucket/remote service credentials invalid: %s \n\t%s", e.Bucket, e.Err.Error())
+    }
+    return fmt.Sprintf("Remote service endpoint %s not available\n\t%s", e.Endpoint, e.Err.Error())
 }
 
 // BucketRemoteAlreadyExists remote already exists for this target type.
@@ -290,7 +290,7 @@ func initAllSubsystems() {
     }
 
     // Create new bucket replication subsytem
-    globalBucketTargetSys = NewBucketTargetSys()
+    globalBucketTargetSys = NewBucketTargetSys(GlobalContext)
 
     // Create new ILM tier configuration subsystem
     globalTierConfigMgr = NewTierConfigMgr()
@@ -2217,6 +2217,9 @@ func getAdminClient(endpoint, accessKey, secretKey string) (*madmin.AdminClient,
     if err != nil {
         return nil, err
     }
+    if globalBucketTargetSys.isOffline(epURL) {
+        return nil, RemoteTargetConnectionErr{Endpoint: epURL.String(), Err: fmt.Errorf("remote target is offline for endpoint %s", epURL.String())}
+    }
     client, err := madmin.New(epURL.Host, accessKey, secretKey, epURL.Scheme == "https")
     if err != nil {
         return nil, err
@@ -2230,6 +2233,10 @@ func getS3Client(pc madmin.PeerSite) (*minioClient.Client, error) {
     if err != nil {
         return nil, err
     }
+    if globalBucketTargetSys.isOffline(ep) {
+        return nil, RemoteTargetConnectionErr{Endpoint: ep.String(), Err: fmt.Errorf("remote target is offline for endpoint %s", ep.String())}
+    }
+
     return minioClient.New(ep.Host, &minioClient.Options{
         Creds:  credentials.NewStaticV4(pc.AccessKey, pc.SecretKey, ""),
         Secure: ep.Scheme == "https",
2
go.mod
2
go.mod
@ -48,7 +48,7 @@ require (
|
|||||||
github.com/minio/dperf v0.4.2
|
github.com/minio/dperf v0.4.2
|
||||||
github.com/minio/highwayhash v1.0.2
|
github.com/minio/highwayhash v1.0.2
|
||||||
github.com/minio/kes v0.20.0
|
github.com/minio/kes v0.20.0
|
||||||
github.com/minio/madmin-go v1.4.17
|
github.com/minio/madmin-go v1.4.20
|
||||||
github.com/minio/minio-go/v7 v7.0.34
|
github.com/minio/minio-go/v7 v7.0.34
|
||||||
github.com/minio/pkg v1.3.0
|
github.com/minio/pkg v1.3.0
|
||||||
github.com/minio/selfupdate v0.5.0
|
github.com/minio/selfupdate v0.5.0
|
||||||
go.sum
@@ -622,8 +622,8 @@ github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLT
 github.com/minio/kes v0.20.0 h1:1tyC51Rr8zTregTESuT/QN/iebNMX7B9t7d3xLNMEpE=
 github.com/minio/kes v0.20.0/go.mod h1:3FW1BQkMGQW78yhy+69tUq5bdcf5rnXJizyeKB9a/tc=
 github.com/minio/madmin-go v1.3.5/go.mod h1:vGKGboQgGIWx4DuDUaXixjlIEZOCIp6ivJkQoiVaACc=
-github.com/minio/madmin-go v1.4.17 h1:o8b42lrJW9qyIthIa+24IzygZjmPm04iSUSBITcvzYU=
-github.com/minio/madmin-go v1.4.17/go.mod h1:ez87VmMtsxP7DRxjKJKD4RDNW+nhO2QF9KSzwxBDQ98=
+github.com/minio/madmin-go v1.4.20 h1:OpPxc8uIaevJMpBGSZ2TXJvFGvPSDn0IT3VJPh7w90M=
+github.com/minio/madmin-go v1.4.20/go.mod h1:ez87VmMtsxP7DRxjKJKD4RDNW+nhO2QF9KSzwxBDQ98=
 github.com/minio/mc v0.0.0-20220805080128-351d021b924b h1:ikMXncKqNE/0acH6us6yy3v+gJBP7nGv/3Rc9F7vRio=
 github.com/minio/mc v0.0.0-20220805080128-351d021b924b/go.mod h1:YUXIqqgGfFknByv0eeJSMBQl/WGuEN0XkpW68/ghBm0=
 github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=