2021-04-18 15:41:13 -04:00
|
|
|
// Copyright (c) 2015-2021 MinIO, Inc.
|
|
|
|
//
|
|
|
|
// This file is part of MinIO Object Storage stack
|
|
|
|
//
|
|
|
|
// This program is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Affero General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// This program is distributed in the hope that it will be useful
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Affero General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Affero General Public License
|
|
|
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
2020-07-21 20:49:56 -04:00
|
|
|
|
|
|
|
package cmd
|
|
|
|
|
|
|
|
import (
|
2022-06-18 09:55:39 -04:00
|
|
|
"bytes"
|
|
|
|
"encoding/base64"
|
2020-07-21 20:49:56 -04:00
|
|
|
"encoding/json"
|
2022-06-18 09:55:39 -04:00
|
|
|
"encoding/xml"
|
|
|
|
"errors"
|
|
|
|
"fmt"
|
2020-07-21 20:49:56 -04:00
|
|
|
"io"
|
|
|
|
"net/http"
|
2022-06-18 09:55:39 -04:00
|
|
|
"strings"
|
|
|
|
"time"
|
2020-07-21 20:49:56 -04:00
|
|
|
|
2021-05-11 05:02:32 -04:00
|
|
|
jsoniter "github.com/json-iterator/go"
|
2022-06-18 09:55:39 -04:00
|
|
|
"github.com/klauspost/compress/zip"
|
2024-03-01 16:09:42 -05:00
|
|
|
"github.com/minio/kms-go/kes"
|
2023-06-19 20:53:08 -04:00
|
|
|
"github.com/minio/madmin-go/v3"
|
2022-06-18 09:55:39 -04:00
|
|
|
"github.com/minio/minio-go/v7/pkg/tags"
|
|
|
|
"github.com/minio/minio/internal/bucket/lifecycle"
|
|
|
|
objectlock "github.com/minio/minio/internal/bucket/object/lock"
|
|
|
|
"github.com/minio/minio/internal/bucket/versioning"
|
|
|
|
"github.com/minio/minio/internal/event"
|
|
|
|
"github.com/minio/minio/internal/kms"
|
2023-01-23 06:12:47 -05:00
|
|
|
"github.com/minio/mux"
|
2024-05-24 19:05:23 -04:00
|
|
|
"github.com/minio/pkg/v3/policy"
|
2020-07-21 20:49:56 -04:00
|
|
|
)
|
|
|
|
|
|
|
|
// Names of per-bucket configuration files stored under the bucket
// metadata prefix.
const (
	// bucketQuotaConfigFile holds the bucket's quota configuration (JSON).
	bucketQuotaConfigFile = "quota.json"
	// bucketTargetsFile holds the bucket's remote replication targets (JSON).
	bucketTargetsFile = "bucket-targets.json"
)
|
|
|
|
|
|
|
|
// PutBucketQuotaConfigHandler - PUT Bucket quota configuration.
|
|
|
|
// ----------
|
|
|
|
// Places a quota configuration on the specified bucket. The quota
|
|
|
|
// specified in the quota configuration will be applied by default
|
|
|
|
// to enforce total quota for the specified bucket.
|
|
|
|
func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
|
2023-07-13 17:52:21 -04:00
|
|
|
ctx := r.Context()
|
2020-07-21 20:49:56 -04:00
|
|
|
|
2023-09-04 15:57:37 -04:00
|
|
|
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SetBucketQuotaAdminAction)
|
2020-07-21 20:49:56 -04:00
|
|
|
if objectAPI == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
vars := mux.Vars(r)
|
2021-04-15 19:32:13 -04:00
|
|
|
bucket := pathClean(vars["bucket"])
|
2020-07-21 20:49:56 -04:00
|
|
|
|
2022-07-25 20:51:32 -04:00
|
|
|
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
|
2020-07-21 20:49:56 -04:00
|
|
|
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2022-09-19 14:05:16 -04:00
|
|
|
data, err := io.ReadAll(r.Body)
|
2020-07-21 20:49:56 -04:00
|
|
|
if err != nil {
|
|
|
|
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2022-01-19 23:02:24 -05:00
|
|
|
quotaConfig, err := parseBucketQuota(bucket, data)
|
|
|
|
if err != nil {
|
2021-06-17 23:27:04 -04:00
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
2020-07-21 20:49:56 -04:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2022-06-28 21:09:20 -04:00
|
|
|
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, data)
|
|
|
|
if err != nil {
|
2021-06-17 23:27:04 -04:00
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
2020-07-21 20:49:56 -04:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2022-01-19 23:02:24 -05:00
|
|
|
bucketMeta := madmin.SRBucketMeta{
|
2022-06-28 21:09:20 -04:00
|
|
|
Type: madmin.SRBucketMetaTypeQuotaConfig,
|
|
|
|
Bucket: bucket,
|
|
|
|
Quota: data,
|
|
|
|
UpdatedAt: updatedAt,
|
2022-01-19 23:02:24 -05:00
|
|
|
}
|
2023-12-19 16:22:47 -05:00
|
|
|
if quotaConfig.Size == 0 && quotaConfig.Quota == 0 {
|
2022-01-19 23:02:24 -05:00
|
|
|
bucketMeta.Quota = nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Call site replication hook.
|
2024-04-04 08:04:40 -04:00
|
|
|
replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta))
|
2022-01-19 23:02:24 -05:00
|
|
|
|
2020-07-21 20:49:56 -04:00
|
|
|
// Write success response.
|
|
|
|
writeSuccessResponseHeadersOnly(w)
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetBucketQuotaConfigHandler - gets bucket quota configuration
|
|
|
|
func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
|
2023-07-13 17:52:21 -04:00
|
|
|
ctx := r.Context()
|
2020-07-21 20:49:56 -04:00
|
|
|
|
2023-09-04 15:57:37 -04:00
|
|
|
objectAPI, _ := validateAdminReq(ctx, w, r, policy.GetBucketQuotaAdminAction)
|
2020-07-21 20:49:56 -04:00
|
|
|
if objectAPI == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
vars := mux.Vars(r)
|
2021-04-15 19:32:13 -04:00
|
|
|
bucket := pathClean(vars["bucket"])
|
2021-04-03 12:03:42 -04:00
|
|
|
|
2022-07-25 20:51:32 -04:00
|
|
|
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
|
2020-07-21 20:49:56 -04:00
|
|
|
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2022-04-24 05:36:31 -04:00
|
|
|
config, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket)
|
2020-07-21 20:49:56 -04:00
|
|
|
if err != nil {
|
|
|
|
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
configData, err := json.Marshal(config)
|
|
|
|
if err != nil {
|
|
|
|
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Write success response.
|
|
|
|
writeSuccessResponseJSON(w, configData)
|
|
|
|
}
|
|
|
|
|
2020-08-06 20:10:21 -04:00
|
|
|
// SetRemoteTargetHandler - sets a remote target for bucket
//
// The request body is an encrypted (admin-credential keyed) JSON-encoded
// madmin.BucketTarget. With ?update=true an existing target (looked up by
// ARN) is patched with only the fields named in the update ops; otherwise a
// new target is created, unless an identical one already exists, in which
// case its ARN is returned as-is.
func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	vars := mux.Vars(r)
	bucket := pathClean(vars["bucket"])
	update := r.Form.Get("update") == "true"

	// Get current object layer instance.
	objectAPI, _ := validateAdminReq(ctx, w, r, policy.SetBucketTargetAction)
	if objectAPI == nil {
		return
	}

	// Check if bucket exists.
	if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
		writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// The request body is encrypted with the requester's secret key; we
	// need the credential to decrypt it below.
	cred, _, s3Err := validateAdminSignature(ctx, r, "")
	if s3Err != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
		return
	}
	password := cred.SecretKey

	reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
		return
	}
	var target madmin.BucketTarget
	// NOTE: shadows the package-level "encoding/json" import with jsoniter
	// for the remainder of this function.
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	if err = json.Unmarshal(reqBytes, &target); err != nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
		return
	}
	// Reject a target that points back at this very bucket on this host.
	sameTarget, _ := isLocalHost(target.URL().Hostname(), target.URL().Port(), globalMinioPort)
	if sameTarget && bucket == target.TargetBucket {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBucketRemoteIdenticalToSource), r.URL)
		return
	}

	target.SourceBucket = bucket
	var ops []madmin.TargetUpdateType
	if update {
		ops = madmin.GetTargetUpdateOps(r.Form)
	} else {
		var exists bool // true if arn exists
		target.Arn, exists = globalBucketTargetSys.getRemoteARN(bucket, &target, "")
		if exists && target.Arn != "" { // return pre-existing ARN
			data, err := json.Marshal(target.Arn)
			if err != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
				return
			}
			// Write success response.
			writeSuccessResponseJSON(w, data)
			return
		}
	}
	// NOTE(review): "err" here is the (nil or stale) error from earlier
	// steps, not one describing the empty ARN - confirm intended.
	if target.Arn == "" {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
		return
	}
	// New remote targets cannot be added while site replication manages
	// this cluster; only updates to existing ones are allowed.
	if globalSiteReplicationSys.isEnabled() && !update {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrRemoteTargetDenyAddError, err), r.URL)
		return
	}

	if update {
		// overlay the updates on existing target
		tgt := globalBucketTargetSys.GetRemoteBucketTargetByArn(ctx, bucket, target.Arn)
		if tgt.Empty() {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrRemoteTargetNotFoundError, err), r.URL)
			return
		}
		// Apply only the fields the caller asked to change.
		for _, op := range ops {
			switch op {
			case madmin.CredentialsUpdateType:
				if !globalSiteReplicationSys.isEnabled() {
					// credentials update is possible only in bucket replication. User will never
					// know the site replicator creds.
					tgt.Credentials = target.Credentials
					tgt.TargetBucket = target.TargetBucket
					tgt.Secure = target.Secure
					tgt.Endpoint = target.Endpoint
				}
			case madmin.SyncUpdateType:
				tgt.ReplicationSync = target.ReplicationSync
			case madmin.ProxyUpdateType:
				tgt.DisableProxy = target.DisableProxy
			case madmin.PathUpdateType:
				tgt.Path = target.Path
			case madmin.BandwidthLimitUpdateType:
				tgt.BandwidthLimit = target.BandwidthLimit
			case madmin.HealthCheckDurationUpdateType:
				tgt.HealthCheckDuration = target.HealthCheckDuration
			}
		}
		target = tgt
	}

	// enforce minimum bandwidth limit as 100MBps
	if target.BandwidthLimit > 0 && target.BandwidthLimit < 100*1000*1000 {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrReplicationBandwidthLimitError, err), r.URL)
		return
	}
	if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, update); err != nil {
		switch err.(type) {
		case RemoteTargetConnectionErr:
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrReplicationRemoteConnectionError, err), r.URL)
		default:
			writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
		}
		return
	}
	// Persist the full (updated) target list into bucket metadata.
	targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
	if err != nil {
		// NOTE(review): non-JSON error writer in an otherwise JSON
		// handler - confirm intended.
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
	tgtBytes, err := json.Marshal(&targets)
	if err != nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
		return
	}
	if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Respond with the (new or pre-existing) target ARN.
	data, err := json.Marshal(target.Arn)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	// Write success response.
	writeSuccessResponseJSON(w, data)
}
|
|
|
|
|
2020-08-06 20:10:21 -04:00
|
|
|
// ListRemoteTargetsHandler - lists remote target(s) for a bucket or gets a target
|
2020-07-30 22:55:22 -04:00
|
|
|
// for a particular ARN type
|
2020-08-06 20:10:21 -04:00
|
|
|
func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *http.Request) {
|
2023-07-13 17:52:21 -04:00
|
|
|
ctx := r.Context()
|
2020-07-21 20:49:56 -04:00
|
|
|
|
|
|
|
vars := mux.Vars(r)
|
2021-04-15 19:32:13 -04:00
|
|
|
bucket := pathClean(vars["bucket"])
|
2020-07-30 22:55:22 -04:00
|
|
|
arnType := vars["type"]
|
2022-05-30 13:58:37 -04:00
|
|
|
|
2020-07-21 20:49:56 -04:00
|
|
|
// Get current object layer instance.
|
2023-09-04 15:57:37 -04:00
|
|
|
objectAPI, _ := validateAdminReq(ctx, w, r, policy.GetBucketTargetAction)
|
2020-07-21 20:49:56 -04:00
|
|
|
if objectAPI == nil {
|
|
|
|
return
|
|
|
|
}
|
2020-08-06 20:10:21 -04:00
|
|
|
if bucket != "" {
|
2020-09-23 13:37:54 -04:00
|
|
|
// Check if bucket exists.
|
2022-07-25 20:51:32 -04:00
|
|
|
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
|
2020-09-23 13:37:54 -04:00
|
|
|
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
|
|
|
return
|
|
|
|
}
|
2020-08-06 20:10:21 -04:00
|
|
|
if _, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket); err != nil {
|
|
|
|
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
|
|
|
return
|
2020-07-30 22:55:22 -04:00
|
|
|
}
|
2020-07-21 20:49:56 -04:00
|
|
|
}
|
2020-08-06 20:10:21 -04:00
|
|
|
targets := globalBucketTargetSys.ListTargets(ctx, bucket, arnType)
|
2020-07-30 22:55:22 -04:00
|
|
|
data, err := json.Marshal(targets)
|
2020-07-21 20:49:56 -04:00
|
|
|
if err != nil {
|
|
|
|
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
// Write success response.
|
|
|
|
writeSuccessResponseJSON(w, data)
|
|
|
|
}
|
|
|
|
|
2020-08-06 20:10:21 -04:00
|
|
|
// RemoveRemoteTargetHandler - removes a remote target for bucket with specified ARN
|
|
|
|
func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *http.Request) {
|
2023-07-13 17:52:21 -04:00
|
|
|
ctx := r.Context()
|
2020-07-21 20:49:56 -04:00
|
|
|
|
|
|
|
vars := mux.Vars(r)
|
2021-04-15 19:32:13 -04:00
|
|
|
bucket := pathClean(vars["bucket"])
|
2020-07-30 22:55:22 -04:00
|
|
|
arn := vars["arn"]
|
|
|
|
|
2020-07-21 20:49:56 -04:00
|
|
|
// Get current object layer instance.
|
2023-09-04 15:57:37 -04:00
|
|
|
objectAPI, _ := validateAdminReq(ctx, w, r, policy.SetBucketTargetAction)
|
2020-07-21 20:49:56 -04:00
|
|
|
if objectAPI == nil {
|
|
|
|
return
|
|
|
|
}
|
2020-08-06 20:10:21 -04:00
|
|
|
|
|
|
|
// Check if bucket exists.
|
2022-07-25 20:51:32 -04:00
|
|
|
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
|
2020-08-06 20:10:21 -04:00
|
|
|
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-07-30 22:55:22 -04:00
|
|
|
if err := globalBucketTargetSys.RemoveTarget(ctx, bucket, arn); err != nil {
|
|
|
|
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
|
|
|
return
|
|
|
|
}
|
2020-08-06 20:10:21 -04:00
|
|
|
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
|
2020-07-21 20:49:56 -04:00
|
|
|
if err != nil {
|
2021-06-17 23:27:04 -04:00
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
2020-07-30 22:55:22 -04:00
|
|
|
return
|
|
|
|
}
|
|
|
|
tgtBytes, err := json.Marshal(&targets)
|
|
|
|
if err != nil {
|
|
|
|
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
|
2020-07-21 20:49:56 -04:00
|
|
|
return
|
|
|
|
}
|
2022-06-28 21:09:20 -04:00
|
|
|
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil {
|
2021-06-17 23:27:04 -04:00
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
2020-07-30 22:55:22 -04:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-07-21 20:49:56 -04:00
|
|
|
// Write success response.
|
2020-07-30 22:55:22 -04:00
|
|
|
writeSuccessNoContent(w)
|
2020-07-21 20:49:56 -04:00
|
|
|
}
|
2022-06-18 09:55:39 -04:00
|
|
|
|
|
|
|
// ExportBucketMetadataHandler - exports all bucket metadata as a zipped file
//
// With ?bucket=<name> only that bucket is exported; otherwise every bucket
// is. Each known config file found for a bucket is added to the zip as
// "<bucket>/<configfile>"; configs that are simply absent are skipped,
// while any other retrieval/marshal error aborts the whole export.
func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	bucket := pathClean(r.Form.Get("bucket"))
	// Get current object layer instance.
	objectAPI, _ := validateAdminReq(ctx, w, r, policy.ExportBucketMetadataAction)
	if objectAPI == nil {
		return
	}

	var (
		buckets []BucketInfo
		err     error
	)
	if bucket != "" {
		// Check if bucket exists.
		if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
			writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
		buckets = append(buckets, BucketInfo{Name: bucket})
	} else {
		buckets, err = objectAPI.ListBuckets(ctx, BucketOptions{})
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
	}

	// Initialize a zip writer which will provide a zipped content
	// of bucket metadata
	zipWriter := zip.NewWriter(w)
	defer zipWriter.Close()

	// rawDataFn streams one config payload into the zip as a deflated
	// entry with mode 0600. Failures to create or fill the entry are
	// silently skipped so one bad entry does not abort the archive.
	// NOTE(review): the io.Copy error is discarded - a truncated entry
	// would go unnoticed; confirm this best-effort behavior is intended.
	rawDataFn := func(r io.Reader, filename string, sz int) {
		header, zerr := zip.FileInfoHeader(dummyFileInfo{
			name:    filename,
			size:    int64(sz),
			mode:    0o600,
			modTime: time.Now(),
			isDir:   false,
			sys:     nil,
		})
		if zerr == nil {
			header.Method = zip.Deflate
			zwriter, zerr := zipWriter.CreateHeader(header)
			if zerr == nil {
				io.Copy(zwriter, r)
			}
		}
	}

	// Config files considered for export, in the order they are emitted.
	// NOTE(review): bucketPolicyConfig is listed here but has no matching
	// case in the switch below, so it is silently never exported from
	// this code path - confirm against the import handler's expectations.
	cfgFiles := []string{
		bucketPolicyConfig,
		bucketNotificationConfig,
		bucketLifecycleConfig,
		bucketSSEConfig,
		bucketTaggingConfig,
		bucketQuotaConfigFile,
		objectLockConfig,
		bucketVersioningConfig,
		bucketReplicationConfig,
		bucketTargetsFile,
	}
	for _, bi := range buckets {
		for _, cfgFile := range cfgFiles {
			cfgPath := pathJoin(bi.Name, cfgFile)
			bucket := bi.Name
			switch cfgFile {
			case bucketNotificationConfig:
				config, err := globalBucketMetadataSys.GetNotificationConfig(bucket)
				if err != nil {
					adminLogIf(ctx, err)
					writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
					return
				}
				configData, err := xml.Marshal(config)
				if err != nil {
					writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
					return
				}
				rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
			case bucketLifecycleConfig:
				config, _, err := globalBucketMetadataSys.GetLifecycleConfig(bucket)
				if err != nil {
					// No lifecycle config on this bucket - skip it.
					if errors.Is(err, BucketLifecycleNotFound{Bucket: bucket}) {
						continue
					}
					adminLogIf(ctx, err)
					writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
					return
				}
				configData, err := xml.Marshal(config)
				if err != nil {
					writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
					return
				}
				rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
			case bucketQuotaConfigFile:
				config, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket)
				if err != nil {
					// No quota config on this bucket - skip it.
					if errors.Is(err, BucketQuotaConfigNotFound{Bucket: bucket}) {
						continue
					}
					writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
					return
				}
				// Quota config is stored as JSON, unlike the XML configs.
				configData, err := json.Marshal(config)
				if err != nil {
					writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
					return
				}
				rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
			case bucketSSEConfig:
				config, _, err := globalBucketMetadataSys.GetSSEConfig(bucket)
				if err != nil {
					// No SSE config on this bucket - skip it.
					if errors.Is(err, BucketSSEConfigNotFound{Bucket: bucket}) {
						continue
					}
					writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
					return
				}
				configData, err := xml.Marshal(config)
				if err != nil {
					writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
					return
				}
				rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
			case bucketTaggingConfig:
				config, _, err := globalBucketMetadataSys.GetTaggingConfig(bucket)
				if err != nil {
					// No tagging config on this bucket - skip it.
					if errors.Is(err, BucketTaggingNotFound{Bucket: bucket}) {
						continue
					}
					writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
					return
				}
				configData, err := xml.Marshal(config)
				if err != nil {
					writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
					return
				}
				rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
			case objectLockConfig:
				config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucket)
				if err != nil {
					// No object-lock config on this bucket - skip it.
					if errors.Is(err, BucketObjectLockConfigNotFound{Bucket: bucket}) {
						continue
					}
					writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
					return
				}

				configData, err := xml.Marshal(config)
				if err != nil {
					writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
					return
				}
				rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
			case bucketVersioningConfig:
				config, _, err := globalBucketMetadataSys.GetVersioningConfig(bucket)
				if err != nil {
					writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
					return
				}
				// ignore empty versioning configs
				if config.Status != versioning.Enabled && config.Status != versioning.Suspended {
					continue
				}
				configData, err := xml.Marshal(config)
				if err != nil {
					writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
					return
				}
				rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
			case bucketReplicationConfig:
				config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket)
				if err != nil {
					// No replication config on this bucket - skip it.
					if errors.Is(err, BucketReplicationConfigNotFound{Bucket: bucket}) {
						continue
					}
					writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
					return
				}
				configData, err := xml.Marshal(config)
				if err != nil {
					writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
					return
				}
				rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
			case bucketTargetsFile:
				config, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket)
				if err != nil {
					// No remote targets on this bucket - skip it.
					if errors.Is(err, BucketRemoteTargetNotFound{Bucket: bucket}) {
						continue
					}

					writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
					return
				}
				// NOTE(review): bucket-targets.json is a JSON config
				// elsewhere in this file but is marshaled with xml here -
				// confirm the importer expects XML for this entry.
				configData, err := xml.Marshal(config)
				if err != nil {
					writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
					return
				}
				rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
			}
		}
	}
}
|
|
|
|
|
2022-08-05 04:52:50 -04:00
|
|
|
// importMetaReport embeds madmin.BucketMetaImportErrs to accumulate the
// per-bucket, per-config-file status of a bucket metadata import.
type importMetaReport struct {
	madmin.BucketMetaImportErrs
}
|
|
|
|
|
|
|
|
func (i *importMetaReport) SetStatus(bucket, fname string, err error) {
|
|
|
|
st := i.Buckets[bucket]
|
|
|
|
var errMsg string
|
|
|
|
if err != nil {
|
|
|
|
errMsg = err.Error()
|
|
|
|
}
|
|
|
|
switch fname {
|
|
|
|
case bucketPolicyConfig:
|
|
|
|
st.Policy = madmin.MetaStatus{IsSet: true, Err: errMsg}
|
|
|
|
case bucketNotificationConfig:
|
|
|
|
st.Notification = madmin.MetaStatus{IsSet: true, Err: errMsg}
|
|
|
|
case bucketLifecycleConfig:
|
|
|
|
st.Lifecycle = madmin.MetaStatus{IsSet: true, Err: errMsg}
|
|
|
|
case bucketSSEConfig:
|
|
|
|
st.SSEConfig = madmin.MetaStatus{IsSet: true, Err: errMsg}
|
|
|
|
case bucketTaggingConfig:
|
|
|
|
st.Tagging = madmin.MetaStatus{IsSet: true, Err: errMsg}
|
|
|
|
case bucketQuotaConfigFile:
|
|
|
|
st.Quota = madmin.MetaStatus{IsSet: true, Err: errMsg}
|
|
|
|
case objectLockConfig:
|
|
|
|
st.ObjectLock = madmin.MetaStatus{IsSet: true, Err: errMsg}
|
|
|
|
case bucketVersioningConfig:
|
|
|
|
st.Versioning = madmin.MetaStatus{IsSet: true, Err: errMsg}
|
|
|
|
default:
|
|
|
|
st.Err = errMsg
|
|
|
|
}
|
|
|
|
i.Buckets[bucket] = st
|
|
|
|
}
|
|
|
|
|
2022-06-18 09:55:39 -04:00
|
|
|
// ImportBucketMetadataHandler - imports all bucket metadata from a zipped file and overwrite bucket metadata config
|
|
|
|
// There are some caveats regarding the following:
|
|
|
|
// 1. object lock config - object lock should have been specified at time of bucket creation. Only default retention settings are imported here.
|
|
|
|
// 2. Replication config - is omitted from import as remote target credentials are not available from exported data for security reasons.
|
|
|
|
// 3. lifecycle config - if transition rules are present, tier name needs to have been defined.
|
|
|
|
func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *http.Request) {
|
2023-07-13 17:52:21 -04:00
|
|
|
ctx := r.Context()
|
2022-06-18 09:55:39 -04:00
|
|
|
|
|
|
|
// Get current object layer instance.
|
2023-09-04 15:57:37 -04:00
|
|
|
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ImportBucketMetadataAction)
|
2022-06-18 09:55:39 -04:00
|
|
|
if objectAPI == nil {
|
|
|
|
return
|
|
|
|
}
|
2022-09-19 14:05:16 -04:00
|
|
|
data, err := io.ReadAll(r.Body)
|
2022-06-18 09:55:39 -04:00
|
|
|
if err != nil {
|
|
|
|
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
reader := bytes.NewReader(data)
|
|
|
|
zr, err := zip.NewReader(reader, int64(len(data)))
|
|
|
|
if err != nil {
|
|
|
|
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
|
|
|
return
|
|
|
|
}
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt := importMetaReport{
|
|
|
|
madmin.BucketMetaImportErrs{
|
|
|
|
Buckets: make(map[string]madmin.BucketStatus, len(zr.File)),
|
|
|
|
},
|
|
|
|
}
|
2023-08-25 10:59:16 -04:00
|
|
|
|
|
|
|
bucketMap := make(map[string]*BucketMetadata, len(zr.File))
|
|
|
|
|
|
|
|
updatedAt := UTCNow()
|
|
|
|
|
|
|
|
for _, file := range zr.File {
|
|
|
|
slc := strings.Split(file.Name, slashSeparator)
|
|
|
|
if len(slc) != 2 { // expecting bucket/configfile in the zipfile
|
|
|
|
rpt.SetStatus(file.Name, "", fmt.Errorf("malformed zip - expecting format bucket/<config.json>"))
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
bucket := slc[0]
|
|
|
|
meta, err := readBucketMetadata(ctx, objectAPI, bucket)
|
|
|
|
if err == nil {
|
|
|
|
bucketMap[bucket] = &meta
|
|
|
|
} else if err != errConfigNotFound {
|
|
|
|
rpt.SetStatus(bucket, "", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-06-18 09:55:39 -04:00
|
|
|
// import object lock config if any - order of import matters here.
|
|
|
|
for _, file := range zr.File {
|
|
|
|
slc := strings.Split(file.Name, slashSeparator)
|
|
|
|
if len(slc) != 2 { // expecting bucket/configfile in the zipfile
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(file.Name, "", fmt.Errorf("malformed zip - expecting format bucket/<config.json>"))
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
2022-08-05 04:52:50 -04:00
|
|
|
bucket, fileName := slc[0], slc[1]
|
2023-03-06 11:56:10 -05:00
|
|
|
if fileName == objectLockConfig {
|
2022-06-18 09:55:39 -04:00
|
|
|
reader, err := file.Open()
|
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
config, err := objectlock.ParseObjectLockConfig(reader)
|
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
configData, err := xml.Marshal(config)
|
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
if _, ok := bucketMap[bucket]; !ok {
|
2022-07-25 20:51:32 -04:00
|
|
|
opts := MakeBucketOptions{
|
2023-05-22 15:05:14 -04:00
|
|
|
LockEnabled: config.Enabled(),
|
2024-01-25 20:20:54 -05:00
|
|
|
ForceCreate: true, // ignore if it already exists
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
2022-12-23 10:46:00 -05:00
|
|
|
err = objectAPI.MakeBucket(ctx, bucket, opts)
|
2022-06-18 09:55:39 -04:00
|
|
|
if err != nil {
|
2024-01-25 20:20:54 -05:00
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
2024-03-05 19:05:28 -05:00
|
|
|
v, _ := globalBucketMetadataSys.Get(bucket)
|
2023-08-25 10:59:16 -04:00
|
|
|
bucketMap[bucket] = &v
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
2023-08-25 10:59:16 -04:00
|
|
|
bucketMap[bucket].ObjectLockConfigXML = configData
|
|
|
|
bucketMap[bucket].ObjectLockConfigUpdatedAt = updatedAt
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, nil)
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// import versioning metadata
|
|
|
|
for _, file := range zr.File {
|
|
|
|
slc := strings.Split(file.Name, slashSeparator)
|
|
|
|
if len(slc) != 2 { // expecting bucket/configfile in the zipfile
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(file.Name, "", fmt.Errorf("malformed zip - expecting format bucket/<config.json>"))
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
2022-08-05 04:52:50 -04:00
|
|
|
bucket, fileName := slc[0], slc[1]
|
2023-03-06 11:56:10 -05:00
|
|
|
if fileName == bucketVersioningConfig {
|
2022-06-18 09:55:39 -04:00
|
|
|
reader, err := file.Open()
|
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
v, err := versioning.ParseConfig(io.LimitReader(reader, maxBucketVersioningConfigSize))
|
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
if _, ok := bucketMap[bucket]; !ok {
|
2024-01-25 20:20:54 -05:00
|
|
|
if err = objectAPI.MakeBucket(ctx, bucket, MakeBucketOptions{
|
|
|
|
ForceCreate: true, // ignore if it already exists
|
|
|
|
}); err != nil {
|
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
2024-03-05 19:05:28 -05:00
|
|
|
v, _ := globalBucketMetadataSys.Get(bucket)
|
2023-08-25 10:59:16 -04:00
|
|
|
bucketMap[bucket] = &v
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
if globalSiteReplicationSys.isEnabled() && v.Suspended() {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, fmt.Errorf("Cluster replication is enabled for this site, so the versioning state cannot be suspended."))
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled && v.Suspended() {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, fmt.Errorf("An Object Lock configuration is present on this bucket, so the versioning state cannot be suspended."))
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
2024-04-30 21:09:56 -04:00
|
|
|
if rcfg, _ := getReplicationConfig(ctx, bucket); rcfg != nil && v.Suspended() {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, fmt.Errorf("A replication configuration is present on this bucket, so the versioning state cannot be suspended."))
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
configData, err := xml.Marshal(v)
|
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
2023-08-25 10:59:16 -04:00
|
|
|
bucketMap[bucket].VersioningConfigXML = configData
|
|
|
|
bucketMap[bucket].VersioningConfigUpdatedAt = updatedAt
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, nil)
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, file := range zr.File {
|
|
|
|
reader, err := file.Open()
|
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(file.Name, "", err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
sz := file.FileInfo().Size()
|
|
|
|
slc := strings.Split(file.Name, slashSeparator)
|
|
|
|
if len(slc) != 2 { // expecting bucket/configfile in the zipfile
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(file.Name, "", fmt.Errorf("malformed zip - expecting format bucket/<config.json>"))
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
2022-08-05 04:52:50 -04:00
|
|
|
bucket, fileName := slc[0], slc[1]
|
2023-08-25 10:59:16 -04:00
|
|
|
|
2022-06-18 09:55:39 -04:00
|
|
|
// create bucket if it does not exist yet.
|
|
|
|
if _, ok := bucketMap[bucket]; !ok {
|
2024-01-25 20:20:54 -05:00
|
|
|
err = objectAPI.MakeBucket(ctx, bucket, MakeBucketOptions{
|
|
|
|
ForceCreate: true, // ignore if it already exists
|
|
|
|
})
|
2022-06-18 09:55:39 -04:00
|
|
|
if err != nil {
|
2024-01-25 20:20:54 -05:00
|
|
|
rpt.SetStatus(bucket, "", err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
2024-03-05 19:05:28 -05:00
|
|
|
v, _ := globalBucketMetadataSys.Get(bucket)
|
2023-08-25 10:59:16 -04:00
|
|
|
bucketMap[bucket] = &v
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
2022-08-05 04:52:50 -04:00
|
|
|
if _, ok := bucketMap[bucket]; !ok {
|
|
|
|
continue
|
|
|
|
}
|
2022-06-18 09:55:39 -04:00
|
|
|
switch fileName {
|
|
|
|
case bucketNotificationConfig:
|
2024-05-16 19:13:47 -04:00
|
|
|
config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region(), globalEventNotifier.targetList)
|
2022-06-18 09:55:39 -04:00
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
configData, err := xml.Marshal(config)
|
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
2023-08-25 10:59:16 -04:00
|
|
|
bucketMap[bucket].NotificationConfigXML = configData
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, nil)
|
2022-06-18 09:55:39 -04:00
|
|
|
case bucketPolicyConfig:
|
|
|
|
// Error out if Content-Length is beyond allowed size.
|
|
|
|
if sz > maxBucketPolicySize {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrPolicyTooLarge.String()))
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
2022-09-19 14:05:16 -04:00
|
|
|
bucketPolicyBytes, err := io.ReadAll(io.LimitReader(reader, sz))
|
2022-06-18 09:55:39 -04:00
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
2023-09-04 15:57:37 -04:00
|
|
|
bucketPolicy, err := policy.ParseBucketPolicyConfig(bytes.NewReader(bucketPolicyBytes), bucket)
|
2022-06-18 09:55:39 -04:00
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Version in policy must not be empty
|
|
|
|
if bucketPolicy.Version == "" {
|
2022-12-06 11:07:24 -05:00
|
|
|
rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrPolicyInvalidVersion.String()))
|
2022-08-05 04:52:50 -04:00
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
configData, err := json.Marshal(bucketPolicy)
|
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
2023-08-25 10:59:16 -04:00
|
|
|
bucketMap[bucket].PolicyConfigJSON = configData
|
|
|
|
bucketMap[bucket].PolicyConfigUpdatedAt = updatedAt
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, nil)
|
2022-06-18 09:55:39 -04:00
|
|
|
case bucketLifecycleConfig:
|
|
|
|
bucketLifecycle, err := lifecycle.ParseLifecycleConfig(io.LimitReader(reader, sz))
|
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
2024-05-22 02:50:03 -04:00
|
|
|
rcfg, err := globalBucketObjectLockSys.Get(bucket)
|
|
|
|
if err != nil {
|
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
|
|
|
}
|
2022-06-18 09:55:39 -04:00
|
|
|
// Validate the received bucket policy document
|
2024-05-22 02:50:03 -04:00
|
|
|
if err = bucketLifecycle.Validate(rcfg); err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Validate the transition storage ARNs
|
|
|
|
if err = validateTransitionTier(bucketLifecycle); err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
configData, err := xml.Marshal(bucketLifecycle)
|
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
2023-08-25 10:59:16 -04:00
|
|
|
bucketMap[bucket].LifecycleConfigXML = configData
|
|
|
|
bucketMap[bucket].LifecycleConfigUpdatedAt = updatedAt
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, nil)
|
2022-06-18 09:55:39 -04:00
|
|
|
case bucketSSEConfig:
|
|
|
|
// Parse bucket encryption xml
|
|
|
|
encConfig, err := validateBucketSSEConfig(io.LimitReader(reader, maxBucketSSEConfigSize))
|
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Return error if KMS is not initialized
|
|
|
|
if GlobalKMS == nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s", errorCodes[ErrKMSNotConfigured].Description))
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
kmsKey := encConfig.KeyID()
|
|
|
|
if kmsKey != "" {
|
2024-05-07 19:55:37 -04:00
|
|
|
_, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
|
|
|
|
Name: kmsKey,
|
|
|
|
AssociatedData: kms.Context{"MinIO admin API": "ServerInfoHandler"}, // Context for a test key operation
|
|
|
|
})
|
2022-06-18 09:55:39 -04:00
|
|
|
if err != nil {
|
|
|
|
if errors.Is(err, kes.ErrKeyNotFound) {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, errKMSKeyNotFound)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
configData, err := xml.Marshal(encConfig)
|
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
2023-08-25 10:59:16 -04:00
|
|
|
bucketMap[bucket].EncryptionConfigXML = configData
|
|
|
|
bucketMap[bucket].EncryptionConfigUpdatedAt = updatedAt
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, nil)
|
2022-06-18 09:55:39 -04:00
|
|
|
case bucketTaggingConfig:
|
|
|
|
tags, err := tags.ParseBucketXML(io.LimitReader(reader, sz))
|
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
configData, err := xml.Marshal(tags)
|
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
2023-08-25 10:59:16 -04:00
|
|
|
bucketMap[bucket].TaggingConfigXML = configData
|
|
|
|
bucketMap[bucket].TaggingConfigUpdatedAt = updatedAt
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, nil)
|
2022-06-18 09:55:39 -04:00
|
|
|
case bucketQuotaConfigFile:
|
2022-09-19 14:05:16 -04:00
|
|
|
data, err := io.ReadAll(reader)
|
2022-06-18 09:55:39 -04:00
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
2023-08-25 10:59:16 -04:00
|
|
|
_, err = parseBucketQuota(bucket, data)
|
2022-06-18 09:55:39 -04:00
|
|
|
if err != nil {
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
|
|
|
|
2023-08-25 10:59:16 -04:00
|
|
|
bucketMap[bucket].QuotaConfigJSON = data
|
|
|
|
bucketMap[bucket].QuotaConfigUpdatedAt = updatedAt
|
2022-08-05 04:52:50 -04:00
|
|
|
rpt.SetStatus(bucket, fileName, nil)
|
2023-08-25 10:59:16 -04:00
|
|
|
}
|
|
|
|
}
|
2022-06-18 09:55:39 -04:00
|
|
|
|
2023-08-25 10:59:16 -04:00
|
|
|
enc := func(b []byte) *string {
|
|
|
|
if b == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
v := base64.StdEncoding.EncodeToString(b)
|
|
|
|
return &v
|
|
|
|
}
|
2022-06-18 09:55:39 -04:00
|
|
|
|
2023-08-25 10:59:16 -04:00
|
|
|
for bucket, meta := range bucketMap {
|
|
|
|
err := globalBucketMetadataSys.save(ctx, *meta)
|
|
|
|
if err != nil {
|
|
|
|
rpt.SetStatus(bucket, "", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
// Call site replication hook.
|
|
|
|
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
|
|
|
|
Bucket: bucket,
|
|
|
|
Quota: meta.QuotaConfigJSON,
|
|
|
|
Policy: meta.PolicyConfigJSON,
|
|
|
|
Versioning: enc(meta.VersioningConfigXML),
|
|
|
|
Tags: enc(meta.TaggingConfigXML),
|
|
|
|
ObjectLockConfig: enc(meta.ObjectLockConfigXML),
|
|
|
|
SSEConfig: enc(meta.EncryptionConfigXML),
|
|
|
|
UpdatedAt: updatedAt,
|
|
|
|
}); err != nil {
|
|
|
|
rpt.SetStatus(bucket, "", err)
|
|
|
|
continue
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
2023-08-25 10:59:16 -04:00
|
|
|
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
2022-08-05 04:52:50 -04:00
|
|
|
|
|
|
|
rptData, err := json.Marshal(rpt.BucketMetaImportErrs)
|
|
|
|
if err != nil {
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
writeSuccessResponseJSON(w, rptData)
|
2022-06-18 09:55:39 -04:00
|
|
|
}
|
2022-07-21 14:05:44 -04:00
|
|
|
|
|
|
|
// ReplicationDiffHandler - POST returns info on unreplicated versions for a remote target ARN
|
2022-10-04 20:47:31 -04:00
|
|
|
// to the connected HTTP client.
|
2022-07-21 14:05:44 -04:00
|
|
|
func (a adminAPIHandlers) ReplicationDiffHandler(w http.ResponseWriter, r *http.Request) {
|
2023-07-13 17:52:21 -04:00
|
|
|
ctx := r.Context()
|
2022-07-21 14:05:44 -04:00
|
|
|
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
bucket := vars["bucket"]
|
|
|
|
|
2023-09-04 15:57:37 -04:00
|
|
|
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ReplicationDiff)
|
2022-07-21 14:05:44 -04:00
|
|
|
if objectAPI == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check if bucket exists.
|
2022-07-25 20:51:32 -04:00
|
|
|
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
|
2022-07-21 14:05:44 -04:00
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
opts := extractReplicateDiffOpts(r.Form)
|
|
|
|
if opts.ARN != "" {
|
|
|
|
tgt := globalBucketTargetSys.GetRemoteBucketTargetByArn(ctx, bucket, opts.ARN)
|
|
|
|
if tgt.Empty() {
|
|
|
|
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, fmt.Errorf("invalid arn : '%s'", opts.ARN)), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
|
|
|
|
defer keepAliveTicker.Stop()
|
|
|
|
|
|
|
|
diffCh, err := getReplicationDiff(ctx, objectAPI, bucket, opts)
|
|
|
|
if err != nil {
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
enc := json.NewEncoder(w)
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case entry, ok := <-diffCh:
|
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err := enc.Encode(entry); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if len(diffCh) == 0 {
|
|
|
|
// Flush if nothing is queued
|
|
|
|
w.(http.Flusher).Flush()
|
|
|
|
}
|
|
|
|
case <-keepAliveTicker.C:
|
|
|
|
if len(diffCh) > 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if _, err := w.Write([]byte(" ")); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
w.(http.Flusher).Flush()
|
|
|
|
case <-ctx.Done():
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2023-07-13 02:51:33 -04:00
|
|
|
|
|
|
|
// ReplicationMRFHandler - POST returns info on entries in the MRF backlog for a node or all nodes
|
|
|
|
func (a adminAPIHandlers) ReplicationMRFHandler(w http.ResponseWriter, r *http.Request) {
|
2023-07-13 17:52:21 -04:00
|
|
|
ctx := r.Context()
|
2023-07-13 02:51:33 -04:00
|
|
|
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
bucket := vars["bucket"]
|
|
|
|
|
2023-09-04 15:57:37 -04:00
|
|
|
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ReplicationDiff)
|
2023-07-13 02:51:33 -04:00
|
|
|
if objectAPI == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check if bucket exists.
|
|
|
|
if bucket != "" {
|
|
|
|
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
q := r.Form
|
|
|
|
node := q.Get("node")
|
|
|
|
|
|
|
|
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
|
|
|
|
defer keepAliveTicker.Stop()
|
|
|
|
|
|
|
|
mrfCh, err := globalNotificationSys.GetReplicationMRF(ctx, bucket, node)
|
|
|
|
if err != nil {
|
|
|
|
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
enc := json.NewEncoder(w)
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case entry, ok := <-mrfCh:
|
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err := enc.Encode(entry); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if len(mrfCh) == 0 {
|
|
|
|
// Flush if nothing is queued
|
|
|
|
w.(http.Flusher).Flush()
|
|
|
|
}
|
|
|
|
case <-keepAliveTicker.C:
|
|
|
|
if len(mrfCh) > 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if _, err := w.Write([]byte(" ")); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
w.(http.Flusher).Flush()
|
|
|
|
case <-ctx.Done():
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|