2022-10-03 05:10:15 -04:00
|
|
|
// Copyright (c) 2015-2022 MinIO, Inc.
|
|
|
|
//
|
|
|
|
// This file is part of MinIO Object Storage stack
|
|
|
|
//
|
|
|
|
// This program is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Affero General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// This program is distributed in the hope that it will be useful
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Affero General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Affero General Public License
|
|
|
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
|
|
|
package cmd
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"context"
|
|
|
|
"encoding/binary"
|
|
|
|
"encoding/json"
|
|
|
|
"errors"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"math/rand"
|
|
|
|
"net/http"
|
|
|
|
"net/url"
|
2023-03-31 13:48:36 -04:00
|
|
|
"path"
|
2023-02-13 15:07:58 -05:00
|
|
|
"runtime"
|
|
|
|
"strconv"
|
2022-10-19 00:22:21 -04:00
|
|
|
"strings"
|
2022-10-03 05:10:15 -04:00
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/dustin/go-humanize"
|
|
|
|
"github.com/lithammer/shortuuid/v4"
|
2023-06-19 20:53:08 -04:00
|
|
|
"github.com/minio/madmin-go/v3"
|
2022-10-03 05:10:15 -04:00
|
|
|
miniogo "github.com/minio/minio-go/v7"
|
|
|
|
"github.com/minio/minio-go/v7/pkg/credentials"
|
2023-03-31 13:48:36 -04:00
|
|
|
"github.com/minio/minio-go/v7/pkg/encrypt"
|
2022-10-19 00:22:21 -04:00
|
|
|
"github.com/minio/minio-go/v7/pkg/tags"
|
2022-10-03 05:10:15 -04:00
|
|
|
"github.com/minio/minio/internal/auth"
|
2023-03-31 13:48:36 -04:00
|
|
|
"github.com/minio/minio/internal/crypto"
|
|
|
|
"github.com/minio/minio/internal/hash"
|
2022-10-03 05:10:15 -04:00
|
|
|
xhttp "github.com/minio/minio/internal/http"
|
|
|
|
"github.com/minio/minio/internal/logger"
|
|
|
|
"github.com/minio/pkg/console"
|
2023-02-13 15:07:58 -05:00
|
|
|
"github.com/minio/pkg/env"
|
2022-10-12 18:47:41 -04:00
|
|
|
iampolicy "github.com/minio/pkg/iam/policy"
|
2022-10-03 05:10:15 -04:00
|
|
|
"github.com/minio/pkg/wildcard"
|
2023-04-26 01:57:40 -04:00
|
|
|
"github.com/minio/pkg/workers"
|
2022-10-03 05:10:15 -04:00
|
|
|
"gopkg.in/yaml.v2"
|
|
|
|
)
|
|
|
|
|
|
|
|
// replicate:
|
|
|
|
// # source of the objects to be replicated
|
|
|
|
// source:
|
|
|
|
// type: "minio"
|
|
|
|
// bucket: "testbucket"
|
|
|
|
// prefix: "spark/"
|
|
|
|
//
|
|
|
|
// # optional flags based filtering criteria
|
|
|
|
// # for source objects
|
|
|
|
// flags:
|
|
|
|
// filter:
|
|
|
|
// newerThan: "7d"
|
|
|
|
// olderThan: "7d"
|
|
|
|
// createdAfter: "date"
|
|
|
|
// createdBefore: "date"
|
|
|
|
// tags:
|
|
|
|
// - key: "name"
|
|
|
|
// value: "value*"
|
|
|
|
// metadata:
|
|
|
|
// - key: "content-type"
|
|
|
|
// value: "image/*"
|
|
|
|
// notify:
|
|
|
|
// endpoint: "https://splunk-hec.dev.com"
|
|
|
|
// token: "Splunk ..." # e.g. "Bearer token"
|
|
|
|
//
|
|
|
|
// # target where the objects must be replicated
|
|
|
|
// target:
|
|
|
|
// type: "minio"
|
|
|
|
// bucket: "testbucket1"
|
|
|
|
// endpoint: "https://play.min.io"
|
2023-05-21 18:16:31 -04:00
|
|
|
// path: "on"
|
2022-10-03 05:10:15 -04:00
|
|
|
// credentials:
|
|
|
|
// accessKey: "minioadmin"
|
|
|
|
// secretKey: "minioadmin"
|
|
|
|
// sessionToken: ""
|
|
|
|
|
|
|
|
// BatchJobReplicateKV is a datatype that holds key and values for filtering of objects
// used by metadata filter as well as tags based filtering.
type BatchJobReplicateKV struct {
	// Key is the tag/metadata key to match; must be non-empty (see Validate).
	Key string `yaml:"key" json:"key"`
	// Value is the expected value; may contain wildcards (see Match).
	Value string `yaml:"value" json:"value"`
}
|
|
|
|
|
|
|
|
// Validate returns an error if key is empty
|
|
|
|
func (kv BatchJobReplicateKV) Validate() error {
|
|
|
|
if kv.Key == "" {
|
|
|
|
return errInvalidArgument
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Empty indicates if kv is not set
|
|
|
|
func (kv BatchJobReplicateKV) Empty() bool {
|
|
|
|
return kv.Key == "" && kv.Value == ""
|
|
|
|
}
|
|
|
|
|
|
|
|
// Match matches input kv with kv, value will be wildcard matched depending on the user input
|
|
|
|
func (kv BatchJobReplicateKV) Match(ikv BatchJobReplicateKV) bool {
|
|
|
|
if kv.Empty() {
|
|
|
|
return true
|
|
|
|
}
|
2022-10-19 00:22:21 -04:00
|
|
|
if strings.EqualFold(kv.Key, ikv.Key) {
|
2022-10-03 05:10:15 -04:00
|
|
|
return wildcard.Match(kv.Value, ikv.Value)
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// BatchReplicateRetry datatype represents total retry attempts and delay between each retries.
type BatchReplicateRetry struct {
	Attempts int `yaml:"attempts" json:"attempts"` // number of retry attempts
	Delay time.Duration `yaml:"delay" json:"delay"` // delay between each retries
}
|
|
|
|
|
|
|
|
// Validate validates input replicate retries.
|
|
|
|
func (r BatchReplicateRetry) Validate() error {
|
|
|
|
if r.Attempts < 0 {
|
|
|
|
return errInvalidArgument
|
|
|
|
}
|
|
|
|
|
|
|
|
if r.Delay < 0 {
|
|
|
|
return errInvalidArgument
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// BatchReplicateFilter holds all the filters currently supported for batch replication
type BatchReplicateFilter struct {
	// NewerThan restricts replication to objects modified within this duration.
	NewerThan time.Duration `yaml:"newerThan,omitempty" json:"newerThan"`
	// OlderThan restricts replication to objects modified before this duration.
	OlderThan time.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
	// CreatedAfter/CreatedBefore bound the object modification time window.
	CreatedAfter time.Time `yaml:"createdAfter,omitempty" json:"createdAfter"`
	CreatedBefore time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
	// Tags and Metadata are key/value filters (values support wildcards).
	Tags []BatchJobReplicateKV `yaml:"tags,omitempty" json:"tags"`
	Metadata []BatchJobReplicateKV `yaml:"metadata,omitempty" json:"metadata"`
}
|
|
|
|
|
|
|
|
// BatchReplicateNotification success or failure notification endpoint for each job attempts
type BatchReplicateNotification struct {
	// Endpoint receives a POST with the job state; empty disables notifications.
	Endpoint string `yaml:"endpoint" json:"endpoint"`
	// Token, when set, is sent verbatim in the Authorization header.
	Token string `yaml:"token" json:"token"`
}
|
|
|
|
|
|
|
|
// BatchJobReplicateFlags various configurations for replication job definition currently includes
// - filter
// - notify
// - retry
type BatchJobReplicateFlags struct {
	Filter BatchReplicateFilter `yaml:"filter" json:"filter"`
	Notify BatchReplicateNotification `yaml:"notify" json:"notify"`
	Retry BatchReplicateRetry `yaml:"retry" json:"retry"`
}
|
|
|
|
|
|
|
|
// BatchJobReplicateResourceType defines the type of batch jobs
// (currently "minio" or "s3"; see the resource type constants).
type BatchJobReplicateResourceType string
|
|
|
|
|
|
|
|
// Validate validates if the replicate resource type is recognized and supported
|
|
|
|
func (t BatchJobReplicateResourceType) Validate() error {
|
|
|
|
switch t {
|
|
|
|
case BatchJobReplicateResourceMinIO:
|
2023-05-03 01:52:35 -04:00
|
|
|
case BatchJobReplicateResourceS3:
|
2022-10-03 05:10:15 -04:00
|
|
|
default:
|
|
|
|
return errInvalidArgument
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-05-21 18:16:31 -04:00
|
|
|
func (t BatchJobReplicateResourceType) isMinio() bool {
|
|
|
|
return t == BatchJobReplicateResourceMinIO
|
|
|
|
}
|
|
|
|
|
2022-10-03 05:10:15 -04:00
|
|
|
// Different types of batch jobs..
const (
	// BatchJobReplicateResourceMinIO replicates to/from a MinIO deployment.
	BatchJobReplicateResourceMinIO BatchJobReplicateResourceType = "minio"
	// BatchJobReplicateResourceS3 replicates to/from a generic S3 endpoint.
	BatchJobReplicateResourceS3 BatchJobReplicateResourceType = "s3"

	// add future targets
)
|
|
|
|
|
|
|
|
// BatchJobReplicateCredentials access credentials for batch replication it may
// be either for target or source.
type BatchJobReplicateCredentials struct {
	AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty" yaml:"accessKey"`
	SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty" yaml:"secretKey"`
	SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty" yaml:"sessionToken"`
}
|
|
|
|
|
2023-03-31 13:48:36 -04:00
|
|
|
// Empty indicates if credentials are not set
|
|
|
|
func (c BatchJobReplicateCredentials) Empty() bool {
|
|
|
|
return c.AccessKey == "" && c.SecretKey == "" && c.SessionToken == ""
|
|
|
|
}
|
|
|
|
|
2022-10-03 05:10:15 -04:00
|
|
|
// Validate validates if credentials are valid
|
|
|
|
func (c BatchJobReplicateCredentials) Validate() error {
|
|
|
|
if !auth.IsAccessKeyValid(c.AccessKey) || !auth.IsSecretKeyValid(c.SecretKey) {
|
|
|
|
return errInvalidArgument
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// BatchJobReplicateTarget describes target element of the replication job that receives
// the filtered data from source
type BatchJobReplicateTarget struct {
	Type BatchJobReplicateResourceType `yaml:"type" json:"type"`
	Bucket string `yaml:"bucket" json:"bucket"`
	Prefix string `yaml:"prefix" json:"prefix"`
	Endpoint string `yaml:"endpoint" json:"endpoint"`
	// Path controls bucket lookup style: "on", "off", "auto" or "" (see ValidPath).
	Path string `yaml:"path" json:"path"`
	Creds BatchJobReplicateCredentials `yaml:"credentials" json:"credentials"`
}
|
|
|
|
|
2023-05-21 18:16:31 -04:00
|
|
|
// ValidPath returns true if path is valid
|
|
|
|
func (t BatchJobReplicateTarget) ValidPath() bool {
|
|
|
|
return t.Path == "on" || t.Path == "off" || t.Path == "auto" || t.Path == ""
|
|
|
|
}
|
|
|
|
|
2022-10-03 05:10:15 -04:00
|
|
|
// BatchJobReplicateSource describes source element of the replication job that is
// the source of the data for the target
type BatchJobReplicateSource struct {
	Type BatchJobReplicateResourceType `yaml:"type" json:"type"`
	Bucket string `yaml:"bucket" json:"bucket"`
	Prefix string `yaml:"prefix" json:"prefix"`
	Endpoint string `yaml:"endpoint" json:"endpoint"`
	// Path controls bucket lookup style: "on", "off", "auto" or "" (see ValidPath).
	Path string `yaml:"path" json:"path"`
	Creds BatchJobReplicateCredentials `yaml:"credentials" json:"credentials"`
}
|
|
|
|
|
2023-05-21 18:16:31 -04:00
|
|
|
// ValidPath returns true if path is valid
func (s BatchJobReplicateSource) ValidPath() bool {
	switch s.Path {
	// "" defaults to auto-detection of the bucket lookup style.
	case "on", "off", "auto", "":
		return true
	default:
		return false
	}
}
|
|
|
|
|
2022-10-03 05:10:15 -04:00
|
|
|
// BatchJobReplicateV1 v1 of batch job replication
type BatchJobReplicateV1 struct {
	APIVersion string `yaml:"apiVersion" json:"apiVersion"`
	Flags BatchJobReplicateFlags `yaml:"flags" json:"flags"`
	Target BatchJobReplicateTarget `yaml:"target" json:"target"`
	Source BatchJobReplicateSource `yaml:"source" json:"source"`

	// clnt is a runtime-only client handle; excluded from msgp serialization.
	clnt *miniogo.Core `msg:"-"`
}
|
|
|
|
|
2023-03-31 13:48:36 -04:00
|
|
|
// RemoteToLocal returns true if source is remote and target is local
|
|
|
|
func (r BatchJobReplicateV1) RemoteToLocal() bool {
|
|
|
|
return !r.Source.Creds.Empty()
|
|
|
|
}
|
|
|
|
|
2022-10-03 05:10:15 -04:00
|
|
|
// BatchJobRequest this is an internal data structure not for external consumption.
type BatchJobRequest struct {
	ID string `yaml:"-" json:"name"`
	User string `yaml:"-" json:"user"`
	Started time.Time `yaml:"-" json:"started"`
	Location string `yaml:"-" json:"location"`
	Replicate *BatchJobReplicateV1 `yaml:"replicate" json:"replicate"`
	KeyRotate *BatchJobKeyRotateV1 `yaml:"keyrotate" json:"keyrotate"`
	// ctx is runtime-only and excluded from msgp serialization.
	// NOTE(review): storing a context in a struct is discouraged in Go;
	// consider threading it through call sites instead.
	ctx context.Context `msg:"-"`
}
|
|
|
|
|
|
|
|
// Notify notifies notification endpoint if configured regarding job failure or success.
|
|
|
|
func (r BatchJobReplicateV1) Notify(ctx context.Context, body io.Reader) error {
|
|
|
|
if r.Flags.Notify.Endpoint == "" {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
|
|
|
defer cancel()
|
|
|
|
|
|
|
|
req, err := http.NewRequestWithContext(ctx, http.MethodPost, r.Flags.Notify.Endpoint, body)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if r.Flags.Notify.Token != "" {
|
|
|
|
req.Header.Set("Authorization", r.Flags.Notify.Token)
|
|
|
|
}
|
|
|
|
|
|
|
|
clnt := http.Client{Transport: getRemoteInstanceTransport}
|
|
|
|
resp, err := clnt.Do(req)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
xhttp.DrainBody(resp.Body)
|
|
|
|
if resp.StatusCode != http.StatusOK {
|
|
|
|
return errors.New(resp.Status)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReplicateFromSource - this is not implemented yet where source is 'remote' and target is local.
// Pulls a single object (or replays its delete marker) from the remote source
// and writes it into the local object layer under the configured target
// bucket/prefix. Multipart-uploaded sources (ETag of the form "<md5>-<N>")
// are copied part-by-part via copyWithMultipartfromSource.
// NOTE(review): the retry parameter is not referenced in this body — confirm
// whether retry-specific short-circuiting (as done in ReplicateToTarget) is
// intentionally omitted here.
func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api ObjectLayer, core *miniogo.Core, srcObjInfo ObjectInfo, retry bool) error {
	srcBucket := r.Source.Bucket
	tgtBucket := r.Target.Bucket
	srcObject := srcObjInfo.Name
	tgtObject := srcObjInfo.Name
	if r.Target.Prefix != "" {
		// Replicated objects land under the configured target prefix.
		tgtObject = path.Join(r.Target.Prefix, srcObjInfo.Name)
	}

	versioned := globalBucketVersioningSys.PrefixEnabled(tgtBucket, tgtObject)
	versionSuspended := globalBucketVersioningSys.PrefixSuspended(tgtBucket, tgtObject)
	versionID := srcObjInfo.VersionID
	// S3 endpoints do not carry MinIO-internal version IDs; drop them.
	if r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 {
		versionID = ""
	}
	if srcObjInfo.DeleteMarker {
		// Source entry is a delete marker: replay the delete locally and stop.
		_, err := api.DeleteObject(ctx, tgtBucket, tgtObject, ObjectOptions{
			VersionID: versionID,
			VersionSuspended: versionSuspended,
			Versioned: versioned,
			MTime: srcObjInfo.ModTime,
			DeleteMarker: srcObjInfo.DeleteMarker,
			ReplicationRequest: true,
		})
		return err
	}

	// Preserve source version, mtime and metadata on the local copy.
	opts := ObjectOptions{
		VersionID: srcObjInfo.VersionID,
		Versioned: versioned,
		VersionSuspended: versionSuspended,
		MTime: srcObjInfo.ModTime,
		PreserveETag: srcObjInfo.ETag,
		UserDefined: srcObjInfo.UserDefined,
	}
	// Same S3 version-ID restriction as above, applied to the put options.
	if r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 {
		opts.VersionID = ""
	}
	if crypto.S3.IsEncrypted(srcObjInfo.UserDefined) {
		// Re-encrypt with SSE-S3 on the local side.
		opts.ServerSideEncryption = encrypt.NewSSE()
	}
	// An ETag of the form "<md5>-<parts>" marks a multipart upload; replicate
	// it part-by-part to preserve the part layout (and hence the ETag).
	slc := strings.Split(srcObjInfo.ETag, "-")
	if len(slc) == 2 {
		partsCount, err := strconv.Atoi(slc[1])
		if err != nil {
			return err
		}
		return r.copyWithMultipartfromSource(ctx, api, core, srcObjInfo, opts, partsCount)
	}
	gopts := miniogo.GetObjectOptions{
		VersionID: srcObjInfo.VersionID,
	}
	// Guard against the source object changing between list and get.
	if err := gopts.SetMatchETag(srcObjInfo.ETag); err != nil {
		return err
	}
	rd, objInfo, _, err := core.GetObject(ctx, srcBucket, srcObject, gopts)
	if err != nil {
		return err
	}
	defer rd.Close()

	hr, err := hash.NewReader(rd, objInfo.Size, "", "", objInfo.Size)
	if err != nil {
		return err
	}
	pReader := NewPutObjReader(hr)
	_, err = api.PutObject(ctx, tgtBucket, tgtObject, pReader, opts)
	return err
}
|
|
|
|
|
2023-06-26 12:21:29 -04:00
|
|
|
func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, api ObjectLayer, c *miniogo.Core, srcObjInfo ObjectInfo, opts ObjectOptions, partsCount int) (err error) {
|
2023-03-31 13:48:36 -04:00
|
|
|
srcBucket := r.Source.Bucket
|
|
|
|
tgtBucket := r.Target.Bucket
|
|
|
|
srcObject := srcObjInfo.Name
|
|
|
|
tgtObject := srcObjInfo.Name
|
|
|
|
if r.Target.Prefix != "" {
|
|
|
|
tgtObject = path.Join(r.Target.Prefix, srcObjInfo.Name)
|
|
|
|
}
|
2023-05-03 01:52:35 -04:00
|
|
|
if r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 {
|
|
|
|
opts.VersionID = ""
|
|
|
|
}
|
2023-03-31 13:48:36 -04:00
|
|
|
var uploadedParts []CompletePart
|
|
|
|
res, err := api.NewMultipartUpload(context.Background(), tgtBucket, tgtObject, opts)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
defer func() {
|
|
|
|
if err != nil {
|
|
|
|
// block and abort remote upload upon failure.
|
|
|
|
attempts := 1
|
|
|
|
for attempts <= 3 {
|
|
|
|
aerr := api.AbortMultipartUpload(ctx, tgtBucket, tgtObject, res.UploadID, ObjectOptions{})
|
|
|
|
if aerr == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
logger.LogIf(ctx,
|
|
|
|
fmt.Errorf("trying %s: Unable to cleanup failed multipart replication %s on remote %s/%s: %w - this may consume space on remote cluster",
|
|
|
|
humanize.Ordinal(attempts), res.UploadID, tgtBucket, tgtObject, aerr))
|
|
|
|
attempts++
|
|
|
|
time.Sleep(time.Second)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
var (
|
|
|
|
hr *hash.Reader
|
|
|
|
pInfo PartInfo
|
|
|
|
)
|
|
|
|
|
|
|
|
for i := 0; i < partsCount; i++ {
|
2023-06-26 12:21:29 -04:00
|
|
|
gopts := miniogo.GetObjectOptions{
|
2023-03-31 13:48:36 -04:00
|
|
|
VersionID: srcObjInfo.VersionID,
|
|
|
|
PartNumber: i + 1,
|
|
|
|
}
|
|
|
|
if err := gopts.SetMatchETag(srcObjInfo.ETag); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
rd, objInfo, _, err := c.GetObject(ctx, srcBucket, srcObject, gopts)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer rd.Close()
|
|
|
|
|
2023-05-16 16:14:37 -04:00
|
|
|
hr, err = hash.NewReader(io.LimitReader(rd, objInfo.Size), objInfo.Size, "", "", objInfo.Size)
|
2023-03-31 13:48:36 -04:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
pReader := NewPutObjReader(hr)
|
|
|
|
opts.PreserveETag = ""
|
|
|
|
pInfo, err = api.PutObjectPart(ctx, tgtBucket, tgtObject, res.UploadID, i+1, pReader, opts)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if pInfo.Size != objInfo.Size {
|
|
|
|
return fmt.Errorf("Part size mismatch: got %d, want %d", pInfo.Size, objInfo.Size)
|
|
|
|
}
|
|
|
|
uploadedParts = append(uploadedParts, CompletePart{
|
|
|
|
PartNumber: pInfo.PartNumber,
|
|
|
|
ETag: pInfo.ETag,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
_, err = api.CompleteMultipartUpload(ctx, tgtBucket, tgtObject, res.UploadID, uploadedParts, opts)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// StartFromSource starts the batch replication job from remote source, resumes if there was a pending job via "job.ID"
// It lists the remote source bucket (with versions when the source is MinIO),
// applies the configured filters, and replicates each surviving entry via
// ReplicateFromSource using a bounded worker pool. Failed runs are retried up
// to ri.RetryAttempts times with jittered delay; progress is checkpointed to
// disk and surfaced through globalBatchJobsMetrics.
func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {
	ri := &batchJobInfo{
		JobID: job.ID,
		JobType: string(job.Type()),
		StartTime: job.Started,
	}
	// Resume from any previously persisted job state.
	if err := ri.load(ctx, api, job); err != nil {
		return err
	}
	globalBatchJobsMetrics.save(job.ID, ri)

	delay := job.Replicate.Flags.Retry.Delay
	if delay == 0 {
		delay = batchReplJobDefaultRetryDelay
	}
	// Used only to jitter the retry back-off below.
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))

	// skip reports whether an object should NOT be replicated, per the
	// configured time/tag/metadata filters.
	skip := func(oi ObjectInfo) (ok bool) {
		if r.Flags.Filter.OlderThan > 0 && time.Since(oi.ModTime) < r.Flags.Filter.OlderThan {
			// skip all objects that are newer than specified older duration
			return true
		}

		if r.Flags.Filter.NewerThan > 0 && time.Since(oi.ModTime) >= r.Flags.Filter.NewerThan {
			// skip all objects that are older than specified newer duration
			return true
		}

		if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.Before(oi.ModTime) {
			// skip all objects that are created before the specified time.
			return true
		}

		if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.After(oi.ModTime) {
			// skip all objects that are created after the specified time.
			return true
		}
		if len(r.Flags.Filter.Tags) > 0 {
			// Only parse object tags if tags filter is specified.
			tagMap := map[string]string{}
			tagStr := oi.UserTags
			if len(tagStr) != 0 {
				t, err := tags.ParseObjectTags(tagStr)
				if err != nil {
					return false
				}
				tagMap = t.ToMap()
			}
			// NOTE(review): a tag match returns true (skip) while no match
			// returns false (replicate) — filters usually *select* objects to
			// replicate, so verify this inversion is intended.
			for _, kv := range r.Flags.Filter.Tags {
				for t, v := range tagMap {
					if kv.Match(BatchJobReplicateKV{Key: t, Value: v}) {
						return true
					}
				}
			}

			// None of the provided tags filter match skip the object
			return false
		}

		if len(r.Flags.Filter.Metadata) > 0 {
			// NOTE(review): same inverted-looking semantics as the tags filter
			// above — a metadata match skips the object.
			for _, kv := range r.Flags.Filter.Metadata {
				for k, v := range oi.UserDefined {
					if !stringsHasPrefixFold(k, "x-amz-meta-") && !isStandardHeader(k) {
						continue
					}
					// We only need to match x-amz-meta or standardHeaders
					if kv.Match(BatchJobReplicateKV{Key: k, Value: v}) {
						return true
					}
				}
			}

			// None of the provided metadata filters match skip the object.
			return false
		}

		return false
	}

	u, err := url.Parse(r.Source.Endpoint)
	if err != nil {
		return err
	}

	cred := r.Source.Creds

	// Client for the remote source cluster.
	c, err := miniogo.New(u.Host, &miniogo.Options{
		Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
		Secure: u.Scheme == "https",
		Transport: getRemoteInstanceTransport,
		BucketLookup: lookupStyle(r.Source.Path),
	})
	if err != nil {
		return err
	}

	c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID)
	core := &miniogo.Core{Client: c}

	// Worker pool size: env override, default GOMAXPROCS/2.
	workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_REPLICATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
	if err != nil {
		return err
	}

	wk, err := workers.New(workerSize)
	if err != nil {
		// invalid worker size.
		return err
	}

	retryAttempts := ri.RetryAttempts
	retry := false
	for attempts := 1; attempts <= retryAttempts; attempts++ {
		// Per-iteration copy so the goroutines below capture a stable value.
		attempts := attempts
		// one of source/target is s3, skip delete marker and all versions under the same object name.
		s3Type := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3
		minioSrc := r.Source.Type == BatchJobReplicateResourceMinIO
		// Child context so this attempt's listing can be canceled independently.
		ctx, cancel := context.WithCancel(ctx)
		objInfoCh := c.ListObjects(ctx, r.Source.Bucket, miniogo.ListObjectsOptions{
			Prefix: r.Source.Prefix,
			WithVersions: minioSrc,
			Recursive: true,
			WithMetadata: true,
		})
		prevObj := ""
		skipReplicate := false

		for obj := range objInfoCh {
			oi := toObjectInfo(r.Source.Bucket, obj.Key, obj)
			if !minioSrc {
				// Non-MinIO sources may not return full metadata in listings;
				// fetch it explicitly via StatObject.
				oi2, err := c.StatObject(ctx, r.Source.Bucket, obj.Key, miniogo.StatObjectOptions{})
				if err == nil {
					oi = toObjectInfo(r.Source.Bucket, obj.Key, oi2)
				} else {
					if isErrMethodNotAllowed(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) ||
						isErrObjectNotFound(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) {
						continue
					}
					logger.LogIf(ctx, err)
					cancel()
					return err
				}
			}
			if skip(oi) {
				continue
			}
			if obj.Key != prevObj {
				prevObj = obj.Key
				// skip replication of delete marker and all versions under the same object name if one of source or target is s3.
				skipReplicate = obj.IsDeleteMarker && s3Type
			}
			if skipReplicate {
				continue
			}

			wk.Take()
			go func() {
				defer wk.Give()
				stopFn := globalBatchJobsMetrics.trace(batchReplicationMetricObject, job.ID, attempts, oi)
				success := true
				if err := r.ReplicateFromSource(ctx, api, core, oi, retry); err != nil {
					// object must be deleted concurrently, allow these failures but do not count them
					if isErrVersionNotFound(err) || isErrObjectNotFound(err) {
						return
					}
					stopFn(err)
					logger.LogIf(ctx, err)
					success = false
				} else {
					stopFn(nil)
				}
				ri.trackCurrentBucketObject(r.Target.Bucket, oi, success)
				globalBatchJobsMetrics.save(job.ID, ri)
				// persist in-memory state to disk after every 10secs.
				logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
			}()
		}
		wk.Wait()

		ri.RetryAttempts = attempts
		ri.Complete = ri.ObjectsFailed == 0
		ri.Failed = ri.ObjectsFailed > 0

		globalBatchJobsMetrics.save(job.ID, ri)
		// persist in-memory state to disk.
		logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))

		// Notify the configured endpoint with the attempt's final state.
		buf, _ := json.Marshal(ri)
		if err := r.Notify(ctx, bytes.NewReader(buf)); err != nil {
			logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err))
		}

		cancel()
		if ri.Failed {
			// Reset per-attempt counters before retrying with jittered delay.
			ri.ObjectsFailed = 0
			ri.Bucket = ""
			ri.Object = ""
			ri.Objects = 0
			ri.BytesFailed = 0
			ri.BytesTransferred = 0
			retry = true // indicate we are retrying..
			time.Sleep(delay + time.Duration(rnd.Float64()*float64(delay)))
			continue
		}

		break
	}

	return nil
}
|
|
|
|
|
2023-03-31 13:48:36 -04:00
|
|
|
// toObjectInfo converts minio.ObjectInfo to ObjectInfo
|
2023-06-26 12:21:29 -04:00
|
|
|
func toObjectInfo(bucket, object string, objInfo miniogo.ObjectInfo) ObjectInfo {
|
2023-03-31 13:48:36 -04:00
|
|
|
tags, _ := tags.MapToObjectTags(objInfo.UserTags)
|
|
|
|
oi := ObjectInfo{
|
|
|
|
Bucket: bucket,
|
|
|
|
Name: object,
|
|
|
|
ModTime: objInfo.LastModified,
|
|
|
|
Size: objInfo.Size,
|
|
|
|
ETag: objInfo.ETag,
|
|
|
|
VersionID: objInfo.VersionID,
|
|
|
|
IsLatest: objInfo.IsLatest,
|
|
|
|
DeleteMarker: objInfo.IsDeleteMarker,
|
|
|
|
ContentType: objInfo.ContentType,
|
|
|
|
Expires: objInfo.Expires,
|
|
|
|
StorageClass: objInfo.StorageClass,
|
|
|
|
ReplicationStatusInternal: objInfo.ReplicationStatus,
|
|
|
|
UserTags: tags.String(),
|
|
|
|
}
|
|
|
|
oi.UserDefined = make(map[string]string, len(objInfo.Metadata))
|
|
|
|
for k, v := range objInfo.Metadata {
|
|
|
|
oi.UserDefined[k] = v[0]
|
|
|
|
}
|
|
|
|
ce, ok := oi.UserDefined[xhttp.ContentEncoding]
|
|
|
|
if !ok {
|
|
|
|
ce, ok = oi.UserDefined[strings.ToLower(xhttp.ContentEncoding)]
|
|
|
|
}
|
|
|
|
if ok {
|
|
|
|
oi.ContentEncoding = ce
|
|
|
|
}
|
|
|
|
return oi
|
|
|
|
}
|
|
|
|
|
2022-10-03 05:10:15 -04:00
|
|
|
// ReplicateToTarget read from source and replicate to configured target
// Reads srcObjInfo from the local object layer and pushes it to the remote
// target cluster. Delete markers / version purges are replayed as removes;
// multipart-uploaded objects are replicated part-by-part; on retry, entries
// already present on the target (same ETag) are skipped.
func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectLayer, c *miniogo.Core, srcObjInfo ObjectInfo, retry bool) error {
	srcBucket := r.Source.Bucket
	tgtBucket := r.Target.Bucket
	tgtPrefix := r.Target.Prefix
	srcObject := srcObjInfo.Name
	// Either side being plain S3 disables MinIO version-specific behavior.
	s3Type := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3

	if srcObjInfo.DeleteMarker || !srcObjInfo.VersionPurgeStatus.Empty() {
		if retry && !s3Type {
			// On retry, probe the target first; a MethodNotAllowed response is
			// treated as "delete already replicated" — NOTE(review): confirm
			// that mapping.
			if _, err := c.StatObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), miniogo.StatObjectOptions{
				VersionID: srcObjInfo.VersionID,
				Internal: miniogo.AdvancedGetOptions{
					ReplicationProxyRequest: "false",
				},
			}); isErrMethodNotAllowed(ErrorRespToObjectError(err, tgtBucket, pathJoin(tgtPrefix, srcObject))) {
				return nil
			}
		}

		versionID := srcObjInfo.VersionID
		// dmVersionID is set only for delete markers (no version purge pending).
		dmVersionID := ""
		if srcObjInfo.VersionPurgeStatus.Empty() {
			dmVersionID = srcObjInfo.VersionID
		}
		// Plain S3 endpoints cannot accept MinIO-internal version IDs.
		if r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 {
			dmVersionID = ""
			versionID = ""
		}
		return c.RemoveObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), miniogo.RemoveObjectOptions{
			VersionID: versionID,
			Internal: miniogo.AdvancedRemoveOptions{
				ReplicationDeleteMarker: dmVersionID != "",
				ReplicationMTime: srcObjInfo.ModTime,
				ReplicationStatus: miniogo.ReplicationStatusReplica,
				ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
			},
		})
	}

	if retry && !s3Type { // when we are retrying avoid copying if necessary.
		gopts := miniogo.GetObjectOptions{}
		if err := gopts.SetMatchETag(srcObjInfo.ETag); err != nil {
			return err
		}
		// Same ETag already on target: nothing to do.
		if _, err := c.StatObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), gopts); err == nil {
			return nil
		}
	}

	versioned := globalBucketVersioningSys.PrefixEnabled(srcBucket, srcObject)
	versionSuspended := globalBucketVersioningSys.PrefixSuspended(srcBucket, srcObject)

	opts := ObjectOptions{
		VersionID: srcObjInfo.VersionID,
		Versioned: versioned,
		VersionSuspended: versionSuspended,
	}
	rd, err := api.GetObjectNInfo(ctx, srcBucket, srcObject, nil, http.Header{}, opts)
	if err != nil {
		return err
	}
	defer rd.Close()
	objInfo := rd.ObjInfo

	// Actual (decompressed/decrypted) payload size for the remote put.
	size, err := objInfo.GetActualSize()
	if err != nil {
		return err
	}

	putOpts, err := batchReplicationOpts(ctx, "", objInfo)
	if err != nil {
		return err
	}
	// Strip MinIO-internal replication headers when either side is plain S3.
	if r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 {
		putOpts.Internal = miniogo.AdvancedPutOptions{}
	}
	if objInfo.isMultipart() {
		// Preserve the multipart layout (and hence the ETag) on the target.
		if err := replicateObjectWithMultipart(ctx, c, tgtBucket, pathJoin(tgtPrefix, objInfo.Name), rd, objInfo, putOpts); err != nil {
			return err
		}
	} else {
		if _, err = c.PutObject(ctx, tgtBucket, pathJoin(tgtPrefix, objInfo.Name), rd, size, "", "", putOpts); err != nil {
			return err
		}
	}
	return nil
}
|
|
|
|
|
|
|
|
//go:generate msgp -file $GOFILE -unexported

// batchJobInfo current batch replication information
//
// An instance is persisted per job under the job's location in the
// metadata bucket (see load/updateAfter) as a 4-byte header followed by
// the msgp encoding of this struct.
type batchJobInfo struct {
	// mu guards all fields below; deliberately excluded from both the
	// JSON and msgp representations.
	mu sync.RWMutex `json:"-" msg:"-"`

	Version       int       `json:"-" msg:"v"`             // on-disk schema version of this struct
	JobID         string    `json:"jobID" msg:"jid"`       // unique identifier of the batch job
	JobType       string    `json:"jobType" msg:"jt"`      // e.g. "replicate" or "keyrotate"
	StartTime     time.Time `json:"startTime" msg:"st"`    // when the job was first started
	LastUpdate    time.Time `json:"lastUpdate" msg:"lu"`   // last time this state was persisted
	RetryAttempts int       `json:"retryAttempts" msg:"ra"` // retry budget / attempts used

	Complete bool `json:"complete" msg:"cmp"` // job finished with no failed objects
	Failed   bool `json:"failed" msg:"fld"`   // job finished with at least one failed object

	// Last bucket/object batch replicated
	Bucket string `json:"-" msg:"lbkt"`
	Object string `json:"-" msg:"lobj"`

	// Verbose information
	Objects             int64 `json:"objects" msg:"ob"`
	DeleteMarkers       int64 `json:"deleteMarkers" msg:"dm"`
	ObjectsFailed       int64 `json:"objectsFailed" msg:"obf"`
	DeleteMarkersFailed int64 `json:"deleteMarkersFailed" msg:"dmf"`
	BytesTransferred    int64 `json:"bytesTransferred" msg:"bt"`
	BytesFailed         int64 `json:"bytesFailed" msg:"bf"`
}
|
|
|
|
|
|
|
|
const (
	// Progress file name and its binary header values; format and
	// version are each written as a little-endian uint16 (see
	// batchJobInfo.load / updateAfter).
	batchReplName      = "batch-replicate.bin"
	batchReplFormat    = 1
	batchReplVersionV1 = 1
	batchReplVersion   = batchReplVersionV1

	// Per-job definition file and the prefix under which all batch job
	// state lives in the metadata bucket.
	batchJobName   = "job.bin"
	batchJobPrefix = "batch-jobs"

	// API version accepted in replicate job YAML definitions.
	batchReplJobAPIVersion = "v1"

	// Defaults used when the job definition omits retry settings.
	batchReplJobDefaultRetries    = 3
	batchReplJobDefaultRetryDelay = 250 * time.Millisecond
)
|
|
|
|
|
|
|
|
// load reads the persisted progress state for 'job' from the backend
// into 'ri'.
//
// When no progress file exists yet (fresh job), it only seeds the retry
// budget from the job flags (falling back to the per-type default) and
// returns nil. The on-disk layout is a 4-byte header - format then
// version, both little-endian uint16 - followed by the msgp-encoded
// batchJobInfo.
func (ri *batchJobInfo) load(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {
	var fileName string
	var format, version uint16
	// Select the per-job-type progress file and the header values we
	// expect to find in it.
	switch {
	case job.Replicate != nil:
		fileName = batchReplName
		version = batchReplVersionV1
		format = batchReplFormat
	case job.KeyRotate != nil:
		fileName = batchKeyRotationName
		version = batchKeyRotateVersionV1
		format = batchKeyRotationFormat
	}
	data, err := readConfig(ctx, api, pathJoin(job.Location, fileName))
	if err != nil {
		if errors.Is(err, errConfigNotFound) || isErrObjectNotFound(err) {
			// First run: nothing persisted. Initialize the retry budget
			// from the job definition, defaulting when unset.
			ri.Version = int(version)
			switch {
			case job.Replicate != nil:
				ri.RetryAttempts = batchReplJobDefaultRetries
				if job.Replicate.Flags.Retry.Attempts > 0 {
					ri.RetryAttempts = job.Replicate.Flags.Retry.Attempts
				}
			case job.KeyRotate != nil:
				ri.RetryAttempts = batchKeyRotateJobDefaultRetries
				if job.KeyRotate.Flags.Retry.Attempts > 0 {
					ri.RetryAttempts = job.KeyRotate.Flags.Retry.Attempts
				}
			}
			return nil
		}
		return err
	}
	if len(data) == 0 {
		// Seems to be empty create a new batchRepl object.
		return nil
	}
	if len(data) <= 4 {
		// Too short to even hold the 4-byte header - treat as corrupt.
		return fmt.Errorf("%s: no data", ri.JobType)
	}
	// Read header
	switch binary.LittleEndian.Uint16(data[0:2]) {
	case format:
	default:
		return fmt.Errorf("%s: unknown format: %d", ri.JobType, binary.LittleEndian.Uint16(data[0:2]))
	}
	switch binary.LittleEndian.Uint16(data[2:4]) {
	case version:
	default:
		return fmt.Errorf("%s: unknown version: %d", ri.JobType, binary.LittleEndian.Uint16(data[2:4]))
	}

	ri.mu.Lock()
	defer ri.mu.Unlock()

	// OK, parse data.
	if _, err = ri.UnmarshalMsg(data[4:]); err != nil {
		return err
	}

	// Only the V1 layout is understood by this code path.
	switch ri.Version {
	case batchReplVersionV1:
	default:
		return fmt.Errorf("unexpected batch %s meta version: %d", ri.JobType, ri.Version)
	}

	return nil
}
|
|
|
|
|
2023-02-13 15:07:58 -05:00
|
|
|
func (ri *batchJobInfo) clone() *batchJobInfo {
|
|
|
|
ri.mu.RLock()
|
|
|
|
defer ri.mu.RUnlock()
|
|
|
|
|
|
|
|
return &batchJobInfo{
|
2022-10-03 05:10:15 -04:00
|
|
|
Version: ri.Version,
|
|
|
|
JobID: ri.JobID,
|
|
|
|
JobType: ri.JobType,
|
|
|
|
RetryAttempts: ri.RetryAttempts,
|
|
|
|
Complete: ri.Complete,
|
|
|
|
Failed: ri.Failed,
|
|
|
|
StartTime: ri.StartTime,
|
|
|
|
LastUpdate: ri.LastUpdate,
|
|
|
|
Bucket: ri.Bucket,
|
|
|
|
Object: ri.Object,
|
|
|
|
Objects: ri.Objects,
|
|
|
|
ObjectsFailed: ri.ObjectsFailed,
|
|
|
|
BytesTransferred: ri.BytesTransferred,
|
|
|
|
BytesFailed: ri.BytesFailed,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-10-13 17:42:10 -04:00
|
|
|
func (ri *batchJobInfo) countItem(size int64, dmarker, success bool) {
|
2022-10-03 05:10:15 -04:00
|
|
|
if ri == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if success {
|
2022-10-13 17:42:10 -04:00
|
|
|
if dmarker {
|
|
|
|
ri.DeleteMarkers++
|
|
|
|
} else {
|
|
|
|
ri.Objects++
|
|
|
|
ri.BytesTransferred += size
|
|
|
|
}
|
2022-10-03 05:10:15 -04:00
|
|
|
} else {
|
2022-10-13 17:42:10 -04:00
|
|
|
if dmarker {
|
|
|
|
ri.DeleteMarkersFailed++
|
|
|
|
} else {
|
|
|
|
ri.ObjectsFailed++
|
|
|
|
ri.BytesFailed += size
|
|
|
|
}
|
2022-10-03 05:10:15 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-04-04 13:56:54 -04:00
|
|
|
func (ri *batchJobInfo) updateAfter(ctx context.Context, api ObjectLayer, duration time.Duration, job BatchJobRequest) error {
|
2022-10-03 05:10:15 -04:00
|
|
|
if ri == nil {
|
|
|
|
return errInvalidArgument
|
|
|
|
}
|
|
|
|
now := UTCNow()
|
2023-02-13 15:07:58 -05:00
|
|
|
ri.mu.Lock()
|
2023-04-04 13:56:54 -04:00
|
|
|
var (
|
|
|
|
format, version uint16
|
|
|
|
jobTyp, fileName string
|
|
|
|
)
|
|
|
|
|
2022-10-03 05:10:15 -04:00
|
|
|
if now.Sub(ri.LastUpdate) >= duration {
|
2023-04-04 13:56:54 -04:00
|
|
|
switch job.Type() {
|
|
|
|
case madmin.BatchJobReplicate:
|
|
|
|
format = batchReplFormat
|
|
|
|
version = batchReplVersion
|
|
|
|
jobTyp = string(job.Type())
|
|
|
|
fileName = batchReplName
|
|
|
|
ri.Version = batchReplVersionV1
|
|
|
|
case madmin.BatchJobKeyRotate:
|
|
|
|
format = batchKeyRotationFormat
|
|
|
|
version = batchKeyRotateVersion
|
|
|
|
jobTyp = string(job.Type())
|
|
|
|
fileName = batchKeyRotationName
|
|
|
|
ri.Version = batchKeyRotateVersionV1
|
|
|
|
default:
|
|
|
|
return errInvalidArgument
|
|
|
|
}
|
2022-10-03 05:10:15 -04:00
|
|
|
if serverDebugLog {
|
2023-04-04 13:56:54 -04:00
|
|
|
console.Debugf("%s: persisting info on drive: threshold:%s, %s:%#v\n", jobTyp, now.Sub(ri.LastUpdate), jobTyp, ri)
|
2022-10-03 05:10:15 -04:00
|
|
|
}
|
|
|
|
ri.LastUpdate = now
|
2023-02-13 15:07:58 -05:00
|
|
|
|
|
|
|
data := make([]byte, 4, ri.Msgsize()+4)
|
|
|
|
|
|
|
|
// Initialize the header.
|
2023-04-04 13:56:54 -04:00
|
|
|
binary.LittleEndian.PutUint16(data[0:2], format)
|
|
|
|
binary.LittleEndian.PutUint16(data[2:4], version)
|
2023-02-13 15:07:58 -05:00
|
|
|
|
|
|
|
buf, err := ri.MarshalMsg(data)
|
|
|
|
ri.mu.Unlock()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2023-04-04 13:56:54 -04:00
|
|
|
return saveConfig(ctx, api, pathJoin(job.Location, fileName), buf)
|
2022-10-03 05:10:15 -04:00
|
|
|
}
|
2023-02-13 15:07:58 -05:00
|
|
|
ri.mu.Unlock()
|
2022-10-03 05:10:15 -04:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo, failed bool) {
|
|
|
|
if ri == nil {
|
|
|
|
return
|
|
|
|
}
|
2023-02-13 15:07:58 -05:00
|
|
|
|
|
|
|
ri.mu.Lock()
|
|
|
|
defer ri.mu.Unlock()
|
|
|
|
|
2022-10-03 05:10:15 -04:00
|
|
|
ri.Bucket = bucket
|
|
|
|
ri.Object = info.Name
|
2022-10-13 17:42:10 -04:00
|
|
|
ri.countItem(info.Size, info.DeleteMarker, failed)
|
2022-10-03 05:10:15 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Start start the batch replication job, resumes if there was a pending job via "job.ID"
//
// The job walks the source bucket (resuming from the last persisted
// object), filters entries via the 'skip' closure, and replicates each
// selected object to the remote target using a bounded worker pool.
// Failed runs are retried up to ri.RetryAttempts times with jittered
// delay; progress is persisted periodically and on completion.
func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {
	ri := &batchJobInfo{
		JobID:     job.ID,
		JobType:   string(job.Type()),
		StartTime: job.Started,
	}
	// Resume from any previously persisted progress (no-op on first run).
	if err := ri.load(ctx, api, job); err != nil {
		return err
	}
	globalBatchJobsMetrics.save(job.ID, ri)
	// Resume the walk from the last object we recorded, if any.
	lastObject := ri.Object

	delay := job.Replicate.Flags.Retry.Delay
	if delay == 0 {
		delay = batchReplJobDefaultRetryDelay
	}
	// Jitter source for retry back-off below.
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))

	// skip is used as the WalkFilter: returning false excludes the entry
	// from replication, returning true selects it.
	skip := func(info FileInfo) (ok bool) {
		if r.Flags.Filter.OlderThan > 0 && time.Since(info.ModTime) < r.Flags.Filter.OlderThan {
			// skip all objects that are newer than specified older duration
			return false
		}

		if r.Flags.Filter.NewerThan > 0 && time.Since(info.ModTime) >= r.Flags.Filter.NewerThan {
			// skip all objects that are older than specified newer duration
			return false
		}

		if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.Before(info.ModTime) {
			// skip all objects that are created before the specified time.
			return false
		}

		if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.After(info.ModTime) {
			// skip all objects that are created after the specified time.
			return false
		}

		if len(r.Flags.Filter.Tags) > 0 {
			// Only parse object tags if tags filter is specified.
			tagMap := map[string]string{}
			tagStr := info.Metadata[xhttp.AmzObjectTagging]
			if len(tagStr) != 0 {
				t, err := tags.ParseObjectTags(tagStr)
				if err != nil {
					return false
				}
				tagMap = t.ToMap()
			}

			// Select the object if any tag filter matches any object tag.
			for _, kv := range r.Flags.Filter.Tags {
				for t, v := range tagMap {
					if kv.Match(BatchJobReplicateKV{Key: t, Value: v}) {
						return true
					}
				}
			}

			// None of the provided tags filter match skip the object
			return false
		}

		if len(r.Flags.Filter.Metadata) > 0 {
			for _, kv := range r.Flags.Filter.Metadata {
				for k, v := range info.Metadata {
					if !stringsHasPrefixFold(k, "x-amz-meta-") && !isStandardHeader(k) {
						continue
					}
					// We only need to match x-amz-meta or standardHeaders
					if kv.Match(BatchJobReplicateKV{Key: k, Value: v}) {
						return true
					}
				}
			}

			// None of the provided metadata filters match skip the object.
			return false
		}
		// if one of source or target is non MinIO, just replicate the top most version like `mc mirror`
		if (r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3) && !info.IsLatest {
			return false
		}

		return true
	}

	u, err := url.Parse(r.Target.Endpoint)
	if err != nil {
		return err
	}

	cred := r.Target.Creds

	// Client for the remote replication target.
	c, err := miniogo.NewCore(u.Host, &miniogo.Options{
		Creds:        credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
		Secure:       u.Scheme == "https",
		Transport:    getRemoteInstanceTransport,
		BucketLookup: lookupStyle(r.Target.Path),
	})
	if err != nil {
		return err
	}
	c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID)

	// Worker pool size: overridable via env, defaults to half the CPUs.
	workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_REPLICATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
	if err != nil {
		return err
	}

	wk, err := workers.New(workerSize)
	if err != nil {
		// invalid worker size.
		return err
	}

	retryAttempts := ri.RetryAttempts
	retry := false
	for attempts := 1; attempts <= retryAttempts; attempts++ {
		// Capture per-iteration value for the goroutines below.
		attempts := attempts

		ctx, cancel := context.WithCancel(ctx)
		// one of source/target is s3, skip delete marker and all versions under the same object name.
		s3Type := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3

		results := make(chan ObjectInfo, 100)
		if err := api.Walk(ctx, r.Source.Bucket, r.Source.Prefix, results, ObjectOptions{
			WalkMarker: lastObject,
			WalkFilter: skip,
		}); err != nil {
			cancel()
			// Do not need to retry if we can't list objects on source.
			return err
		}

		prevObj := ""

		skipReplicate := false
		for result := range results {
			// Capture per-iteration value for the goroutine below.
			result := result
			if result.Name != prevObj {
				prevObj = result.Name
				// When S3 is involved, a delete marker means this object
				// name (all its versions) is skipped entirely.
				skipReplicate = result.DeleteMarker && s3Type
			}
			if skipReplicate {
				continue
			}
			wk.Take()
			go func() {
				defer wk.Give()

				stopFn := globalBatchJobsMetrics.trace(batchReplicationMetricObject, job.ID, attempts, result)
				success := true
				if err := r.ReplicateToTarget(ctx, api, c, result, retry); err != nil {
					if miniogo.ToErrorResponse(err).Code == "PreconditionFailed" {
						// pre-condition failed means we already have the object copied over.
						return
					}
					// object must be deleted concurrently, allow these failures but do not count them
					if isErrVersionNotFound(err) || isErrObjectNotFound(err) {
						return
					}
					stopFn(err)
					logger.LogIf(ctx, err)
					success = false
				} else {
					stopFn(nil)
				}
				ri.trackCurrentBucketObject(r.Source.Bucket, result, success)
				globalBatchJobsMetrics.save(job.ID, ri)
				// persist in-memory state to disk after every 10secs.
				logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
			}()
		}
		wk.Wait()

		ri.RetryAttempts = attempts
		ri.Complete = ri.ObjectsFailed == 0
		ri.Failed = ri.ObjectsFailed > 0

		globalBatchJobsMetrics.save(job.ID, ri)
		// persist in-memory state to disk.
		logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))

		// Best-effort completion notification to the configured endpoint.
		buf, _ := json.Marshal(ri)
		if err := r.Notify(ctx, bytes.NewReader(buf)); err != nil {
			logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err))
		}

		cancel()
		if ri.Failed {
			// Reset counters and retry the whole walk from the start.
			ri.ObjectsFailed = 0
			ri.Bucket = ""
			ri.Object = ""
			ri.Objects = 0
			ri.BytesFailed = 0
			ri.BytesTransferred = 0
			retry = true // indicate we are retrying..
			time.Sleep(delay + time.Duration(rnd.Float64()*float64(delay)))
			continue
		}

		break
	}

	return nil
}
|
|
|
|
|
2022-10-15 14:58:31 -04:00
|
|
|
//msgp:ignore batchReplicationJobError

// batchReplicationJobError is a typed error describing why a batch
// replication job request was rejected, carrying an S3-style error code,
// a human-readable description and the HTTP status to respond with.
type batchReplicationJobError struct {
	Code           string
	Description    string
	HTTPStatusCode int
}

// Error implements the error interface; the description is the message.
func (berr batchReplicationJobError) Error() string {
	return berr.Description
}
|
|
|
|
|
2022-10-03 05:10:15 -04:00
|
|
|
// Validate validates the job definition input
|
2023-02-14 16:19:30 -05:00
|
|
|
func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest, o ObjectLayer) error {
|
2022-10-03 05:10:15 -04:00
|
|
|
if r == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if r.APIVersion != batchReplJobAPIVersion {
|
|
|
|
return errInvalidArgument
|
|
|
|
}
|
|
|
|
|
|
|
|
if r.Source.Bucket == "" {
|
|
|
|
return errInvalidArgument
|
|
|
|
}
|
2023-04-04 13:50:11 -04:00
|
|
|
var isRemoteToLocal bool
|
2023-03-31 13:48:36 -04:00
|
|
|
localBkt := r.Source.Bucket
|
|
|
|
if r.Source.Endpoint != "" {
|
|
|
|
localBkt = r.Target.Bucket
|
2023-04-04 13:50:11 -04:00
|
|
|
isRemoteToLocal = true
|
2023-03-31 13:48:36 -04:00
|
|
|
}
|
|
|
|
info, err := o.GetBucketInfo(ctx, localBkt, BucketOptions{})
|
2022-10-03 05:10:15 -04:00
|
|
|
if err != nil {
|
2022-10-15 14:58:31 -04:00
|
|
|
if isErrBucketNotFound(err) {
|
|
|
|
return batchReplicationJobError{
|
|
|
|
Code: "NoSuchSourceBucket",
|
2023-04-04 13:50:11 -04:00
|
|
|
Description: fmt.Sprintf("The specified bucket %s does not exist", localBkt),
|
2022-10-15 14:58:31 -04:00
|
|
|
HTTPStatusCode: http.StatusNotFound,
|
|
|
|
}
|
|
|
|
}
|
2022-10-03 05:10:15 -04:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := r.Source.Type.Validate(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2023-03-31 13:48:36 -04:00
|
|
|
if r.Source.Creds.Empty() && r.Target.Creds.Empty() {
|
|
|
|
return errInvalidArgument
|
|
|
|
}
|
|
|
|
|
|
|
|
if !r.Source.Creds.Empty() {
|
|
|
|
if err := r.Source.Creds.Validate(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if r.Target.Endpoint == "" && !r.Target.Creds.Empty() {
|
|
|
|
return errInvalidArgument
|
|
|
|
}
|
2022-10-03 05:10:15 -04:00
|
|
|
|
2023-03-31 13:48:36 -04:00
|
|
|
if r.Source.Endpoint == "" && !r.Source.Creds.Empty() {
|
2022-10-03 05:10:15 -04:00
|
|
|
return errInvalidArgument
|
|
|
|
}
|
|
|
|
|
2023-05-21 18:16:31 -04:00
|
|
|
if r.Source.Endpoint != "" && !r.Source.Type.isMinio() && !r.Source.ValidPath() {
|
|
|
|
return errInvalidArgument
|
|
|
|
}
|
|
|
|
|
|
|
|
if r.Target.Endpoint != "" && !r.Target.Type.isMinio() && !r.Target.ValidPath() {
|
|
|
|
return errInvalidArgument
|
|
|
|
}
|
2022-10-03 05:10:15 -04:00
|
|
|
if r.Target.Bucket == "" {
|
|
|
|
return errInvalidArgument
|
|
|
|
}
|
|
|
|
|
2023-03-31 13:48:36 -04:00
|
|
|
if !r.Target.Creds.Empty() {
|
|
|
|
if err := r.Target.Creds.Validate(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if r.Source.Creds.Empty() && r.Target.Creds.Empty() {
|
|
|
|
return errInvalidArgument
|
2022-10-03 05:10:15 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
if err := r.Target.Type.Validate(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, tag := range r.Flags.Filter.Tags {
|
|
|
|
if err := tag.Validate(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, meta := range r.Flags.Filter.Metadata {
|
|
|
|
if err := meta.Validate(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := r.Flags.Retry.Validate(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2023-03-31 13:48:36 -04:00
|
|
|
remoteEp := r.Target.Endpoint
|
|
|
|
remoteBkt := r.Target.Bucket
|
|
|
|
cred := r.Target.Creds
|
2023-05-21 18:16:31 -04:00
|
|
|
pathStyle := r.Target.Path
|
2023-03-31 13:48:36 -04:00
|
|
|
|
|
|
|
if r.Source.Endpoint != "" {
|
|
|
|
remoteEp = r.Source.Endpoint
|
|
|
|
cred = r.Source.Creds
|
|
|
|
remoteBkt = r.Source.Bucket
|
2023-05-21 18:16:31 -04:00
|
|
|
pathStyle = r.Source.Path
|
|
|
|
|
2023-03-31 13:48:36 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
u, err := url.Parse(remoteEp)
|
2022-10-15 14:58:31 -04:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
c, err := miniogo.NewCore(u.Host, &miniogo.Options{
|
2023-05-21 18:16:31 -04:00
|
|
|
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
|
|
|
|
Secure: u.Scheme == "https",
|
|
|
|
Transport: getRemoteInstanceTransport,
|
|
|
|
BucketLookup: lookupStyle(pathStyle),
|
2022-10-15 14:58:31 -04:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2023-02-14 16:19:30 -05:00
|
|
|
c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID)
|
2022-10-15 14:58:31 -04:00
|
|
|
|
2023-03-31 13:48:36 -04:00
|
|
|
vcfg, err := c.GetBucketVersioning(ctx, remoteBkt)
|
2022-10-15 14:58:31 -04:00
|
|
|
if err != nil {
|
|
|
|
if miniogo.ToErrorResponse(err).Code == "NoSuchBucket" {
|
|
|
|
return batchReplicationJobError{
|
|
|
|
Code: "NoSuchTargetBucket",
|
|
|
|
Description: "The specified target bucket does not exist",
|
|
|
|
HTTPStatusCode: http.StatusNotFound,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
2023-05-03 01:52:35 -04:00
|
|
|
// if both source and target are minio instances
|
|
|
|
minioType := r.Target.Type == BatchJobReplicateResourceMinIO && r.Source.Type == BatchJobReplicateResourceMinIO
|
2023-04-04 13:50:11 -04:00
|
|
|
// If source has versioning enabled, target must have versioning enabled
|
2023-05-03 01:52:35 -04:00
|
|
|
if minioType && ((info.Versioning && !vcfg.Enabled() && !isRemoteToLocal) || (!info.Versioning && vcfg.Enabled() && isRemoteToLocal)) {
|
2022-10-15 14:58:31 -04:00
|
|
|
return batchReplicationJobError{
|
|
|
|
Code: "InvalidBucketState",
|
|
|
|
Description: fmt.Sprintf("The source '%s' has versioning enabled, target '%s' must have versioning enabled",
|
|
|
|
r.Source.Bucket, r.Target.Bucket),
|
|
|
|
HTTPStatusCode: http.StatusBadRequest,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
r.clnt = c
|
2022-10-03 05:10:15 -04:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Type returns type of batch job, currently only supports 'replicate'
|
|
|
|
func (j BatchJobRequest) Type() madmin.BatchJobType {
|
2023-04-04 13:56:54 -04:00
|
|
|
switch {
|
|
|
|
case j.Replicate != nil:
|
2022-10-03 05:10:15 -04:00
|
|
|
return madmin.BatchJobReplicate
|
2023-04-04 13:56:54 -04:00
|
|
|
case j.KeyRotate != nil:
|
|
|
|
return madmin.BatchJobKeyRotate
|
2022-10-03 05:10:15 -04:00
|
|
|
}
|
|
|
|
return madmin.BatchJobType("unknown")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Validate validates the current job, used by 'save()' before
|
|
|
|
// persisting the job request
|
|
|
|
func (j BatchJobRequest) Validate(ctx context.Context, o ObjectLayer) error {
|
2023-04-04 13:56:54 -04:00
|
|
|
switch {
|
|
|
|
case j.Replicate != nil:
|
2023-02-14 16:19:30 -05:00
|
|
|
return j.Replicate.Validate(ctx, j, o)
|
2023-04-04 13:56:54 -04:00
|
|
|
case j.KeyRotate != nil:
|
|
|
|
return j.KeyRotate.Validate(ctx, j, o)
|
2022-10-03 05:10:15 -04:00
|
|
|
}
|
|
|
|
return errInvalidArgument
|
|
|
|
}
|
|
|
|
|
|
|
|
func (j BatchJobRequest) delete(ctx context.Context, api ObjectLayer) {
|
2023-04-04 13:56:54 -04:00
|
|
|
switch {
|
|
|
|
case j.Replicate != nil:
|
|
|
|
deleteConfig(ctx, api, pathJoin(j.Location, batchReplName))
|
|
|
|
case j.KeyRotate != nil:
|
|
|
|
deleteConfig(ctx, api, pathJoin(j.Location, batchKeyRotationName))
|
|
|
|
}
|
2022-10-03 05:10:15 -04:00
|
|
|
globalBatchJobsMetrics.delete(j.ID)
|
|
|
|
deleteConfig(ctx, api, j.Location)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (j *BatchJobRequest) save(ctx context.Context, api ObjectLayer) error {
|
2023-04-04 13:56:54 -04:00
|
|
|
if j.Replicate == nil && j.KeyRotate == nil {
|
2022-10-03 05:10:15 -04:00
|
|
|
return errInvalidArgument
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := j.Validate(ctx, api); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
j.Location = pathJoin(batchJobPrefix, j.ID)
|
|
|
|
job, err := j.MarshalMsg(nil)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return saveConfig(ctx, api, j.Location, job)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (j *BatchJobRequest) load(ctx context.Context, api ObjectLayer, name string) error {
|
|
|
|
if j == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
job, err := readConfig(ctx, api, name)
|
|
|
|
if err != nil {
|
|
|
|
if errors.Is(err, errConfigNotFound) || isErrObjectNotFound(err) {
|
|
|
|
err = errNoSuchJob
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err = j.UnmarshalMsg(job)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
func batchReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) {
|
|
|
|
// TODO: support custom storage class for remote replication
|
|
|
|
putOpts, err = putReplicationOpts(ctx, "", objInfo)
|
|
|
|
if err != nil {
|
|
|
|
return putOpts, err
|
|
|
|
}
|
|
|
|
putOpts.Internal = miniogo.AdvancedPutOptions{
|
2023-04-27 16:43:18 -04:00
|
|
|
SourceVersionID: objInfo.VersionID,
|
|
|
|
SourceMTime: objInfo.ModTime,
|
|
|
|
SourceETag: objInfo.ETag,
|
|
|
|
ReplicationRequest: true,
|
2022-10-03 05:10:15 -04:00
|
|
|
}
|
|
|
|
return putOpts, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// ListBatchJobs - lists all currently active batch jobs, optionally takes {jobType}
// input to list only active batch jobs of 'jobType'
func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListBatchJobs")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListBatchJobsAction)
	if objectAPI == nil {
		// validateAdminReq already wrote the error response.
		return
	}

	// Default to listing replicate jobs when no type is given.
	jobType := r.Form.Get("jobType")
	if jobType == "" {
		jobType = string(madmin.BatchJobReplicate)
	}

	resultCh := make(chan ObjectInfo)

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Walk all persisted job definitions under the batch-jobs prefix.
	if err := objectAPI.Walk(ctx, minioMetaBucket, batchJobPrefix, resultCh, ObjectOptions{}); err != nil {
		writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	listResult := madmin.ListBatchJobsResult{}
	for result := range resultCh {
		req := &BatchJobRequest{}
		if err := req.load(ctx, objectAPI, result.Name); err != nil {
			// Entries that vanished concurrently are expected; log the rest.
			if !errors.Is(err, errNoSuchJob) {
				logger.LogIf(ctx, err)
			}
			continue
		}

		if jobType == string(req.Type()) {
			listResult.Jobs = append(listResult.Jobs, madmin.BatchJobResult{
				ID:      req.ID,
				Type:    req.Type(),
				Started: req.Started,
				User:    req.User,
				Elapsed: time.Since(req.Started),
			})
		}
	}

	logger.LogIf(ctx, json.NewEncoder(w).Encode(&listResult))
}
|
|
|
|
|
|
|
|
// errNoSuchJob is returned when a referenced batch job cannot be found.
var errNoSuchJob = errors.New("no such job")
|
|
|
|
|
|
|
|
// DescribeBatchJob returns the currently active batch job definition
// as YAML, looked up by the 'jobId' form parameter.
func (a adminAPIHandlers) DescribeBatchJob(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "DescribeBatchJob")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.DescribeBatchJobAction)
	if objectAPI == nil {
		// validateAdminReq already wrote the error response.
		return
	}

	id := r.Form.Get("jobId")
	if id == "" {
		writeErrorResponseJSON(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL)
		return
	}

	// Load the persisted definition for this job id.
	req := &BatchJobRequest{}
	if err := req.load(ctx, objectAPI, pathJoin(batchJobPrefix, id)); err != nil {
		if !errors.Is(err, errNoSuchJob) {
			logger.LogIf(ctx, err)
		}

		writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	buf, err := yaml.Marshal(req)
	if err != nil {
		logger.LogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	w.Write(buf)
}
|
|
|
|
|
|
|
|
// StartBatchJob queues a new job for execution: the request body is the
// YAML job definition, which is validated, persisted and handed to the
// global batch job pool. Responds with the new job's id/type/user.
func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "StartBatchJob")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, creds := validateAdminReq(ctx, w, r, iampolicy.StartBatchJobAction)
	if objectAPI == nil {
		// validateAdminReq already wrote the error response.
		return
	}

	buf, err := io.ReadAll(r.Body)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Attribute the job to the parent user for temporary credentials.
	user := creds.AccessKey
	if creds.ParentUser != "" {
		user = creds.ParentUser
	}

	job := &BatchJobRequest{}
	if err = yaml.Unmarshal(buf, job); err != nil {
		writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Server-assigned identity and bookkeeping fields.
	job.ID = shortuuid.New()
	job.User = user
	job.Started = time.Now()

	// save() also validates the job definition.
	if err := job.save(ctx, objectAPI); err != nil {
		writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	if err = globalBatchJobPool.queueJob(job); err != nil {
		writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	buf, err = json.Marshal(&madmin.BatchJobResult{
		ID:      job.ID,
		Type:    job.Type(),
		Started: job.Started,
		User:    job.User,
	})
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, buf)
}
|
|
|
|
|
2023-03-18 02:42:43 -04:00
|
|
|
// CancelBatchJob cancels a job in progress: it fires the job's cancel
// function and then removes the persisted job state, responding with
// 204 No Content on success.
func (a adminAPIHandlers) CancelBatchJob(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "CancelBatchJob")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.CancelBatchJobAction)
	if objectAPI == nil {
		// validateAdminReq already wrote the error response.
		return
	}
	jobID := r.Form.Get("id")
	if jobID == "" {
		writeErrorResponseJSON(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL)
		return
	}
	// Signal the running job (if any) to stop.
	if err := globalBatchJobPool.canceler(jobID, true); err != nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, err), r.URL)
		return
	}
	// Remove persisted job definition and progress state.
	j := BatchJobRequest{
		ID:       jobID,
		Location: pathJoin(batchJobPrefix, jobID),
	}
	j.delete(ctx, objectAPI)

	writeSuccessNoContent(w)
}
|
|
|
|
|
2022-10-03 05:10:15 -04:00
|
|
|
//msgp:ignore BatchJobPool

// BatchJobPool batch job pool
//
// Dispatches queued BatchJobRequests to a resizable set of workers and
// tracks a per-job cancel function so running jobs can be aborted.
type BatchJobPool struct {
	ctx      context.Context
	objLayer ObjectLayer
	once     sync.Once
	mu       sync.Mutex
	jobCh    chan *BatchJobRequest
	jmu      sync.Mutex // protects jobCancelers
	jobCancelers map[string]context.CancelFunc
	workerKillCh chan struct{}
	workerSize   int
}
|
|
|
|
|
|
|
|
// globalBatchJobPool is the server-wide batch job pool, initialized at
// startup via newBatchJobPool.
var globalBatchJobPool *BatchJobPool
|
|
|
|
|
|
|
|
// newBatchJobPool creates a pool of job manifest workers of specified size
// and immediately resumes any jobs persisted from a previous run.
func newBatchJobPool(ctx context.Context, o ObjectLayer, workers int) *BatchJobPool {
	jpool := &BatchJobPool{
		ctx:      ctx,
		objLayer: o,
		// Large buffer so queueJob rarely blocks admin handlers.
		jobCh:        make(chan *BatchJobRequest, 10000),
		workerKillCh: make(chan struct{}, workers),
		jobCancelers: make(map[string]context.CancelFunc),
	}
	jpool.ResizeWorkers(workers)
	jpool.resume()
	return jpool
}
|
|
|
|
|
|
|
|
// resume re-queues batch job manifests persisted under
// minioMetaBucket/batchJobPrefix so in-progress jobs survive a restart.
func (j *BatchJobPool) resume() {
	results := make(chan ObjectInfo, 100)
	ctx, cancel := context.WithCancel(j.ctx)
	defer cancel()
	// Walk streams matching entries into results; an error here means
	// the listing itself could not be performed.
	if err := j.objLayer.Walk(ctx, minioMetaBucket, batchJobPrefix, results, ObjectOptions{}); err != nil {
		logger.LogIf(j.ctx, err)
		return
	}
	for result := range results {
		// ignore batch-replicate.bin and batch-rotate.bin entries
		// (directory-style names ending in a slash are not job manifests)
		if strings.HasSuffix(result.Name, slashSeparator) {
			continue
		}
		req := &BatchJobRequest{}
		if err := req.load(ctx, j.objLayer, result.Name); err != nil {
			logger.LogIf(ctx, err)
			continue
		}
		// Best effort: a queue failure is logged and the remaining
		// manifests are still attempted.
		if err := j.queueJob(req); err != nil {
			logger.LogIf(ctx, err)
			continue
		}
	}
}
|
|
|
|
|
|
|
|
// AddWorker adds a replication worker to the pool
|
|
|
|
func (j *BatchJobPool) AddWorker() {
|
|
|
|
if j == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-j.ctx.Done():
|
|
|
|
return
|
|
|
|
case job, ok := <-j.jobCh:
|
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if job.Replicate != nil {
|
2023-03-31 13:48:36 -04:00
|
|
|
if job.Replicate.RemoteToLocal() {
|
|
|
|
if err := job.Replicate.StartFromSource(job.ctx, j.objLayer, *job); err != nil {
|
|
|
|
if !isErrBucketNotFound(err) {
|
|
|
|
logger.LogIf(j.ctx, err)
|
|
|
|
j.canceler(job.ID, false)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
// Bucket not found proceed to delete such a job.
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if err := job.Replicate.Start(job.ctx, j.objLayer, *job); err != nil {
|
|
|
|
if !isErrBucketNotFound(err) {
|
|
|
|
logger.LogIf(j.ctx, err)
|
|
|
|
j.canceler(job.ID, false)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
// Bucket not found proceed to delete such a job.
|
2022-10-03 05:10:15 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2023-04-04 13:56:54 -04:00
|
|
|
if job.KeyRotate != nil {
|
|
|
|
if err := job.KeyRotate.Start(job.ctx, j.objLayer, *job); err != nil {
|
|
|
|
if !isErrBucketNotFound(err) {
|
|
|
|
logger.LogIf(j.ctx, err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2022-10-03 05:10:15 -04:00
|
|
|
job.delete(j.ctx, j.objLayer)
|
2023-03-18 02:42:43 -04:00
|
|
|
j.canceler(job.ID, false)
|
2022-10-03 05:10:15 -04:00
|
|
|
case <-j.workerKillCh:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ResizeWorkers sets replication workers pool to new size
|
|
|
|
func (j *BatchJobPool) ResizeWorkers(n int) {
|
|
|
|
if j == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
j.mu.Lock()
|
|
|
|
defer j.mu.Unlock()
|
|
|
|
|
|
|
|
for j.workerSize < n {
|
|
|
|
j.workerSize++
|
|
|
|
go j.AddWorker()
|
|
|
|
}
|
|
|
|
for j.workerSize > n {
|
|
|
|
j.workerSize--
|
|
|
|
go func() { j.workerKillCh <- struct{}{} }()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (j *BatchJobPool) queueJob(req *BatchJobRequest) error {
|
|
|
|
if j == nil {
|
|
|
|
return errInvalidArgument
|
|
|
|
}
|
2023-03-18 02:42:43 -04:00
|
|
|
jctx, jcancel := context.WithCancel(j.ctx)
|
|
|
|
j.jmu.Lock()
|
|
|
|
j.jobCancelers[req.ID] = jcancel
|
|
|
|
j.jmu.Unlock()
|
|
|
|
req.ctx = jctx
|
|
|
|
|
2022-10-03 05:10:15 -04:00
|
|
|
select {
|
|
|
|
case <-j.ctx.Done():
|
|
|
|
j.once.Do(func() {
|
|
|
|
close(j.jobCh)
|
|
|
|
})
|
|
|
|
case j.jobCh <- req:
|
|
|
|
default:
|
|
|
|
return fmt.Errorf("batch job queue is currently full please try again later %#v", req)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-03-18 02:42:43 -04:00
|
|
|
// delete canceler from the map, cancel job if requested
|
|
|
|
func (j *BatchJobPool) canceler(jobID string, cancel bool) error {
|
|
|
|
if j == nil {
|
|
|
|
return errInvalidArgument
|
|
|
|
}
|
|
|
|
j.jmu.Lock()
|
|
|
|
defer j.jmu.Unlock()
|
|
|
|
if canceler, ok := j.jobCancelers[jobID]; ok {
|
|
|
|
if cancel {
|
|
|
|
canceler()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
delete(j.jobCancelers, jobID)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-10-03 05:10:15 -04:00
|
|
|
//msgp:ignore batchJobMetrics
type batchJobMetrics struct {
	sync.RWMutex
	// metrics tracks per-job progress snapshots, keyed by job ID.
	metrics map[string]*batchJobInfo
}
|
|
|
|
|
|
|
|
// globalBatchJobsMetrics holds in-memory metrics for all batch jobs on
// this node, keyed by job ID.
var globalBatchJobsMetrics = batchJobMetrics{
	metrics: make(map[string]*batchJobInfo),
}
|
|
|
|
|
2023-04-04 13:56:54 -04:00
|
|
|
//msgp:ignore batchJobMetric
//go:generate stringer -type=batchJobMetric -trimprefix=batchJobMetric $GOFILE
type batchJobMetric uint8

// Metric kinds emitted by batch jobs; the generated String() value is
// embedded in trace function names (see batchJobTrace).
const (
	batchReplicationMetricObject batchJobMetric = iota
	batchKeyRotationMetricObject
)
|
|
|
|
|
2023-04-04 13:56:54 -04:00
|
|
|
func batchJobTrace(d batchJobMetric, job string, startTime time.Time, duration time.Duration, info ObjectInfo, attempts int, err error) madmin.TraceInfo {
|
2022-10-03 05:10:15 -04:00
|
|
|
var errStr string
|
|
|
|
if err != nil {
|
|
|
|
errStr = err.Error()
|
|
|
|
}
|
2023-04-04 13:56:54 -04:00
|
|
|
jobKind := "batchReplication"
|
|
|
|
traceType := madmin.TraceBatchReplication
|
|
|
|
if d == batchKeyRotationMetricObject {
|
|
|
|
jobKind = "batchKeyRotation"
|
|
|
|
traceType = madmin.TraceBatchKeyRotation
|
|
|
|
}
|
|
|
|
funcName := fmt.Sprintf("%s.%s (job-name=%s)", jobKind, d.String(), job)
|
2022-10-03 05:10:15 -04:00
|
|
|
if attempts > 0 {
|
2023-04-04 13:56:54 -04:00
|
|
|
funcName = fmt.Sprintf("%s.%s (job-name=%s,attempts=%s)", jobKind, d.String(), job, humanize.Ordinal(attempts))
|
2022-10-03 05:10:15 -04:00
|
|
|
}
|
|
|
|
return madmin.TraceInfo{
|
2023-04-04 13:56:54 -04:00
|
|
|
TraceType: traceType,
|
2022-10-03 05:10:15 -04:00
|
|
|
Time: startTime,
|
|
|
|
NodeName: globalLocalNodeName,
|
|
|
|
FuncName: funcName,
|
|
|
|
Duration: duration,
|
|
|
|
Path: info.Name,
|
|
|
|
Error: errStr,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *batchJobMetrics) report(jobID string) (metrics *madmin.BatchJobMetrics) {
|
|
|
|
metrics = &madmin.BatchJobMetrics{CollectedAt: time.Now(), Jobs: make(map[string]madmin.JobMetric)}
|
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
for id, job := range m.metrics {
|
|
|
|
match := jobID != "" && id == jobID
|
|
|
|
metrics.Jobs[id] = madmin.JobMetric{
|
|
|
|
JobID: job.JobID,
|
|
|
|
JobType: job.JobType,
|
|
|
|
StartTime: job.StartTime,
|
|
|
|
LastUpdate: job.LastUpdate,
|
|
|
|
RetryAttempts: job.RetryAttempts,
|
|
|
|
Complete: job.Complete,
|
|
|
|
Failed: job.Failed,
|
|
|
|
Replicate: &madmin.ReplicateInfo{
|
|
|
|
Bucket: job.Bucket,
|
|
|
|
Object: job.Object,
|
|
|
|
Objects: job.Objects,
|
|
|
|
ObjectsFailed: job.ObjectsFailed,
|
|
|
|
BytesTransferred: job.BytesTransferred,
|
|
|
|
BytesFailed: job.BytesFailed,
|
|
|
|
},
|
2023-04-04 13:56:54 -04:00
|
|
|
KeyRotate: &madmin.KeyRotationInfo{
|
|
|
|
Bucket: job.Bucket,
|
|
|
|
Object: job.Object,
|
|
|
|
Objects: job.Objects,
|
|
|
|
ObjectsFailed: job.ObjectsFailed,
|
|
|
|
},
|
2022-10-03 05:10:15 -04:00
|
|
|
}
|
|
|
|
if match {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return metrics
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *batchJobMetrics) delete(jobID string) {
|
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
|
|
|
|
|
|
|
delete(m.metrics, jobID)
|
|
|
|
}
|
|
|
|
|
2023-02-13 15:07:58 -05:00
|
|
|
func (m *batchJobMetrics) save(jobID string, ri *batchJobInfo) {
|
2022-10-03 05:10:15 -04:00
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
|
|
|
|
2023-02-13 15:07:58 -05:00
|
|
|
m.metrics[jobID] = ri.clone()
|
2022-10-03 05:10:15 -04:00
|
|
|
}
|
|
|
|
|
2023-04-04 13:56:54 -04:00
|
|
|
func (m *batchJobMetrics) trace(d batchJobMetric, job string, attempts int, info ObjectInfo) func(err error) {
|
2022-10-03 05:10:15 -04:00
|
|
|
startTime := time.Now()
|
|
|
|
return func(err error) {
|
|
|
|
duration := time.Since(startTime)
|
2023-04-04 13:56:54 -04:00
|
|
|
switch d {
|
|
|
|
case batchReplicationMetricObject:
|
|
|
|
if globalTrace.NumSubscribers(madmin.TraceBatchReplication) > 0 {
|
|
|
|
globalTrace.Publish(batchJobTrace(d, job, startTime, duration, info, attempts, err))
|
|
|
|
}
|
|
|
|
case batchKeyRotationMetricObject:
|
|
|
|
if globalTrace.NumSubscribers(madmin.TraceBatchKeyRotation) > 0 {
|
|
|
|
globalTrace.Publish(batchJobTrace(d, job, startTime, duration, info, attempts, err))
|
|
|
|
}
|
2022-10-03 05:10:15 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2023-05-21 18:16:31 -04:00
|
|
|
|
|
|
|
func lookupStyle(s string) miniogo.BucketLookupType {
|
|
|
|
var lookup miniogo.BucketLookupType
|
|
|
|
switch s {
|
|
|
|
case "on":
|
|
|
|
lookup = miniogo.BucketLookupPath
|
|
|
|
case "off":
|
|
|
|
lookup = miniogo.BucketLookupDNS
|
|
|
|
default:
|
|
|
|
lookup = miniogo.BucketLookupAuto
|
|
|
|
|
|
|
|
}
|
|
|
|
return lookup
|
|
|
|
}
|