mirror of https://github.com/minio/minio.git

feat: implement support batch replication (#15554)

commit 2a13cc28f2
parent 4d761fda81
@@ -1,14 +1,7 @@
 linters-settings:
-  golint:
-    min-confidence: 0
-
   gofumpt:
     lang-version: "1.18"
-
-    # Choose whether or not to use the extra rules that are disabled
-    # by default
-    extra-rules: false
 
   misspell:
     locale: US
@@ -411,6 +411,7 @@ func (a adminAPIHandlers) MetricsHandler(w http.ResponseWriter, r *http.Request)
 			}
 		}
 	}
+	jobID := r.Form.Get("by-jobID")
 
 	hosts := strings.Split(r.Form.Get("hosts"), ",")
 	byHost := strings.EqualFold(r.Form.Get("by-host"), "true")

@@ -432,12 +433,20 @@ func (a adminAPIHandlers) MetricsHandler(w http.ResponseWriter, r *http.Request)
 	enc := json.NewEncoder(w)
 	for n > 0 {
 		var m madmin.RealtimeMetrics
-		mLocal := collectLocalMetrics(types, hostMap, diskMap)
+		mLocal := collectLocalMetrics(types, collectMetricsOpts{
+			hosts: hostMap,
+			disks: diskMap,
+			jobID: jobID,
+		})
 		m.Merge(&mLocal)
 
 		// Allow half the interval for collecting remote...
 		cctx, cancel := context.WithTimeout(ctx, interval/2)
-		mRemote := collectRemoteMetrics(cctx, types, hostMap, diskMap)
+		mRemote := collectRemoteMetrics(cctx, types, collectMetricsOpts{
+			hosts: hostMap,
+			disks: diskMap,
+			jobID: jobID,
+		})
 		cancel()
 		m.Merge(&mRemote)
 		if !byHost {

@@ -449,7 +458,7 @@ func (a adminAPIHandlers) MetricsHandler(w http.ResponseWriter, r *http.Request)
 
 		m.Final = n <= 1
 
-		// Marshal API response
+		// Marshal API reesponse
 		if err := enc.Encode(&m); err != nil {
 			n = 0
 		}
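With the new `by-jobID` form value, per-job metrics can be requested through the admin metrics stream. Below is a client-side sketch using madmin-go; it assumes a madmin-go version whose `MetricsOptions` exposes a `ByJobID` field mapping to the form value read above, and the endpoint, credentials, and job ID are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/minio/madmin-go"
)

func main() {
	// Placeholder endpoint and credentials.
	adm, err := madmin.New("localhost:9000", "minioadmin", "minioadmin", false)
	if err != nil {
		log.Fatal(err)
	}
	opts := madmin.MetricsOptions{
		Type:    madmin.MetricsBatchJobs,
		ByJobID: "E24HH4nNMcgY5taynaPfxu", // hypothetical job ID
		N:       10,                       // number of metric updates to receive
	}
	// Each callback invocation carries one merged RealtimeMetrics snapshot.
	err = adm.Metrics(context.Background(), opts, func(m madmin.RealtimeMetrics) {
		fmt.Printf("final=%v batch-jobs=%+v\n", m.Final, m.Aggregated.BatchJobs)
	})
	if err != nil {
		log.Fatal(err)
	}
}
```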
@@ -206,6 +206,16 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 		adminRouter.Methods(http.MethodPost).Path(adminVersion+"/replication/diff").HandlerFunc(
 			gz(httpTraceHdrs(adminAPI.ReplicationDiffHandler))).Queries("bucket", "{bucket:.*}")
 
+		// Batch job operations
+		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/start-job").HandlerFunc(
+			gz(httpTraceHdrs(adminAPI.StartBatchJob)))
+
+		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-jobs").HandlerFunc(
+			gz(httpTraceHdrs(adminAPI.ListBatchJobs)))
+
+		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/describe-job").HandlerFunc(
+			gz(httpTraceHdrs(adminAPI.DescribeBatchJob)))
+
 		// Bucket migration operations
 		// ExportBucketMetaHandler
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/export-bucket-metadata").HandlerFunc(
@@ -263,6 +263,7 @@ const (
 	ErrAdminNoSuchUser
 	ErrAdminNoSuchGroup
 	ErrAdminGroupNotEmpty
+	ErrAdminNoSuchJob
 	ErrAdminNoSuchPolicy
 	ErrAdminInvalidArgument
 	ErrAdminInvalidAccessKey
@@ -1226,6 +1227,11 @@ var errorCodes = errorCodeMap{
 		Description:    "The specified group does not exist.",
 		HTTPStatusCode: http.StatusNotFound,
 	},
+	ErrAdminNoSuchJob: {
+		Code:           "XMinioAdminNoSuchJob",
+		Description:    "The specified job does not exist.",
+		HTTPStatusCode: http.StatusNotFound,
+	},
 	ErrAdminGroupNotEmpty: {
 		Code:           "XMinioAdminGroupNotEmpty",
 		Description:    "The specified group is not empty - cannot remove it.",
@@ -1923,6 +1929,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 		apiErr = ErrAdminNoSuchGroup
 	case errGroupNotEmpty:
 		apiErr = ErrAdminGroupNotEmpty
+	case errNoSuchJob:
+		apiErr = ErrAdminNoSuchJob
 	case errNoSuchPolicy:
 		apiErr = ErrAdminNoSuchPolicy
 	case errSignatureMismatch:
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,23 @@
// Code generated by "stringer -type=batchReplicationMetric -trimprefix=batchReplicationMetric batch-handlers.go"; DO NOT EDIT.

package cmd

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[batchReplicationMetricObject-0]
}

const _batchReplicationMetric_name = "Object"

var _batchReplicationMetric_index = [...]uint8{0, 6}

func (i batchReplicationMetric) String() string {
	if i >= batchReplicationMetric(len(_batchReplicationMetric_index)-1) {
		return "batchReplicationMetric(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _batchReplicationMetric_name[_batchReplicationMetric_index[i]:_batchReplicationMetric_index[i+1]]
}
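The generated `String()` method above is this file's whole surface. A small illustrative fragment of what it yields; it would have to live inside package `cmd` since the type is unexported, and `batchReplicationMetricObject` is defined in batch-handlers.go, whose diff is suppressed above (its zero value is implied by the index check):

```go
var m batchReplicationMetric            // zero value is batchReplicationMetricObject
fmt.Println(m)                          // prints "Object" via the String() method
fmt.Println(batchReplicationMetric(42)) // out of range: prints "batchReplicationMetric(42)"
```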
@@ -1875,12 +1875,22 @@ func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, re
 			versionsSorter(fivs.Versions).reverse()
 
 			for _, version := range fivs.Versions {
+				send := true
+				if opts.WalkFilter != nil && !opts.WalkFilter(version) {
+					send = false
+				}
+
+				if !send {
+					continue
+				}
+
 				versioned := vcfg != nil && vcfg.Versioned(version.Name)
+				objInfo := version.ToObjectInfo(bucket, version.Name, versioned)
+
 				select {
 				case <-ctx.Done():
 					return
-				case results <- version.ToObjectInfo(bucket, version.Name, versioned):
+				case results <- objInfo:
 				}
 			}
 		}

@@ -1904,7 +1914,7 @@ func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, re
 			path:           path,
 			filterPrefix:   filterPrefix,
 			recursive:      true,
-			forwardTo:      "",
+			forwardTo:      opts.WalkMarker,
 			minDisks:       1,
 			reportNotFound: false,
 			agreed:         loadEntry,
@@ -3050,12 +3050,22 @@ func (es *erasureSingle) Walk(ctx context.Context, bucket, prefix string, result
 			versionsSorter(fivs.Versions).reverse()
 
 			for _, version := range fivs.Versions {
+				send := true
+				if opts.WalkFilter != nil && !opts.WalkFilter(version) {
+					send = false
+				}
+
+				if !send {
+					continue
+				}
+
 				versioned := vcfg != nil && vcfg.Versioned(version.Name)
+				objInfo := version.ToObjectInfo(bucket, version.Name, versioned)
+
 				select {
 				case <-ctx.Done():
 					return
-				case results <- version.ToObjectInfo(bucket, version.Name, versioned):
+				case results <- objInfo:
 				}
 			}
 		}

@@ -3079,7 +3089,7 @@ func (es *erasureSingle) Walk(ctx context.Context, bucket, prefix string, result
 			path:           path,
 			filterPrefix:   filterPrefix,
 			recursive:      true,
-			forwardTo:      "",
+			forwardTo:      opts.WalkMarker,
 			minDisks:       1,
 			reportNotFound: false,
 			agreed:         loadEntry,
@@ -25,23 +25,29 @@ import (
 	"github.com/minio/minio/internal/disk"
 )
 
-func collectLocalMetrics(types madmin.MetricType, hosts map[string]struct{}, disks map[string]struct{}) (m madmin.RealtimeMetrics) {
+type collectMetricsOpts struct {
+	hosts map[string]struct{}
+	disks map[string]struct{}
+	jobID string
+}
+
+func collectLocalMetrics(types madmin.MetricType, opts collectMetricsOpts) (m madmin.RealtimeMetrics) {
 	if types == madmin.MetricsNone {
 		return
 	}
 
-	if len(hosts) > 0 {
-		if _, ok := hosts[globalMinioAddr]; !ok {
+	if len(opts.hosts) > 0 {
+		if _, ok := opts.hosts[globalMinioAddr]; !ok {
 			return
 		}
 	}
 
-	if types.Contains(madmin.MetricsDisk) && !globalIsGateway {
+	if types.Contains(madmin.MetricsDisk) {
 		m.ByDisk = make(map[string]madmin.DiskMetric)
 		aggr := madmin.DiskMetric{
 			CollectedAt: time.Now(),
 		}
-		for name, disk := range collectLocalDisksMetrics(disks) {
+		for name, disk := range collectLocalDisksMetrics(opts.disks) {
 			m.ByDisk[name] = disk
 			aggr.Merge(&disk)
 		}

@@ -56,6 +62,10 @@ func collectLocalMetrics(types madmin.MetricType, hosts map[string]struct{}, dis
 		metrics := globalOSMetrics.report()
 		m.Aggregated.OS = &metrics
 	}
+	if types.Contains(madmin.MetricsBatchJobs) {
+		m.Aggregated.BatchJobs = globalBatchJobsMetrics.report(opts.jobID)
+	}
+
 	// Add types...
 
 	// ByHost is a shallow reference, so careful about sharing.

@@ -143,11 +153,11 @@ func collectLocalDisksMetrics(disks map[string]struct{}) map[string]madmin.DiskM
 	return metrics
 }
 
-func collectRemoteMetrics(ctx context.Context, types madmin.MetricType, hosts map[string]struct{}, disks map[string]struct{}) (m madmin.RealtimeMetrics) {
+func collectRemoteMetrics(ctx context.Context, types madmin.MetricType, opts collectMetricsOpts) (m madmin.RealtimeMetrics) {
 	if !globalIsDistErasure {
 		return
 	}
-	all := globalNotificationSys.GetMetrics(ctx, types, hosts, disks)
+	all := globalNotificationSys.GetMetrics(ctx, types, opts)
 	for _, remote := range all {
 		m.Merge(&remote)
 	}
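Taken together with the call sites earlier in this diff, the refactor replaces the parallel `hosts`/`disks` map arguments with a single options struct, so the new `jobID` filter (and any future field) does not ripple through every signature. A sketch of the resulting call shape, with placeholder values, as it would appear inside package `cmd`:

```go
// collectMetricsOpts and the collect* helpers are the unexported ones
// defined in this diff; the values below are placeholders.
opts := collectMetricsOpts{
	hosts: map[string]struct{}{"minio-1:9000": {}}, // hypothetical peer address
	disks: map[string]struct{}{},                   // empty map: no disk filter
	jobID: "E24HH4nNMcgY5taynaPfxu",                // batch job to report on
}
m := collectLocalMetrics(madmin.MetricsBatchJobs, opts)
remote := collectRemoteMetrics(ctx, madmin.MetricsBatchJobs, opts)
m.Merge(&remote) // merged local + remote snapshot, as in MetricsHandler
```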
@@ -731,7 +731,7 @@ func (sys *NotificationSys) GetOSInfo(ctx context.Context) []madmin.OSInfo {
 }
 
 // GetMetrics - Get metrics from all peers.
-func (sys *NotificationSys) GetMetrics(ctx context.Context, t madmin.MetricType, hosts map[string]struct{}, disks map[string]struct{}) []madmin.RealtimeMetrics {
+func (sys *NotificationSys) GetMetrics(ctx context.Context, t madmin.MetricType, opts collectMetricsOpts) []madmin.RealtimeMetrics {
 	reply := make([]madmin.RealtimeMetrics, len(sys.peerClients))
 
 	g := errgroup.WithNErrs(len(sys.peerClients))

@@ -740,8 +740,8 @@ func (sys *NotificationSys) GetMetrics(ctx context.Context, t madmin.MetricType,
 			continue
 		}
 		host := client.host.String()
-		if len(hosts) > 0 {
-			if _, ok := hosts[host]; !ok {
+		if len(opts.hosts) > 0 {
+			if _, ok := opts.hosts[host]; !ok {
 				continue
 			}
 		}

@@ -749,7 +749,7 @@ func (sys *NotificationSys) GetMetrics(ctx context.Context, t madmin.MetricType,
 		index := index
 		g.Go(func() error {
 			var err error
-			reply[index], err = sys.peerClients[index].GetMetrics(ctx, t, disks)
+			reply[index], err = sys.peerClients[index].GetMetrics(ctx, t, opts)
 			return err
 		}, index)
 	}
@@ -85,6 +85,8 @@ type ObjectOptions struct {
 	// mainly set for certain WRITE operations.
 	SkipDecommissioned bool
 
+	WalkFilter      func(info FileInfo) bool // return 'true' to send the FileInfo down the walk, 'false' to skip it
+	WalkMarker      string                   // set to skip until this object
 	PrefixEnabledFn func(prefix string) bool // function which returns true if versioning is enabled on prefix
 
 	// IndexCB will return any index created by the compression.
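Paired with the Walk changes above, these two fields let a caller prune versions server-side and resume a listing from a checkpoint. A hypothetical caller inside package `cmd` might look like this (the predicate and marker value are illustrative, not from this commit):

```go
results := make(chan ObjectInfo)
// Walk only versions that are not delete markers, resuming after a
// previously checkpointed object name.
err := objAPI.Walk(ctx, bucket, prefix, results, ObjectOptions{
	WalkFilter: func(info FileInfo) bool { return !info.Deleted },
	WalkMarker: lastCheckpointedObject, // hypothetical resume point
})
```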
@@ -195,12 +195,13 @@ func (client *peerRESTClient) GetMemInfo(ctx context.Context) (info madmin.MemIn
 }
 
 // GetMetrics - fetch metrics from a remote node.
-func (client *peerRESTClient) GetMetrics(ctx context.Context, t madmin.MetricType, diskMap map[string]struct{}) (info madmin.RealtimeMetrics, err error) {
+func (client *peerRESTClient) GetMetrics(ctx context.Context, t madmin.MetricType, opts collectMetricsOpts) (info madmin.RealtimeMetrics, err error) {
 	values := make(url.Values)
-	values.Set(peerRESTTypes, strconv.FormatUint(uint64(t), 10))
-	for disk := range diskMap {
+	values.Set(peerRESTMetricsTypes, strconv.FormatUint(uint64(t), 10))
+	for disk := range opts.disks {
 		values.Set(peerRESTDisk, disk)
 	}
+	values.Set(peerRESTJobID, opts.jobID)
 	respBody, err := client.callWithContext(ctx, peerRESTMethodMetrics, values, nil, -1)
 	if err != nil {
 		return
@@ -91,8 +91,9 @@ const (
 	peerRESTConcurrent   = "concurrent"
 	peerRESTDuration     = "duration"
 	peerRESTStorageClass = "storage-class"
-	peerRESTTypes        = "types"
+	peerRESTMetricsTypes = "types"
 	peerRESTDisk         = "disk"
+	peerRESTJobID        = "job-id"
 
 	peerRESTListenBucket = "bucket"
 	peerRESTListenPrefix = "prefix"
@@ -425,23 +425,25 @@ func (s *peerRESTServer) GetMetricsHandler(w http.ResponseWriter, r *http.Reques
 	}
 
 	var types madmin.MetricType
-	if t, _ := strconv.ParseUint(r.Form.Get(peerRESTTypes), 10, 64); t != 0 {
+	if t, _ := strconv.ParseUint(r.Form.Get(peerRESTMetricsTypes), 10, 64); t != 0 {
 		types = madmin.MetricType(t)
 	} else {
 		types = madmin.MetricsAll
 	}
 
 	diskMap := make(map[string]struct{})
-	if r.Form != nil {
-		for _, disk := range r.Form[peerRESTDisk] {
-			diskMap[disk] = struct{}{}
-		}
+	for _, disk := range r.Form[peerRESTDisk] {
+		diskMap[disk] = struct{}{}
 	}
+	jobID := r.Form.Get(peerRESTJobID)
 
 	ctx, cancel := context.WithCancel(r.Context())
 	defer cancel()
 
-	info := collectLocalMetrics(types, nil, diskMap)
+	info := collectLocalMetrics(types, collectMetricsOpts{
+		disks: diskMap,
+		jobID: jobID,
+	})
 
 	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
 }

@@ -1308,7 +1310,7 @@ func registerPeerRESTHandlers(router *mux.Router) {
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodServerInfo).HandlerFunc(httpTraceHdrs(server.ServerInfoHandler))
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodProcInfo).HandlerFunc(httpTraceHdrs(server.GetProcInfoHandler))
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMemInfo).HandlerFunc(httpTraceHdrs(server.GetMemInfoHandler))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMetrics).HandlerFunc(httpTraceHdrs(server.GetMetricsHandler)).Queries(restQueries(peerRESTTypes)...)
+	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMetrics).HandlerFunc(httpTraceHdrs(server.GetMetricsHandler)).Queries(restQueries(peerRESTMetricsTypes)...)
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSysErrors).HandlerFunc(httpTraceHdrs(server.GetSysErrorsHandler))
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSysServices).HandlerFunc(httpTraceHdrs(server.GetSysServicesHandler))
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSysConfig).HandlerFunc(httpTraceHdrs(server.GetSysConfigHandler))
@@ -601,6 +601,8 @@ func serverMain(ctx *cli.Context) {
 	initBackgroundReplication(GlobalContext, newObject)
 	initBackgroundTransition(GlobalContext, newObject)
 
+	globalBatchJobPool = newBatchJobPool(GlobalContext, newObject, 100)
+
 	go func() {
 		err := globalTierConfigMgr.Init(GlobalContext, newObject)
 		if err != nil {
@@ -45,8 +45,8 @@ func oldLinux() bool {
 
 func setMaxResources() (err error) {
 	// Set the Go runtime max threads threshold to 90% of kernel setting.
-	sysMaxThreads, mErr := sys.GetMaxThreads()
-	if mErr == nil {
+	sysMaxThreads, err := sys.GetMaxThreads()
+	if err == nil {
 		minioMaxThreads := (sysMaxThreads * 90) / 100
 		// Only set max threads if it is greater than the default one
 		if minioMaxThreads > 10000 {
@@ -0,0 +1,152 @@
# MinIO Batch Job
MinIO Batch Jobs is a MinIO object-management feature that lets you manage objects at scale. Jobs currently supported by MinIO:

- Replicate objects between buckets on multiple sites

Upcoming jobs:

- Copy objects from NAS to MinIO
- Copy objects from HDFS to MinIO

## Replication Job
To perform replication via batch jobs, you create a job. The job consists of a job description YAML that describes

- the source location the objects must be copied from
- the target location the objects must be copied to
- fine-grained filtering criteria to pick the relevant objects from the source

The MinIO batch jobs framework also provides

- automatic retries of a failed job, driven by user input
- real-time monitoring of job progress
- notifications upon completion or failure to a user-configured target

The following YAML describes the structure of a replication job; each value is documented and self-describing. A minimal filled-in example follows the template.

```yaml
replicate:
  apiVersion: v1
  # source of the objects to be replicated
  source:
    type: TYPE # valid values are "minio"
    bucket: BUCKET
    prefix: PREFIX
    # NOTE: if source is remote then target must be "local"
    # endpoint: ENDPOINT
    # credentials:
    #   accessKey: ACCESS-KEY
    #   secretKey: SECRET-KEY
    #   sessionToken: SESSION-TOKEN # available when rotating credentials are used

  # target where the objects must be replicated
  target:
    type: TYPE # valid values are "minio"
    bucket: BUCKET
    prefix: PREFIX
    # NOTE: if target is remote then source must be "local"
    # endpoint: ENDPOINT
    # credentials:
    #   accessKey: ACCESS-KEY
    #   secretKey: SECRET-KEY
    #   sessionToken: SESSION-TOKEN # available when rotating credentials are used

  # optional flags-based filtering criteria
  # for all source objects
  flags:
    filter:
      newerThan: "7d" # match objects newer than this value (e.g. 7d10h31s)
      olderThan: "7d" # match objects older than this value (e.g. 7d10h31s)
      createdAfter: "date" # match objects created after "date"
      createdBefore: "date" # match objects created before "date"

      ## NOTE: tags are not supported when "source" is remote.
      # tags:
      #   - key: "name"
      #     value: "pick*" # match objects with tag 'name', with all values starting with 'pick'

      ## NOTE: metadata filter is not supported when "source" is non-MinIO.
      # metadata:
      #   - key: "content-type"
      #     value: "image/*" # match objects with 'content-type', with all values starting with 'image/'

    notify:
      endpoint: "https://notify.endpoint" # notification endpoint to receive job status events
      token: "Bearer xxxxx" # optional authentication token for the notification endpoint

    retry:
      attempts: 10 # number of retries for the job before giving up
      delay: "500ms" # least amount of delay between each retry
```
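For instance, a minimal filled-in job description (the bucket names, endpoint, and credentials below are hypothetical placeholders) could look like:

```yaml
replicate:
  apiVersion: v1
  source:
    type: minio
    bucket: photos          # placeholder source bucket
    prefix: 2022/
  target:
    type: minio
    bucket: photos-backup   # placeholder target bucket
    prefix: 2022/
    endpoint: https://replica.example.com:9000 # placeholder remote endpoint
    credentials:
      accessKey: REPLICA-ACCESS-KEY # placeholder
      secretKey: REPLICA-SECRET-KEY # placeholder
  flags:
    filter:
      olderThan: "7d" # only replicate objects older than a week
```

Save a definition like this as `replicate.yaml` and hand it to `mc batch start` as shown below.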

You can create and run multiple 'replicate' jobs at a time; there are no predefined limits.

## Batch Jobs Terminology

### Job
A job is the basic unit of work for MinIO Batch Jobs. A job is a self-describing YAML; once this YAML is submitted and evaluated, MinIO performs the requested actions on each of the objects matching the criteria described in the job YAML file.

### Type
Type describes the job type, such as replicating objects between MinIO sites. Each job performs a single type of operation across all objects that match the job description criteria.

## Batch Jobs via Command Line
[mc](http://github.com/minio/mc) provides the 'mc batch' command to create, start, and manage submitted jobs.

```
NAME:
  mc batch - manage batch jobs

USAGE:
  mc batch COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...]

COMMANDS:
  generate  generate a new batch job definition
  start     start a new batch job
  list, ls  list all current batch jobs
  status    summarize job events on MinIO server in real-time
  describe  describe job definition for a job
```

### Generate a job YAML
```
mc batch generate alias/ replicate
```

### Start the batch job (returns the job ID)
```
mc batch start alias/ ./replicate.yaml
Successfully start 'replicate' job `E24HH4nNMcgY5taynaPfxu` on '2022-09-26 17:19:06.296974771 -0700 PDT'
```

### List all batch jobs
```
mc batch list alias/
ID                      TYPE            USER            STARTED
E24HH4nNMcgY5taynaPfxu  replicate       minioadmin      1 minute ago
```

### List all 'replicate' batch jobs
```
mc batch list alias/ --type replicate
ID                      TYPE            USER            STARTED
E24HH4nNMcgY5taynaPfxu  replicate       minioadmin      1 minute ago
```

### Real-time 'status' for a batch job
```
mc batch status myminio/ E24HH4nNMcgY5taynaPfxu
●∙∙
Objects:        28766
Versions:       28766
Throughput:     3.0 MiB/s
Transferred:    406 MiB
Elapsed:        2m14.227222868s
CurrObjName:    share/doc/xml-core/examples/foo.xmlcatalogs
```

### 'describe' the batch job YAML
```
mc batch describe myminio/ E24HH4nNMcgY5taynaPfxu
replicate:
  apiVersion: v1
  ...
```
go.mod (5 changed lines)
@@ -40,6 +40,7 @@ require (
 	github.com/klauspost/readahead v1.4.0
 	github.com/klauspost/reedsolomon v1.11.0
 	github.com/lib/pq v1.10.7
+	github.com/lithammer/shortuuid/v4 v4.0.0
 	github.com/miekg/dns v1.1.50
 	github.com/minio/cli v1.24.0
 	github.com/minio/console v0.20.6-0.20221003060434-413870e995a9

@@ -74,7 +75,7 @@ require (
 	github.com/secure-io/sio-go v0.3.1
 	github.com/shirou/gopsutil/v3 v3.22.8
 	github.com/streadway/amqp v1.0.0
-	github.com/tinylib/msgp v1.1.7-0.20211026165309-e818a1881b0e
+	github.com/tinylib/msgp v1.1.7-0.20220719154719-f3635b96e483
 	github.com/valyala/bytebufferpool v1.0.0
 	github.com/xdg/scram v1.0.5
 	github.com/yargevad/filepathx v1.0.0

@@ -88,6 +89,7 @@ require (
 	golang.org/x/sys v0.0.0-20220915200043-7b5979e65e41
 	golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9
 	google.golang.org/api v0.96.0
+	gopkg.in/yaml.v2 v2.4.0
 )
 
 require (

@@ -222,6 +224,5 @@ require (
 	gopkg.in/h2non/filetype.v1 v1.0.5 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
-	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
go.sum (6 changed lines)
@@ -584,6 +584,8 @@ github.com/lestrrat-go/pdebug/v3 v3.0.1/go.mod h1:za+m+Ve24yCxTEhR59N7UlnJomWwCi
 github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
 github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lithammer/shortuuid/v4 v4.0.0 h1:QRbbVkfgNippHOS8PXDkti4NaWeyYfcBTHtw7k08o4c=
+github.com/lithammer/shortuuid/v4 v4.0.0/go.mod h1:Zs8puNcrvf2rV9rTH51ZLLcj7ZXqQI3lv67aw4KiB1Y=
 github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
 github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
 github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=

@@ -899,8 +901,8 @@ github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
 github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
 github.com/tinylib/msgp v1.1.3/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
 github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw=
-github.com/tinylib/msgp v1.1.7-0.20211026165309-e818a1881b0e h1:P5tyWbssToKowBPTA1/EzqPXwrZNc8ZeNPdjgpcDEoI=
-github.com/tinylib/msgp v1.1.7-0.20211026165309-e818a1881b0e/go.mod h1:g7jEyb18KPe65d9RRhGw+ThaJr5duyBH8eaFgBUor7Y=
+github.com/tinylib/msgp v1.1.7-0.20220719154719-f3635b96e483 h1:dV39KLgsNZGri7Hn5QhHwRQzGf7kHOki2vZujFXDFhI=
+github.com/tinylib/msgp v1.1.7-0.20220719154719-f3635b96e483/go.mod h1:g7jEyb18KPe65d9RRhGw+ThaJr5duyBH8eaFgBUor7Y=
 github.com/tklauser/go-sysconf v0.3.6/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
 github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
 github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
@@ -94,10 +94,10 @@ const (
 
 // Credentials holds access and secret keys.
 type Credentials struct {
-	AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty"`
-	SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty"`
-	Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty"`
-	SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty"`
+	AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty" yaml:"accessKey"`
+	SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty" yaml:"secretKey"`
+	SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty" yaml:"sessionToken"`
+	Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty" yaml:"-"`
 	Status       string    `xml:"-" json:"status,omitempty"`
 	ParentUser   string    `xml:"-" json:"parentUser,omitempty"`
 	Groups       []string  `xml:"-" json:"groups,omitempty"`
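The new `yaml` tags (and the promotion of `gopkg.in/yaml.v2` to a direct dependency in go.mod above) exist so the credentials block of a job YAML can decode straight into these fields. A standalone sketch of that round trip, using a local mirror struct and throwaway values:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// Mirrors the yaml-tagged fields of auth.Credentials shown above;
// Expiration carries yaml:"-" and is deliberately omitted.
type creds struct {
	AccessKey    string `yaml:"accessKey"`
	SecretKey    string `yaml:"secretKey"`
	SessionToken string `yaml:"sessionToken"`
}

func main() {
	// Throwaway values in the shape of a job YAML credentials block.
	doc := []byte("accessKey: job-access-key\nsecretKey: job-secret-key\nsessionToken: \"\"\n")
	var c creds
	if err := yaml.Unmarshal(doc, &c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", c) // {AccessKey:job-access-key SecretKey:job-secret-key SessionToken:}
}
```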