diff --git a/.golangci.yml b/.golangci.yml index 199eab226..53d567882 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,14 +1,7 @@ linters-settings: - golint: - min-confidence: 0 - gofumpt: lang-version: "1.18" - # Choose whether or not to use the extra rules that are disabled - # by default - extra-rules: false - misspell: locale: US diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index a07b3eb78..25e8a0686 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -411,6 +411,7 @@ func (a adminAPIHandlers) MetricsHandler(w http.ResponseWriter, r *http.Request) } } } + jobID := r.Form.Get("by-jobID") hosts := strings.Split(r.Form.Get("hosts"), ",") byHost := strings.EqualFold(r.Form.Get("by-host"), "true") @@ -432,12 +433,20 @@ func (a adminAPIHandlers) MetricsHandler(w http.ResponseWriter, r *http.Request) enc := json.NewEncoder(w) for n > 0 { var m madmin.RealtimeMetrics - mLocal := collectLocalMetrics(types, hostMap, diskMap) + mLocal := collectLocalMetrics(types, collectMetricsOpts{ + hosts: hostMap, + disks: diskMap, + jobID: jobID, + }) m.Merge(&mLocal) // Allow half the interval for collecting remote... 
cctx, cancel := context.WithTimeout(ctx, interval/2) - mRemote := collectRemoteMetrics(cctx, types, hostMap, diskMap) + mRemote := collectRemoteMetrics(cctx, types, collectMetricsOpts{ + hosts: hostMap, + disks: diskMap, + jobID: jobID, + }) cancel() m.Merge(&mRemote) if !byHost { @@ -449,7 +458,7 @@ func (a adminAPIHandlers) MetricsHandler(w http.ResponseWriter, r *http.Request) m.Final = n <= 1 // Marshal API response if err := enc.Encode(&m); err != nil { n = 0 } diff --git a/cmd/admin-router.go b/cmd/admin-router.go index 4650e87af..c40ccb755 100644 --- a/cmd/admin-router.go +++ b/cmd/admin-router.go @@ -206,6 +206,16 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) { adminRouter.Methods(http.MethodPost).Path(adminVersion+"/replication/diff").HandlerFunc( gz(httpTraceHdrs(adminAPI.ReplicationDiffHandler))).Queries("bucket", "{bucket:.*}") + // Batch job operations + adminRouter.Methods(http.MethodPost).Path(adminVersion + "/start-job").HandlerFunc( + gz(httpTraceHdrs(adminAPI.StartBatchJob))) + + adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-jobs").HandlerFunc( + gz(httpTraceHdrs(adminAPI.ListBatchJobs))) + + adminRouter.Methods(http.MethodGet).Path(adminVersion + "/describe-job").HandlerFunc( + gz(httpTraceHdrs(adminAPI.DescribeBatchJob))) + // Bucket migration operations // ExportBucketMetaHandler adminRouter.Methods(http.MethodGet).Path(adminVersion + "/export-bucket-metadata").HandlerFunc( diff --git a/cmd/api-errors.go b/cmd/api-errors.go index 08d0f913e..a59b0a810 100644 --- a/cmd/api-errors.go +++ b/cmd/api-errors.go @@ -263,6 +263,7 @@ const ( ErrAdminNoSuchUser ErrAdminNoSuchGroup ErrAdminGroupNotEmpty + ErrAdminNoSuchJob ErrAdminNoSuchPolicy ErrAdminInvalidArgument ErrAdminInvalidAccessKey @@ -1226,6 +1227,11 @@ var errorCodes = errorCodeMap{ Description: "The specified group does not exist.", HTTPStatusCode: http.StatusNotFound, }, + ErrAdminNoSuchJob: { + Code:
"XMinioAdminNoSuchJob", + Description: "The specified job does not exist.", + HTTPStatusCode: http.StatusNotFound, + }, ErrAdminGroupNotEmpty: { Code: "XMinioAdminGroupNotEmpty", Description: "The specified group is not empty - cannot remove it.", @@ -1923,6 +1929,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) { apiErr = ErrAdminNoSuchGroup case errGroupNotEmpty: apiErr = ErrAdminGroupNotEmpty + case errNoSuchJob: + apiErr = ErrAdminNoSuchJob case errNoSuchPolicy: apiErr = ErrAdminNoSuchPolicy case errSignatureMismatch: diff --git a/cmd/apierrorcode_string.go b/cmd/apierrorcode_string.go index a9bf5f27f..2e93c7804 100644 --- a/cmd/apierrorcode_string.go +++ b/cmd/apierrorcode_string.go @@ -179,135 +179,136 @@ func _() { _ = x[ErrAdminNoSuchUser-168] _ = x[ErrAdminNoSuchGroup-169] _ = x[ErrAdminGroupNotEmpty-170] - _ = x[ErrAdminNoSuchPolicy-171] - _ = x[ErrAdminInvalidArgument-172] - _ = x[ErrAdminInvalidAccessKey-173] - _ = x[ErrAdminInvalidSecretKey-174] - _ = x[ErrAdminConfigNoQuorum-175] - _ = x[ErrAdminConfigTooLarge-176] - _ = x[ErrAdminConfigBadJSON-177] - _ = x[ErrAdminNoSuchConfigTarget-178] - _ = x[ErrAdminConfigEnvOverridden-179] - _ = x[ErrAdminConfigDuplicateKeys-180] - _ = x[ErrAdminCredentialsMismatch-181] - _ = x[ErrInsecureClientRequest-182] - _ = x[ErrObjectTampered-183] - _ = x[ErrSiteReplicationInvalidRequest-184] - _ = x[ErrSiteReplicationPeerResp-185] - _ = x[ErrSiteReplicationBackendIssue-186] - _ = x[ErrSiteReplicationServiceAccountError-187] - _ = x[ErrSiteReplicationBucketConfigError-188] - _ = x[ErrSiteReplicationBucketMetaError-189] - _ = x[ErrSiteReplicationIAMError-190] - _ = x[ErrSiteReplicationConfigMissing-191] - _ = x[ErrAdminBucketQuotaExceeded-192] - _ = x[ErrAdminNoSuchQuotaConfiguration-193] - _ = x[ErrHealNotImplemented-194] - _ = x[ErrHealNoSuchProcess-195] - _ = x[ErrHealInvalidClientToken-196] - _ = x[ErrHealMissingBucket-197] - _ = x[ErrHealAlreadyRunning-198] - _ = 
x[ErrHealOverlappingPaths-199] - _ = x[ErrIncorrectContinuationToken-200] - _ = x[ErrEmptyRequestBody-201] - _ = x[ErrUnsupportedFunction-202] - _ = x[ErrInvalidExpressionType-203] - _ = x[ErrBusy-204] - _ = x[ErrUnauthorizedAccess-205] - _ = x[ErrExpressionTooLong-206] - _ = x[ErrIllegalSQLFunctionArgument-207] - _ = x[ErrInvalidKeyPath-208] - _ = x[ErrInvalidCompressionFormat-209] - _ = x[ErrInvalidFileHeaderInfo-210] - _ = x[ErrInvalidJSONType-211] - _ = x[ErrInvalidQuoteFields-212] - _ = x[ErrInvalidRequestParameter-213] - _ = x[ErrInvalidDataType-214] - _ = x[ErrInvalidTextEncoding-215] - _ = x[ErrInvalidDataSource-216] - _ = x[ErrInvalidTableAlias-217] - _ = x[ErrMissingRequiredParameter-218] - _ = x[ErrObjectSerializationConflict-219] - _ = x[ErrUnsupportedSQLOperation-220] - _ = x[ErrUnsupportedSQLStructure-221] - _ = x[ErrUnsupportedSyntax-222] - _ = x[ErrUnsupportedRangeHeader-223] - _ = x[ErrLexerInvalidChar-224] - _ = x[ErrLexerInvalidOperator-225] - _ = x[ErrLexerInvalidLiteral-226] - _ = x[ErrLexerInvalidIONLiteral-227] - _ = x[ErrParseExpectedDatePart-228] - _ = x[ErrParseExpectedKeyword-229] - _ = x[ErrParseExpectedTokenType-230] - _ = x[ErrParseExpected2TokenTypes-231] - _ = x[ErrParseExpectedNumber-232] - _ = x[ErrParseExpectedRightParenBuiltinFunctionCall-233] - _ = x[ErrParseExpectedTypeName-234] - _ = x[ErrParseExpectedWhenClause-235] - _ = x[ErrParseUnsupportedToken-236] - _ = x[ErrParseUnsupportedLiteralsGroupBy-237] - _ = x[ErrParseExpectedMember-238] - _ = x[ErrParseUnsupportedSelect-239] - _ = x[ErrParseUnsupportedCase-240] - _ = x[ErrParseUnsupportedCaseClause-241] - _ = x[ErrParseUnsupportedAlias-242] - _ = x[ErrParseUnsupportedSyntax-243] - _ = x[ErrParseUnknownOperator-244] - _ = x[ErrParseMissingIdentAfterAt-245] - _ = x[ErrParseUnexpectedOperator-246] - _ = x[ErrParseUnexpectedTerm-247] - _ = x[ErrParseUnexpectedToken-248] - _ = x[ErrParseUnexpectedKeyword-249] - _ = x[ErrParseExpectedExpression-250] - _ = 
x[ErrParseExpectedLeftParenAfterCast-251] - _ = x[ErrParseExpectedLeftParenValueConstructor-252] - _ = x[ErrParseExpectedLeftParenBuiltinFunctionCall-253] - _ = x[ErrParseExpectedArgumentDelimiter-254] - _ = x[ErrParseCastArity-255] - _ = x[ErrParseInvalidTypeParam-256] - _ = x[ErrParseEmptySelect-257] - _ = x[ErrParseSelectMissingFrom-258] - _ = x[ErrParseExpectedIdentForGroupName-259] - _ = x[ErrParseExpectedIdentForAlias-260] - _ = x[ErrParseUnsupportedCallWithStar-261] - _ = x[ErrParseNonUnaryAgregateFunctionCall-262] - _ = x[ErrParseMalformedJoin-263] - _ = x[ErrParseExpectedIdentForAt-264] - _ = x[ErrParseAsteriskIsNotAloneInSelectList-265] - _ = x[ErrParseCannotMixSqbAndWildcardInSelectList-266] - _ = x[ErrParseInvalidContextForWildcardInSelectList-267] - _ = x[ErrIncorrectSQLFunctionArgumentType-268] - _ = x[ErrValueParseFailure-269] - _ = x[ErrEvaluatorInvalidArguments-270] - _ = x[ErrIntegerOverflow-271] - _ = x[ErrLikeInvalidInputs-272] - _ = x[ErrCastFailed-273] - _ = x[ErrInvalidCast-274] - _ = x[ErrEvaluatorInvalidTimestampFormatPattern-275] - _ = x[ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing-276] - _ = x[ErrEvaluatorTimestampFormatPatternDuplicateFields-277] - _ = x[ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch-278] - _ = x[ErrEvaluatorUnterminatedTimestampFormatPatternToken-279] - _ = x[ErrEvaluatorInvalidTimestampFormatPatternToken-280] - _ = x[ErrEvaluatorInvalidTimestampFormatPatternSymbol-281] - _ = x[ErrEvaluatorBindingDoesNotExist-282] - _ = x[ErrMissingHeaders-283] - _ = x[ErrInvalidColumnIndex-284] - _ = x[ErrAdminConfigNotificationTargetsFailed-285] - _ = x[ErrAdminProfilerNotEnabled-286] - _ = x[ErrInvalidDecompressedSize-287] - _ = x[ErrAddUserInvalidArgument-288] - _ = x[ErrAdminResourceInvalidArgument-289] - _ = x[ErrAdminAccountNotEligible-290] - _ = x[ErrAccountNotEligible-291] - _ = x[ErrAdminServiceAccountNotFound-292] - _ = x[ErrPostPolicyConditionInvalidFormat-293] - _ = x[ErrInvalidChecksum-294] + _ = 
x[ErrAdminNoSuchJob-171] + _ = x[ErrAdminNoSuchPolicy-172] + _ = x[ErrAdminInvalidArgument-173] + _ = x[ErrAdminInvalidAccessKey-174] + _ = x[ErrAdminInvalidSecretKey-175] + _ = x[ErrAdminConfigNoQuorum-176] + _ = x[ErrAdminConfigTooLarge-177] + _ = x[ErrAdminConfigBadJSON-178] + _ = x[ErrAdminNoSuchConfigTarget-179] + _ = x[ErrAdminConfigEnvOverridden-180] + _ = x[ErrAdminConfigDuplicateKeys-181] + _ = x[ErrAdminCredentialsMismatch-182] + _ = x[ErrInsecureClientRequest-183] + _ = x[ErrObjectTampered-184] + _ = x[ErrSiteReplicationInvalidRequest-185] + _ = x[ErrSiteReplicationPeerResp-186] + _ = x[ErrSiteReplicationBackendIssue-187] + _ = x[ErrSiteReplicationServiceAccountError-188] + _ = x[ErrSiteReplicationBucketConfigError-189] + _ = x[ErrSiteReplicationBucketMetaError-190] + _ = x[ErrSiteReplicationIAMError-191] + _ = x[ErrSiteReplicationConfigMissing-192] + _ = x[ErrAdminBucketQuotaExceeded-193] + _ = x[ErrAdminNoSuchQuotaConfiguration-194] + _ = x[ErrHealNotImplemented-195] + _ = x[ErrHealNoSuchProcess-196] + _ = x[ErrHealInvalidClientToken-197] + _ = x[ErrHealMissingBucket-198] + _ = x[ErrHealAlreadyRunning-199] + _ = x[ErrHealOverlappingPaths-200] + _ = x[ErrIncorrectContinuationToken-201] + _ = x[ErrEmptyRequestBody-202] + _ = x[ErrUnsupportedFunction-203] + _ = x[ErrInvalidExpressionType-204] + _ = x[ErrBusy-205] + _ = x[ErrUnauthorizedAccess-206] + _ = x[ErrExpressionTooLong-207] + _ = x[ErrIllegalSQLFunctionArgument-208] + _ = x[ErrInvalidKeyPath-209] + _ = x[ErrInvalidCompressionFormat-210] + _ = x[ErrInvalidFileHeaderInfo-211] + _ = x[ErrInvalidJSONType-212] + _ = x[ErrInvalidQuoteFields-213] + _ = x[ErrInvalidRequestParameter-214] + _ = x[ErrInvalidDataType-215] + _ = x[ErrInvalidTextEncoding-216] + _ = x[ErrInvalidDataSource-217] + _ = x[ErrInvalidTableAlias-218] + _ = x[ErrMissingRequiredParameter-219] + _ = x[ErrObjectSerializationConflict-220] + _ = x[ErrUnsupportedSQLOperation-221] + _ = x[ErrUnsupportedSQLStructure-222] + _ = 
x[ErrUnsupportedSyntax-223] + _ = x[ErrUnsupportedRangeHeader-224] + _ = x[ErrLexerInvalidChar-225] + _ = x[ErrLexerInvalidOperator-226] + _ = x[ErrLexerInvalidLiteral-227] + _ = x[ErrLexerInvalidIONLiteral-228] + _ = x[ErrParseExpectedDatePart-229] + _ = x[ErrParseExpectedKeyword-230] + _ = x[ErrParseExpectedTokenType-231] + _ = x[ErrParseExpected2TokenTypes-232] + _ = x[ErrParseExpectedNumber-233] + _ = x[ErrParseExpectedRightParenBuiltinFunctionCall-234] + _ = x[ErrParseExpectedTypeName-235] + _ = x[ErrParseExpectedWhenClause-236] + _ = x[ErrParseUnsupportedToken-237] + _ = x[ErrParseUnsupportedLiteralsGroupBy-238] + _ = x[ErrParseExpectedMember-239] + _ = x[ErrParseUnsupportedSelect-240] + _ = x[ErrParseUnsupportedCase-241] + _ = x[ErrParseUnsupportedCaseClause-242] + _ = x[ErrParseUnsupportedAlias-243] + _ = x[ErrParseUnsupportedSyntax-244] + _ = x[ErrParseUnknownOperator-245] + _ = x[ErrParseMissingIdentAfterAt-246] + _ = x[ErrParseUnexpectedOperator-247] + _ = x[ErrParseUnexpectedTerm-248] + _ = x[ErrParseUnexpectedToken-249] + _ = x[ErrParseUnexpectedKeyword-250] + _ = x[ErrParseExpectedExpression-251] + _ = x[ErrParseExpectedLeftParenAfterCast-252] + _ = x[ErrParseExpectedLeftParenValueConstructor-253] + _ = x[ErrParseExpectedLeftParenBuiltinFunctionCall-254] + _ = x[ErrParseExpectedArgumentDelimiter-255] + _ = x[ErrParseCastArity-256] + _ = x[ErrParseInvalidTypeParam-257] + _ = x[ErrParseEmptySelect-258] + _ = x[ErrParseSelectMissingFrom-259] + _ = x[ErrParseExpectedIdentForGroupName-260] + _ = x[ErrParseExpectedIdentForAlias-261] + _ = x[ErrParseUnsupportedCallWithStar-262] + _ = x[ErrParseNonUnaryAgregateFunctionCall-263] + _ = x[ErrParseMalformedJoin-264] + _ = x[ErrParseExpectedIdentForAt-265] + _ = x[ErrParseAsteriskIsNotAloneInSelectList-266] + _ = x[ErrParseCannotMixSqbAndWildcardInSelectList-267] + _ = x[ErrParseInvalidContextForWildcardInSelectList-268] + _ = x[ErrIncorrectSQLFunctionArgumentType-269] + _ = x[ErrValueParseFailure-270] + _ = 
x[ErrEvaluatorInvalidArguments-271] + _ = x[ErrIntegerOverflow-272] + _ = x[ErrLikeInvalidInputs-273] + _ = x[ErrCastFailed-274] + _ = x[ErrInvalidCast-275] + _ = x[ErrEvaluatorInvalidTimestampFormatPattern-276] + _ = x[ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing-277] + _ = x[ErrEvaluatorTimestampFormatPatternDuplicateFields-278] + _ = x[ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch-279] + _ = x[ErrEvaluatorUnterminatedTimestampFormatPatternToken-280] + _ = x[ErrEvaluatorInvalidTimestampFormatPatternToken-281] + _ = x[ErrEvaluatorInvalidTimestampFormatPatternSymbol-282] + _ = x[ErrEvaluatorBindingDoesNotExist-283] + _ = x[ErrMissingHeaders-284] + _ = x[ErrInvalidColumnIndex-285] + _ = x[ErrAdminConfigNotificationTargetsFailed-286] + _ = x[ErrAdminProfilerNotEnabled-287] + _ = x[ErrInvalidDecompressedSize-288] + _ = x[ErrAddUserInvalidArgument-289] + _ = x[ErrAdminResourceInvalidArgument-290] + _ = x[ErrAdminAccountNotEligible-291] + _ = x[ErrAccountNotEligible-292] + _ = x[ErrAdminServiceAccountNotFound-293] + _ = x[ErrPostPolicyConditionInvalidFormat-294] + _ = x[ErrInvalidChecksum-295] } -const _APIErrorCode_name = 
"NoneAccessDeniedBadDigestEntityTooSmallEntityTooLargePolicyTooLargeIncompleteBodyInternalErrorInvalidAccessKeyIDAccessKeyDisabledInvalidBucketNameInvalidDigestInvalidRangeInvalidRangePartNumberInvalidCopyPartRangeInvalidCopyPartRangeSourceInvalidMaxKeysInvalidEncodingMethodInvalidMaxUploadsInvalidMaxPartsInvalidPartNumberMarkerInvalidPartNumberInvalidRequestBodyInvalidCopySourceInvalidMetadataDirectiveInvalidCopyDestInvalidPolicyDocumentInvalidObjectStateMalformedXMLMissingContentLengthMissingContentMD5MissingRequestBodyErrorMissingSecurityHeaderNoSuchBucketNoSuchBucketPolicyNoSuchBucketLifecycleNoSuchLifecycleConfigurationInvalidLifecycleWithObjectLockNoSuchBucketSSEConfigNoSuchCORSConfigurationNoSuchWebsiteConfigurationReplicationConfigurationNotFoundErrorRemoteDestinationNotFoundErrorReplicationDestinationMissingLockRemoteTargetNotFoundErrorReplicationRemoteConnectionErrorReplicationBandwidthLimitErrorBucketRemoteIdenticalToSourceBucketRemoteAlreadyExistsBucketRemoteLabelInUseBucketRemoteArnTypeInvalidBucketRemoteArnInvalidBucketRemoteRemoveDisallowedRemoteTargetNotVersionedErrorReplicationSourceNotVersionedErrorReplicationNeedsVersioningErrorReplicationBucketNeedsVersioningErrorReplicationDenyEditErrorReplicationNoExistingObjectsObjectRestoreAlreadyInProgressNoSuchKeyNoSuchUploadInvalidVersionIDNoSuchVersionNotImplementedPreconditionFailedRequestTimeTooSkewedSignatureDoesNotMatchMethodNotAllowedInvalidPartInvalidPartOrderAuthorizationHeaderMalformedMalformedPOSTRequestPOSTFileRequiredSignatureVersionNotSupportedBucketNotEmptyAllAccessDisabledMalformedPolicyMissingFieldsMissingCredTagCredMalformedInvalidRegionInvalidServiceS3InvalidServiceSTSInvalidRequestVersionMissingSignTagMissingSignHeadersTagMalformedDateMalformedPresignedDateMalformedCredentialDateMalformedCredentialRegionMalformedExpiresNegativeExpiresAuthHeaderEmptyExpiredPresignRequestRequestNotReadyYetUnsignedHeadersMissingDateHeaderInvalidQuerySignatureAlgoInvalidQueryParamsBucketAlreadyOwnedByYouInva
lidDurationBucketAlreadyExistsTooManyBucketsMetadataTooLargeUnsupportedMetadataMaximumExpiresSlowDownInvalidPrefixMarkerBadRequestKeyTooLongErrorInvalidBucketObjectLockConfigurationObjectLockConfigurationNotFoundObjectLockConfigurationNotAllowedNoSuchObjectLockConfigurationObjectLockedInvalidRetentionDatePastObjectLockRetainDateUnknownWORMModeDirectiveBucketTaggingNotFoundObjectLockInvalidHeadersInvalidTagDirectiveInvalidEncryptionMethodInvalidEncryptionKeyIDInsecureSSECustomerRequestSSEMultipartEncryptedSSEEncryptedObjectInvalidEncryptionParametersInvalidSSECustomerAlgorithmInvalidSSECustomerKeyMissingSSECustomerKeyMissingSSECustomerKeyMD5SSECustomerKeyMD5MismatchInvalidSSECustomerParametersIncompatibleEncryptionMethodKMSNotConfiguredKMSKeyNotFoundExceptionNoAccessKeyInvalidTokenEventNotificationARNNotificationRegionNotificationOverlappingFilterNotificationFilterNameInvalidFilterNamePrefixFilterNameSuffixFilterValueInvalidOverlappingConfigsUnsupportedNotificationContentSHA256MismatchContentChecksumMismatchReadQuorumWriteQuorumStorageFullRequestBodyParseObjectExistsAsDirectoryInvalidObjectNameInvalidObjectNamePrefixSlashInvalidResourceNameServerNotInitializedOperationTimedOutClientDisconnectedOperationMaxedOutInvalidRequestTransitionStorageClassNotFoundErrorInvalidStorageClassBackendDownMalformedJSONAdminNoSuchUserAdminNoSuchGroupAdminGroupNotEmptyAdminNoSuchPolicyAdminInvalidArgumentAdminInvalidAccessKeyAdminInvalidSecretKeyAdminConfigNoQuorumAdminConfigTooLargeAdminConfigBadJSONAdminNoSuchConfigTargetAdminConfigEnvOverriddenAdminConfigDuplicateKeysAdminCredentialsMismatchInsecureClientRequestObjectTamperedSiteReplicationInvalidRequestSiteReplicationPeerRespSiteReplicationBackendIssueSiteReplicationServiceAccountErrorSiteReplicationBucketConfigErrorSiteReplicationBucketMetaErrorSiteReplicationIAMErrorSiteReplicationConfigMissingAdminBucketQuotaExceededAdminNoSuchQuotaConfigurationHealNotImplementedHealNoSuchProcessHealInvalidClientTokenHealMissingBucketHealAlreadyR
unningHealOverlappingPathsIncorrectContinuationTokenEmptyRequestBodyUnsupportedFunctionInvalidExpressionTypeBusyUnauthorizedAccessExpressionTooLongIllegalSQLFunctionArgumentInvalidKeyPathInvalidCompressionFormatInvalidFileHeaderInfoInvalidJSONTypeInvalidQuoteFieldsInvalidRequestParameterInvalidDataTypeInvalidTextEncodingInvalidDataSourceInvalidTableAliasMissingRequiredParameterObjectSerializationConflictUnsupportedSQLOperationUnsupportedSQLStructureUnsupportedSyntaxUnsupportedRangeHeaderLexerInvalidCharLexerInvalidOperatorLexerInvalidLiteralLexerInvalidIONLiteralParseExpectedDatePartParseExpectedKeywordParseExpectedTokenTypeParseExpected2TokenTypesParseExpectedNumberParseExpectedRightParenBuiltinFunctionCallParseExpectedTypeNameParseExpectedWhenClauseParseUnsupportedTokenParseUnsupportedLiteralsGroupByParseExpectedMemberParseUnsupportedSelectParseUnsupportedCaseParseUnsupportedCaseClauseParseUnsupportedAliasParseUnsupportedSyntaxParseUnknownOperatorParseMissingIdentAfterAtParseUnexpectedOperatorParseUnexpectedTermParseUnexpectedTokenParseUnexpectedKeywordParseExpectedExpressionParseExpectedLeftParenAfterCastParseExpectedLeftParenValueConstructorParseExpectedLeftParenBuiltinFunctionCallParseExpectedArgumentDelimiterParseCastArityParseInvalidTypeParamParseEmptySelectParseSelectMissingFromParseExpectedIdentForGroupNameParseExpectedIdentForAliasParseUnsupportedCallWithStarParseNonUnaryAgregateFunctionCallParseMalformedJoinParseExpectedIdentForAtParseAsteriskIsNotAloneInSelectListParseCannotMixSqbAndWildcardInSelectListParseInvalidContextForWildcardInSelectListIncorrectSQLFunctionArgumentTypeValueParseFailureEvaluatorInvalidArgumentsIntegerOverflowLikeInvalidInputsCastFailedInvalidCastEvaluatorInvalidTimestampFormatPatternEvaluatorInvalidTimestampFormatPatternSymbolForParsingEvaluatorTimestampFormatPatternDuplicateFieldsEvaluatorTimestampFormatPatternHourClockAmPmMismatchEvaluatorUnterminatedTimestampFormatPatternTokenEvaluatorInvalidTimestampFormatPatternTokenEvaluatorI
nvalidTimestampFormatPatternSymbolEvaluatorBindingDoesNotExistMissingHeadersInvalidColumnIndexAdminConfigNotificationTargetsFailedAdminProfilerNotEnabledInvalidDecompressedSizeAddUserInvalidArgumentAdminResourceInvalidArgumentAdminAccountNotEligibleAccountNotEligibleAdminServiceAccountNotFoundPostPolicyConditionInvalidFormatInvalidChecksum" +const _APIErrorCode_name = "NoneAccessDeniedBadDigestEntityTooSmallEntityTooLargePolicyTooLargeIncompleteBodyInternalErrorInvalidAccessKeyIDAccessKeyDisabledInvalidBucketNameInvalidDigestInvalidRangeInvalidRangePartNumberInvalidCopyPartRangeInvalidCopyPartRangeSourceInvalidMaxKeysInvalidEncodingMethodInvalidMaxUploadsInvalidMaxPartsInvalidPartNumberMarkerInvalidPartNumberInvalidRequestBodyInvalidCopySourceInvalidMetadataDirectiveInvalidCopyDestInvalidPolicyDocumentInvalidObjectStateMalformedXMLMissingContentLengthMissingContentMD5MissingRequestBodyErrorMissingSecurityHeaderNoSuchBucketNoSuchBucketPolicyNoSuchBucketLifecycleNoSuchLifecycleConfigurationInvalidLifecycleWithObjectLockNoSuchBucketSSEConfigNoSuchCORSConfigurationNoSuchWebsiteConfigurationReplicationConfigurationNotFoundErrorRemoteDestinationNotFoundErrorReplicationDestinationMissingLockRemoteTargetNotFoundErrorReplicationRemoteConnectionErrorReplicationBandwidthLimitErrorBucketRemoteIdenticalToSourceBucketRemoteAlreadyExistsBucketRemoteLabelInUseBucketRemoteArnTypeInvalidBucketRemoteArnInvalidBucketRemoteRemoveDisallowedRemoteTargetNotVersionedErrorReplicationSourceNotVersionedErrorReplicationNeedsVersioningErrorReplicationBucketNeedsVersioningErrorReplicationDenyEditErrorReplicationNoExistingObjectsObjectRestoreAlreadyInProgressNoSuchKeyNoSuchUploadInvalidVersionIDNoSuchVersionNotImplementedPreconditionFailedRequestTimeTooSkewedSignatureDoesNotMatchMethodNotAllowedInvalidPartInvalidPartOrderAuthorizationHeaderMalformedMalformedPOSTRequestPOSTFileRequiredSignatureVersionNotSupportedBucketNotEmptyAllAccessDisabledMalformedPolicyMissingFieldsMissingCredTagCredMalformedI
nvalidRegionInvalidServiceS3InvalidServiceSTSInvalidRequestVersionMissingSignTagMissingSignHeadersTagMalformedDateMalformedPresignedDateMalformedCredentialDateMalformedCredentialRegionMalformedExpiresNegativeExpiresAuthHeaderEmptyExpiredPresignRequestRequestNotReadyYetUnsignedHeadersMissingDateHeaderInvalidQuerySignatureAlgoInvalidQueryParamsBucketAlreadyOwnedByYouInvalidDurationBucketAlreadyExistsTooManyBucketsMetadataTooLargeUnsupportedMetadataMaximumExpiresSlowDownInvalidPrefixMarkerBadRequestKeyTooLongErrorInvalidBucketObjectLockConfigurationObjectLockConfigurationNotFoundObjectLockConfigurationNotAllowedNoSuchObjectLockConfigurationObjectLockedInvalidRetentionDatePastObjectLockRetainDateUnknownWORMModeDirectiveBucketTaggingNotFoundObjectLockInvalidHeadersInvalidTagDirectiveInvalidEncryptionMethodInvalidEncryptionKeyIDInsecureSSECustomerRequestSSEMultipartEncryptedSSEEncryptedObjectInvalidEncryptionParametersInvalidSSECustomerAlgorithmInvalidSSECustomerKeyMissingSSECustomerKeyMissingSSECustomerKeyMD5SSECustomerKeyMD5MismatchInvalidSSECustomerParametersIncompatibleEncryptionMethodKMSNotConfiguredKMSKeyNotFoundExceptionNoAccessKeyInvalidTokenEventNotificationARNNotificationRegionNotificationOverlappingFilterNotificationFilterNameInvalidFilterNamePrefixFilterNameSuffixFilterValueInvalidOverlappingConfigsUnsupportedNotificationContentSHA256MismatchContentChecksumMismatchReadQuorumWriteQuorumStorageFullRequestBodyParseObjectExistsAsDirectoryInvalidObjectNameInvalidObjectNamePrefixSlashInvalidResourceNameServerNotInitializedOperationTimedOutClientDisconnectedOperationMaxedOutInvalidRequestTransitionStorageClassNotFoundErrorInvalidStorageClassBackendDownMalformedJSONAdminNoSuchUserAdminNoSuchGroupAdminGroupNotEmptyAdminNoSuchJobAdminNoSuchPolicyAdminInvalidArgumentAdminInvalidAccessKeyAdminInvalidSecretKeyAdminConfigNoQuorumAdminConfigTooLargeAdminConfigBadJSONAdminNoSuchConfigTargetAdminConfigEnvOverriddenAdminConfigDuplicateKeysAdminCredentialsMismatchInsecureClientR
equestObjectTamperedSiteReplicationInvalidRequestSiteReplicationPeerRespSiteReplicationBackendIssueSiteReplicationServiceAccountErrorSiteReplicationBucketConfigErrorSiteReplicationBucketMetaErrorSiteReplicationIAMErrorSiteReplicationConfigMissingAdminBucketQuotaExceededAdminNoSuchQuotaConfigurationHealNotImplementedHealNoSuchProcessHealInvalidClientTokenHealMissingBucketHealAlreadyRunningHealOverlappingPathsIncorrectContinuationTokenEmptyRequestBodyUnsupportedFunctionInvalidExpressionTypeBusyUnauthorizedAccessExpressionTooLongIllegalSQLFunctionArgumentInvalidKeyPathInvalidCompressionFormatInvalidFileHeaderInfoInvalidJSONTypeInvalidQuoteFieldsInvalidRequestParameterInvalidDataTypeInvalidTextEncodingInvalidDataSourceInvalidTableAliasMissingRequiredParameterObjectSerializationConflictUnsupportedSQLOperationUnsupportedSQLStructureUnsupportedSyntaxUnsupportedRangeHeaderLexerInvalidCharLexerInvalidOperatorLexerInvalidLiteralLexerInvalidIONLiteralParseExpectedDatePartParseExpectedKeywordParseExpectedTokenTypeParseExpected2TokenTypesParseExpectedNumberParseExpectedRightParenBuiltinFunctionCallParseExpectedTypeNameParseExpectedWhenClauseParseUnsupportedTokenParseUnsupportedLiteralsGroupByParseExpectedMemberParseUnsupportedSelectParseUnsupportedCaseParseUnsupportedCaseClauseParseUnsupportedAliasParseUnsupportedSyntaxParseUnknownOperatorParseMissingIdentAfterAtParseUnexpectedOperatorParseUnexpectedTermParseUnexpectedTokenParseUnexpectedKeywordParseExpectedExpressionParseExpectedLeftParenAfterCastParseExpectedLeftParenValueConstructorParseExpectedLeftParenBuiltinFunctionCallParseExpectedArgumentDelimiterParseCastArityParseInvalidTypeParamParseEmptySelectParseSelectMissingFromParseExpectedIdentForGroupNameParseExpectedIdentForAliasParseUnsupportedCallWithStarParseNonUnaryAgregateFunctionCallParseMalformedJoinParseExpectedIdentForAtParseAsteriskIsNotAloneInSelectListParseCannotMixSqbAndWildcardInSelectListParseInvalidContextForWildcardInSelectListIncorrectSQLFunctionArgumentTypeV
alueParseFailureEvaluatorInvalidArgumentsIntegerOverflowLikeInvalidInputsCastFailedInvalidCastEvaluatorInvalidTimestampFormatPatternEvaluatorInvalidTimestampFormatPatternSymbolForParsingEvaluatorTimestampFormatPatternDuplicateFieldsEvaluatorTimestampFormatPatternHourClockAmPmMismatchEvaluatorUnterminatedTimestampFormatPatternTokenEvaluatorInvalidTimestampFormatPatternTokenEvaluatorInvalidTimestampFormatPatternSymbolEvaluatorBindingDoesNotExistMissingHeadersInvalidColumnIndexAdminConfigNotificationTargetsFailedAdminProfilerNotEnabledInvalidDecompressedSizeAddUserInvalidArgumentAdminResourceInvalidArgumentAdminAccountNotEligibleAccountNotEligibleAdminServiceAccountNotFoundPostPolicyConditionInvalidFormatInvalidChecksum" -var _APIErrorCode_index = [...]uint16{0, 4, 16, 25, 39, 53, 67, 81, 94, 112, 129, 146, 159, 171, 193, 213, 239, 253, 274, 291, 306, 329, 346, 364, 381, 405, 420, 441, 459, 471, 491, 508, 531, 552, 564, 582, 603, 631, 661, 682, 705, 731, 768, 798, 831, 856, 888, 918, 947, 972, 994, 1020, 1042, 1070, 1099, 1133, 1164, 1201, 1225, 1253, 1283, 1292, 1304, 1320, 1333, 1347, 1365, 1385, 1406, 1422, 1433, 1449, 1477, 1497, 1513, 1541, 1555, 1572, 1587, 1600, 1614, 1627, 1640, 1656, 1673, 1694, 1708, 1729, 1742, 1764, 1787, 1812, 1828, 1843, 1858, 1879, 1897, 1912, 1929, 1954, 1972, 1995, 2010, 2029, 2043, 2059, 2078, 2092, 2100, 2119, 2129, 2144, 2180, 2211, 2244, 2273, 2285, 2305, 2329, 2353, 2374, 2398, 2417, 2440, 2462, 2488, 2509, 2527, 2554, 2581, 2602, 2623, 2647, 2672, 2700, 2728, 2744, 2767, 2778, 2790, 2807, 2822, 2840, 2869, 2886, 2902, 2918, 2936, 2954, 2977, 2998, 3021, 3031, 3042, 3053, 3069, 3092, 3109, 3137, 3156, 3176, 3193, 3211, 3228, 3242, 3277, 3296, 3307, 3320, 3335, 3351, 3369, 3386, 3406, 3427, 3448, 3467, 3486, 3504, 3527, 3551, 3575, 3599, 3620, 3634, 3663, 3686, 3713, 3747, 3779, 3809, 3832, 3860, 3884, 3913, 3931, 3948, 3970, 3987, 4005, 4025, 4051, 4067, 4086, 4107, 4111, 4129, 4146, 4172, 4186, 4210, 4231, 4246, 4264, 4287, 
4302, 4321, 4338, 4355, 4379, 4406, 4429, 4452, 4469, 4491, 4507, 4527, 4546, 4568, 4589, 4609, 4631, 4655, 4674, 4716, 4737, 4760, 4781, 4812, 4831, 4853, 4873, 4899, 4920, 4942, 4962, 4986, 5009, 5028, 5048, 5070, 5093, 5124, 5162, 5203, 5233, 5247, 5268, 5284, 5306, 5336, 5362, 5390, 5423, 5441, 5464, 5499, 5539, 5581, 5613, 5630, 5655, 5670, 5687, 5697, 5708, 5746, 5800, 5846, 5898, 5946, 5989, 6033, 6061, 6075, 6093, 6129, 6152, 6175, 6197, 6225, 6248, 6266, 6293, 6325, 6340} +var _APIErrorCode_index = [...]uint16{0, 4, 16, 25, 39, 53, 67, 81, 94, 112, 129, 146, 159, 171, 193, 213, 239, 253, 274, 291, 306, 329, 346, 364, 381, 405, 420, 441, 459, 471, 491, 508, 531, 552, 564, 582, 603, 631, 661, 682, 705, 731, 768, 798, 831, 856, 888, 918, 947, 972, 994, 1020, 1042, 1070, 1099, 1133, 1164, 1201, 1225, 1253, 1283, 1292, 1304, 1320, 1333, 1347, 1365, 1385, 1406, 1422, 1433, 1449, 1477, 1497, 1513, 1541, 1555, 1572, 1587, 1600, 1614, 1627, 1640, 1656, 1673, 1694, 1708, 1729, 1742, 1764, 1787, 1812, 1828, 1843, 1858, 1879, 1897, 1912, 1929, 1954, 1972, 1995, 2010, 2029, 2043, 2059, 2078, 2092, 2100, 2119, 2129, 2144, 2180, 2211, 2244, 2273, 2285, 2305, 2329, 2353, 2374, 2398, 2417, 2440, 2462, 2488, 2509, 2527, 2554, 2581, 2602, 2623, 2647, 2672, 2700, 2728, 2744, 2767, 2778, 2790, 2807, 2822, 2840, 2869, 2886, 2902, 2918, 2936, 2954, 2977, 2998, 3021, 3031, 3042, 3053, 3069, 3092, 3109, 3137, 3156, 3176, 3193, 3211, 3228, 3242, 3277, 3296, 3307, 3320, 3335, 3351, 3369, 3383, 3400, 3420, 3441, 3462, 3481, 3500, 3518, 3541, 3565, 3589, 3613, 3634, 3648, 3677, 3700, 3727, 3761, 3793, 3823, 3846, 3874, 3898, 3927, 3945, 3962, 3984, 4001, 4019, 4039, 4065, 4081, 4100, 4121, 4125, 4143, 4160, 4186, 4200, 4224, 4245, 4260, 4278, 4301, 4316, 4335, 4352, 4369, 4393, 4420, 4443, 4466, 4483, 4505, 4521, 4541, 4560, 4582, 4603, 4623, 4645, 4669, 4688, 4730, 4751, 4774, 4795, 4826, 4845, 4867, 4887, 4913, 4934, 4956, 4976, 5000, 5023, 5042, 5062, 5084, 5107, 5138, 5176, 5217, 
5247, 5261, 5282, 5298, 5320, 5350, 5376, 5404, 5437, 5455, 5478, 5513, 5553, 5595, 5627, 5644, 5669, 5684, 5701, 5711, 5722, 5760, 5814, 5860, 5912, 5960, 6003, 6047, 6075, 6089, 6107, 6143, 6166, 6189, 6211, 6239, 6262, 6280, 6307, 6339, 6354} func (i APIErrorCode) String() string { if i < 0 || i >= APIErrorCode(len(_APIErrorCode_index)-1) { diff --git a/cmd/batch-handlers.go b/cmd/batch-handlers.go new file mode 100644 index 000000000..9ce973a25 --- /dev/null +++ b/cmd/batch-handlers.go @@ -0,0 +1,1079 @@ +// Copyright (c) 2015-2022 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package cmd + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io" + "math/rand" + "net/http" + "net/url" + "sync" + "time" + + "github.com/dustin/go-humanize" + "github.com/lithammer/shortuuid/v4" + "github.com/minio/madmin-go" + miniogo "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio/internal/auth" + xhttp "github.com/minio/minio/internal/http" + "github.com/minio/minio/internal/logger" + "github.com/minio/pkg/console" + "github.com/minio/pkg/wildcard" + "gopkg.in/yaml.v2" +) + +// replicate: +// # source of the objects to be replicated +// source: +// type: "minio" +// bucket: "testbucket" +// prefix: "spark/" +// +// # optional flags based filtering criteria +// # for source objects +// flags: +// filter: +// newerThan: "7d" +// olderThan: "7d" +// createdAfter: "date" +// createdBefore: "date" +// tags: +// - key: "name" +// value: "value*" +// metadata: +// - key: "content-type" +// value: "image/*" +// notify: +// endpoint: "https://splunk-hec.dev.com" +// token: "Splunk ..." # e.g. "Bearer token" +// +// # target where the objects must be replicated +// target: +// type: "minio" +// bucket: "testbucket1" +// endpoint: "https://play.min.io" +// credentials: +// accessKey: "minioadmin" +// secretKey: "minioadmin" +// sessionToken: "" + +// BatchJobReplicateKV is a datatype that holds key and values for filtering of objects +// used by metadata filter as well as tags based filtering. 
+type BatchJobReplicateKV struct { + Key string `yaml:"key" json:"key"` + Value string `yaml:"value" json:"value"` +} + +// Validate returns an error if key is empty +func (kv BatchJobReplicateKV) Validate() error { + if kv.Key == "" { + return errInvalidArgument + } + return nil +} + +// Empty indicates if kv is not set +func (kv BatchJobReplicateKV) Empty() bool { + return kv.Key == "" && kv.Value == "" +} + +// Match matches input kv with kv, value will be wildcard matched depending on the user input +func (kv BatchJobReplicateKV) Match(ikv BatchJobReplicateKV) bool { + if kv.Empty() { + return true + } + if kv.Key == ikv.Key { + return wildcard.Match(kv.Value, ikv.Value) + } + return false +} + +// BatchReplicateRetry datatype represents total retry attempts and delay between each retries. +type BatchReplicateRetry struct { + Attempts int `yaml:"attempts" json:"attempts"` // number of retry attempts + Delay time.Duration `yaml:"delay" json:"delay"` // delay between each retries +} + +// Validate validates input replicate retries. 
+func (r BatchReplicateRetry) Validate() error { + if r.Attempts < 0 { + return errInvalidArgument + } + + if r.Delay < 0 { + return errInvalidArgument + } + + return nil +} + +// BatchReplicateFilter holds all the filters currently supported for batch replication +type BatchReplicateFilter struct { + NewerThan time.Duration `yaml:"newerThan,omitempty" json:"newerThan"` + OlderThan time.Duration `yaml:"olderThan,omitempty" json:"olderThan"` + CreatedAfter time.Time `yaml:"createdAfter,omitempty" json:"createdAfter"` + CreatedBefore time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"` + Tags []BatchJobReplicateKV `yaml:"tags,omitempty" json:"tags"` + Metadata []BatchJobReplicateKV `yaml:"metadata,omitempty" json:"metadata"` +} + +// BatchReplicateNotification success or failure notification endpoint for each job attempts +type BatchReplicateNotification struct { + Endpoint string `yaml:"endpoint" json:"endpoint"` + Token string `yaml:"token" json:"token"` +} + +// BatchJobReplicateFlags various configurations for replication job definition currently includes +// - filter +// - notify +// - retry +type BatchJobReplicateFlags struct { + Filter BatchReplicateFilter `yaml:"filter" json:"filter"` + Notify BatchReplicateNotification `yaml:"notify" json:"notify"` + Retry BatchReplicateRetry `yaml:"retry" json:"retry"` +} + +// BatchJobReplicateResourceType defines the type of batch jobs +type BatchJobReplicateResourceType string + +// Validate validates if the replicate resource type is recognized and supported +func (t BatchJobReplicateResourceType) Validate() error { + switch t { + case BatchJobReplicateResourceMinIO: + default: + return errInvalidArgument + } + return nil +} + +// Different types of batch jobs.. +const ( + BatchJobReplicateResourceMinIO BatchJobReplicateResourceType = "minio" + // add future targets +) + +// BatchJobReplicateCredentials access credentials for batch replication it may +// be either for target or source. 
+type BatchJobReplicateCredentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty" yaml:"accessKey"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty" yaml:"secretKey"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty" yaml:"sessionToken"` +} + +// Validate validates if credentials are valid +func (c BatchJobReplicateCredentials) Validate() error { + if !auth.IsAccessKeyValid(c.AccessKey) || !auth.IsSecretKeyValid(c.SecretKey) { + return errInvalidArgument + } + return nil +} + +// BatchJobReplicateTarget describes target element of the replication job that receives +// the filtered data from source +type BatchJobReplicateTarget struct { + Type BatchJobReplicateResourceType `yaml:"type" json:"type"` + Bucket string `yaml:"bucket" json:"bucket"` + Prefix string `yaml:"prefix" json:"prefix"` + Endpoint string `yaml:"endpoint" json:"endpoint"` + Creds BatchJobReplicateCredentials `yaml:"credentials" json:"credentials"` +} + +// BatchJobReplicateSource describes source element of the replication job that is +// the source of the data for the target +type BatchJobReplicateSource struct { + Type BatchJobReplicateResourceType `yaml:"type" json:"type"` + Bucket string `yaml:"bucket" json:"bucket"` + Prefix string `yaml:"prefix" json:"prefix"` + Endpoint string `yaml:"endpoint" json:"endpoint"` + Creds BatchJobReplicateCredentials `yaml:"credentials" json:"credentials"` +} + +// BatchJobReplicateV1 v1 of batch job replication +type BatchJobReplicateV1 struct { + APIVersion string `yaml:"apiVersion" json:"apiVersion"` + Flags BatchJobReplicateFlags `yaml:"flags" json:"flags"` + Target BatchJobReplicateTarget `yaml:"target" json:"target"` + Source BatchJobReplicateSource `yaml:"source" json:"source"` +} + +// BatchJobRequest this is an internal data structure not for external consumption. 
+type BatchJobRequest struct { + ID string `yaml:"-" json:"name"` + User string `yaml:"-" json:"user"` + Started time.Time `yaml:"-" json:"started"` + Location string `yaml:"-" json:"location"` + Replicate *BatchJobReplicateV1 `yaml:"replicate" json:"replicate"` +} + +// Notify notifies notification endpoint if configured regarding job failure or success. +func (r BatchJobReplicateV1) Notify(ctx context.Context, body io.Reader) error { + if r.Flags.Notify.Endpoint == "" { + return nil + } + + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, r.Flags.Notify.Endpoint, body) + if err != nil { + return err + } + + if r.Flags.Notify.Token != "" { + req.Header.Set("Authorization", r.Flags.Notify.Token) + } + + clnt := http.Client{Transport: getRemoteInstanceTransport} + resp, err := clnt.Do(req) + if err != nil { + return err + } + + xhttp.DrainBody(resp.Body) + if resp.StatusCode != http.StatusOK { + return errors.New(resp.Status) + } + + return nil +} + +// ReplicateFromSource - this is not implemented yet where source is 'remote' and target is local. +func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api ObjectLayer, c *miniogo.Core, srcObject string) error { + return nil +} + +// ReplicateToTarget read from source and replicate to configured target +func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectLayer, c *miniogo.Core, srcObjInfo ObjectInfo, retry bool) error { + srcBucket := r.Source.Bucket + tgtBucket := r.Target.Bucket + srcObject := srcObjInfo.Name + + if retry { // when we are retrying avoid copying if necessary. 
+ gopts := miniogo.GetObjectOptions{} + if err := gopts.SetMatchETag(srcObjInfo.ETag); err != nil { + return err + } + if _, err := c.StatObject(ctx, tgtBucket, srcObject, gopts); err == nil { + return nil + } + } + + opts := ObjectOptions{} + rd, err := api.GetObjectNInfo(ctx, srcBucket, srcObject, nil, http.Header{}, readLock, opts) + if err != nil { + return err + } + defer rd.Close() + objInfo := rd.ObjInfo + + size, err := objInfo.GetActualSize() + if err != nil { + return err + } + + putOpts, err := batchReplicationOpts(ctx, "", objInfo) + if err != nil { + return err + } + + if objInfo.isMultipart() { + if err := replicateObjectWithMultipart(ctx, c, tgtBucket, objInfo.Name, rd, objInfo, putOpts); err != nil { + return err + } + } else { + if _, err = c.PutObject(ctx, tgtBucket, objInfo.Name, rd, size, "", "", putOpts); err != nil { + return err + } + } + return nil +} + +//go:generate msgp -file $GOFILE -unexported + +// batchJobInfo current batch replication information +type batchJobInfo struct { + Version int `json:"-" msg:"v"` + JobID string `json:"jobID" msg:"jid"` + JobType string `json:"jobType" msg:"jt"` + StartTime time.Time `json:"startTime" msg:"st"` + LastUpdate time.Time `json:"lastUpdate" msg:"lu"` + RetryAttempts int `json:"retryAttempts" msg:"ra"` + + Complete bool `json:"complete" msg:"cmp"` + Failed bool `json:"failed" msg:"fld"` + + // Last bucket/object batch replicated + Bucket string `json:"-" msg:"lbkt"` + Object string `json:"-" msg:"lobj"` + + // Verbose information + Objects int64 `json:"objects" msg:"ob"` + ObjectsFailed int64 `json:"objectsFailed" msg:"obf"` + BytesTransferred int64 `json:"bytesTransferred" msg:"bt"` + BytesFailed int64 `json:"bytesFailed" msg:"bf"` +} + +const ( + batchReplName = "batch-replicate.bin" + batchReplFormat = 1 + batchReplVersionV1 = 1 + batchReplVersion = batchReplVersionV1 + batchJobName = "job.bin" + batchJobPrefix = "batch-jobs" + + batchReplJobAPIVersion = "v1" + batchReplJobDefaultRetries = 3 + 
batchReplJobDefaultRetryDelay = 250 * time.Millisecond +) + +func (ri *batchJobInfo) load(ctx context.Context, api ObjectLayer, job BatchJobRequest) error { + data, err := readConfig(ctx, api, pathJoin(job.Location, batchReplName)) + if err != nil { + if errors.Is(err, errConfigNotFound) || isErrObjectNotFound(err) { + ri.Version = batchReplVersionV1 + if job.Replicate.Flags.Retry.Attempts > 0 { + ri.RetryAttempts = job.Replicate.Flags.Retry.Attempts + } else { + ri.RetryAttempts = batchReplJobDefaultRetries + } + return nil + } + return err + } + if len(data) == 0 { + // Seems to be empty create a new batchRepl object. + return nil + } + if len(data) <= 4 { + return fmt.Errorf("batchRepl: no data") + } + // Read header + switch binary.LittleEndian.Uint16(data[0:2]) { + case batchReplFormat: + default: + return fmt.Errorf("batchRepl: unknown format: %d", binary.LittleEndian.Uint16(data[0:2])) + } + switch binary.LittleEndian.Uint16(data[2:4]) { + case batchReplVersion: + default: + return fmt.Errorf("batchRepl: unknown version: %d", binary.LittleEndian.Uint16(data[2:4])) + } + + // OK, parse data. + if _, err = ri.UnmarshalMsg(data[4:]); err != nil { + return err + } + + switch ri.Version { + case batchReplVersionV1: + default: + return fmt.Errorf("unexpected batch repl meta version: %d", ri.Version) + } + + return nil +} + +func (ri batchJobInfo) clone() batchJobInfo { + return batchJobInfo{ + Version: ri.Version, + JobID: ri.JobID, + JobType: ri.JobType, + RetryAttempts: ri.RetryAttempts, + Complete: ri.Complete, + Failed: ri.Failed, + StartTime: ri.StartTime, + LastUpdate: ri.LastUpdate, + Bucket: ri.Bucket, + Object: ri.Object, + Objects: ri.Objects, + ObjectsFailed: ri.ObjectsFailed, + BytesTransferred: ri.BytesTransferred, + BytesFailed: ri.BytesFailed, + } +} + +func (ri batchJobInfo) save(ctx context.Context, api ObjectLayer, jobLocation string) error { + data := make([]byte, 4, ri.Msgsize()+4) + + // Initialize the header. 
+ binary.LittleEndian.PutUint16(data[0:2], batchReplFormat) + binary.LittleEndian.PutUint16(data[2:4], batchReplVersion) + + buf, err := ri.MarshalMsg(data) + if err != nil { + return err + } + + return saveConfig(ctx, api, pathJoin(jobLocation, batchReplName), buf) +} + +func (ri *batchJobInfo) countItem(size int64, success bool) { + if ri == nil { + return + } + if success { + ri.Objects++ + ri.BytesTransferred += size + } else { + ri.ObjectsFailed++ + ri.BytesFailed += size + } +} + +func (ri *batchJobInfo) updateAfter(ctx context.Context, api ObjectLayer, duration time.Duration, jobLocation string) error { + if ri == nil { + return errInvalidArgument + } + now := UTCNow() + if now.Sub(ri.LastUpdate) >= duration { + if serverDebugLog { + console.Debugf("batchReplicate: persisting batchReplication info on drive: threshold:%s, batchRepl:%#v\n", now.Sub(ri.LastUpdate), ri) + } + ri.LastUpdate = now + ri.Version = batchReplVersionV1 + return ri.save(ctx, api, jobLocation) + } + return nil +} + +func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo, failed bool) { + if ri == nil { + return + } + ri.Bucket = bucket + ri.Object = info.Name + ri.countItem(info.Size, failed) +} + +// Start start the batch replication job, resumes if there was a pending job via "job.ID" +func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job BatchJobRequest) error { + ri := &batchJobInfo{ + JobID: job.ID, + JobType: string(job.Type()), + StartTime: job.Started, + } + if err := ri.load(ctx, api, job); err != nil { + return err + } + globalBatchJobsMetrics.save(job.ID, ri.clone()) + lastObject := ri.Object + + delay := job.Replicate.Flags.Retry.Delay + if delay == 0 { + delay = batchReplJobDefaultRetryDelay + } + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + + skip := func(info FileInfo) (ok bool) { + if r.Flags.Filter.OlderThan > 0 && time.Since(info.ModTime) < r.Flags.Filter.OlderThan { + // skip all objects that are newer than 
specified older duration + return false + } + + if r.Flags.Filter.NewerThan > 0 && time.Since(info.ModTime) >= r.Flags.Filter.NewerThan { + // skip all objects that are older than specified newer duration + return false + } + + if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.Before(info.ModTime) { + // skip all objects that are created before the specified time. + return false + } + + if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.After(info.ModTime) { + // skip all objects that are created after the specified time. + return false + } + + return true + } + + u, err := url.Parse(r.Target.Endpoint) + if err != nil { + return err + } + + cred := r.Target.Creds + + c, err := miniogo.NewCore(u.Host, &miniogo.Options{ + Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken), + Secure: u.Scheme == "https", + Transport: getRemoteInstanceTransport, + }) + if err != nil { + return err + } + + retryAttempts := ri.RetryAttempts + retry := false + for attempts := 1; attempts <= retryAttempts; attempts++ { + ctx, cancel := context.WithCancel(ctx) + + results := make(chan ObjectInfo, 100) + if err := api.Walk(ctx, r.Source.Bucket, r.Source.Prefix, results, ObjectOptions{ + WalkMarker: lastObject, + WalkFilter: skip, + }); err != nil { + cancel() + // Do not need to retry if we can't list objects on source. + return err + } + + for result := range results { + if result.DeleteMarker { + // delete-markers will never be 'replicated' + continue + } + + stopFn := globalBatchJobsMetrics.trace(batchReplicationMetricObject, job.ID, attempts, result) + success := true + if err := r.ReplicateToTarget(ctx, api, c, result, retry); err != nil { + stopFn(err) + logger.LogIf(ctx, err) + success = false + } else { + stopFn(nil) + } + ri.trackCurrentBucketObject(r.Source.Bucket, result, success) + globalBatchJobsMetrics.save(job.ID, ri.clone()) + // persist in-memory state to disk after every 10secs. 
+ logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job.Location)) + } + + ri.RetryAttempts = attempts + ri.Complete = ri.ObjectsFailed == 0 + ri.Failed = ri.ObjectsFailed > 0 + + globalBatchJobsMetrics.save(job.ID, ri.clone()) + + buf, _ := json.Marshal(ri) + if err := r.Notify(ctx, bytes.NewReader(buf)); err != nil { + logger.LogIf(ctx, fmt.Errorf("Unable to notify %v", err)) + } + + cancel() + if ri.Failed { + ri.ObjectsFailed = 0 + ri.Bucket = "" + ri.Object = "" + ri.Objects = 0 + ri.BytesFailed = 0 + ri.BytesTransferred = 0 + retry = true // indicate we are retrying.. + time.Sleep(delay + time.Duration(rnd.Float64()*float64(delay))) + continue + } + + break + } + + return nil +} + +// Validate validates the job definition input +func (r *BatchJobReplicateV1) Validate(ctx context.Context, o ObjectLayer) error { + if r == nil { + return nil + } + + if r.APIVersion != batchReplJobAPIVersion { + return errInvalidArgument + } + + if r.Source.Bucket == "" { + return errInvalidArgument + } + + _, err := o.GetBucketInfo(ctx, r.Source.Bucket, BucketOptions{}) + if err != nil { + return err + } + + if err := r.Source.Type.Validate(); err != nil { + return err + } + + if r.Target.Endpoint == "" { + return errInvalidArgument + } + + if r.Target.Bucket == "" { + return errInvalidArgument + } + + if err := r.Target.Creds.Validate(); err != nil { + return err + } + + if err := r.Target.Type.Validate(); err != nil { + return err + } + + for _, tag := range r.Flags.Filter.Tags { + if err := tag.Validate(); err != nil { + return err + } + } + + for _, meta := range r.Flags.Filter.Metadata { + if err := meta.Validate(); err != nil { + return err + } + } + + if err := r.Flags.Retry.Validate(); err != nil { + return err + } + + return nil +} + +// Type returns type of batch job, currently only supports 'replicate' +func (j BatchJobRequest) Type() madmin.BatchJobType { + if j.Replicate != nil { + return madmin.BatchJobReplicate + } + return madmin.BatchJobType("unknown") 
+} + +// Validate validates the current job, used by 'save()' before +// persisting the job request +func (j BatchJobRequest) Validate(ctx context.Context, o ObjectLayer) error { + if j.Replicate != nil { + return j.Replicate.Validate(ctx, o) + } + return errInvalidArgument +} + +func (j BatchJobRequest) delete(ctx context.Context, api ObjectLayer) { + if j.Replicate != nil { + deleteConfig(ctx, api, pathJoin(j.Location, batchReplName)) + } + globalBatchJobsMetrics.delete(j.ID) + deleteConfig(ctx, api, j.Location) +} + +func (j *BatchJobRequest) save(ctx context.Context, api ObjectLayer) error { + if j.Replicate == nil { + return errInvalidArgument + } + + if err := j.Validate(ctx, api); err != nil { + return err + } + + j.Location = pathJoin(batchJobPrefix, j.ID) + job, err := j.MarshalMsg(nil) + if err != nil { + return err + } + + return saveConfig(ctx, api, j.Location, job) +} + +func (j *BatchJobRequest) load(ctx context.Context, api ObjectLayer, name string) error { + if j == nil { + return nil + } + + job, err := readConfig(ctx, api, name) + if err != nil { + if errors.Is(err, errConfigNotFound) || isErrObjectNotFound(err) { + err = errNoSuchJob + } + return err + } + + _, err = j.UnmarshalMsg(job) + return err +} + +func batchReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) { + // TODO: support custom storage class for remote replication + putOpts, err = putReplicationOpts(ctx, "", objInfo) + if err != nil { + return putOpts, err + } + putOpts.Internal = miniogo.AdvancedPutOptions{ + SourceVersionID: objInfo.VersionID, + SourceMTime: objInfo.ModTime, + SourceETag: objInfo.ETag, + } + return putOpts, nil +} + +// ListBatchJobs - lists all currently active batch jobs, optionally takes {jobType} +// input to list only active batch jobs of 'jobType' +func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request) { + ctx := newContext(r, w, "ListBatchJobs") + + defer 
logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) + + objectAPI, _ := validateAdminReq(ctx, w, r, "admin:ListBatchJobs") + if objectAPI == nil { + return + } + + jobType := r.Form.Get("jobType") + if jobType == "" { + jobType = string(madmin.BatchJobReplicate) + } + + resultCh := make(chan ObjectInfo) + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if err := objectAPI.Walk(ctx, minioMetaBucket, batchJobPrefix, resultCh, ObjectOptions{}); err != nil { + writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL) + return + } + + listResult := madmin.ListBatchJobsResult{} + for result := range resultCh { + req := &BatchJobRequest{} + if err := req.load(ctx, objectAPI, result.Name); err != nil { + if !errors.Is(err, errNoSuchJob) { + logger.LogIf(ctx, err) + } + continue + } + + if jobType == string(req.Type()) { + listResult.Jobs = append(listResult.Jobs, madmin.BatchJobResult{ + ID: req.ID, + Type: req.Type(), + Started: req.Started, + User: req.User, + Elapsed: time.Since(req.Started), + }) + } + } + + logger.LogIf(ctx, json.NewEncoder(w).Encode(&listResult)) +} + +var errNoSuchJob = errors.New("no such job") + +// DescribeBatchJob returns the currently active batch job definition +func (a adminAPIHandlers) DescribeBatchJob(w http.ResponseWriter, r *http.Request) { + ctx := newContext(r, w, "DescribeBatchJob") + + defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) + + objectAPI, _ := validateAdminReq(ctx, w, r, "admin:DescribeBatchJob") + if objectAPI == nil { + return + } + + id := r.Form.Get("jobId") + if id == "" { + writeErrorResponseJSON(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL) + return + } + + req := &BatchJobRequest{} + if err := req.load(ctx, objectAPI, pathJoin(batchJobPrefix, id)); err != nil { + if !errors.Is(err, errNoSuchJob) { + logger.LogIf(ctx, err) + } + + writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL) + return + } + + buf, err := yaml.Marshal(req) + if err != nil { + logger.LogIf(ctx, err) + 
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL) + return + } + + w.Write(buf) +} + +// StarBatchJob queue a new job for execution +func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request) { + ctx := newContext(r, w, "StartBatchJob") + + defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) + + objectAPI, creds := validateAdminReq(ctx, w, r, "admin:StartBatchJob") + if objectAPI == nil { + return + } + + buf, err := io.ReadAll(r.Body) + if err != nil { + writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL) + return + } + + user := creds.AccessKey + if creds.ParentUser != "" { + user = creds.ParentUser + } + + job := &BatchJobRequest{} + if err = yaml.Unmarshal(buf, job); err != nil { + writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL) + return + } + + job.ID = shortuuid.New() + job.User = user + job.Started = time.Now() + + if err := job.save(ctx, objectAPI); err != nil { + writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL) + return + } + + if err = globalBatchJobPool.queueJob(job); err != nil { + writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL) + return + } + + buf, err = json.Marshal(&madmin.BatchJobResult{ + ID: job.ID, + Type: job.Type(), + Started: job.Started, + User: job.User, + }) + if err != nil { + writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL) + return + } + + writeSuccessResponseJSON(w, buf) +} + +//msgp:ignore BatchJobPool + +// BatchJobPool batch job pool +type BatchJobPool struct { + ctx context.Context + objLayer ObjectLayer + once sync.Once + mu sync.Mutex + jobCh chan *BatchJobRequest + workerKillCh chan struct{} + workerWg sync.WaitGroup + workerSize int +} + +var globalBatchJobPool *BatchJobPool + +// newBatchJobPool creates a pool of job manifest workers of specified size +func newBatchJobPool(ctx context.Context, o ObjectLayer, workers int) *BatchJobPool { + jpool := &BatchJobPool{ + ctx: ctx, + objLayer: o, + jobCh: make(chan *BatchJobRequest, 10000), + 
workerKillCh: make(chan struct{}, workers), + } + jpool.ResizeWorkers(workers) + jpool.resume() + return jpool +} + +func (j *BatchJobPool) resume() { + results := make(chan ObjectInfo, 100) + ctx, cancel := context.WithCancel(j.ctx) + defer cancel() + if err := j.objLayer.Walk(ctx, minioMetaBucket, batchJobPrefix, results, ObjectOptions{}); err != nil { + logger.LogIf(j.ctx, err) + return + } + for result := range results { + req := &BatchJobRequest{} + if err := req.load(ctx, j.objLayer, result.Name); err != nil { + logger.LogIf(ctx, err) + continue + } + if err := j.queueJob(req); err != nil { + logger.LogIf(ctx, err) + continue + } + } +} + +// AddWorker adds a replication worker to the pool +func (j *BatchJobPool) AddWorker() { + if j == nil { + return + } + defer j.workerWg.Done() + for { + select { + case <-j.ctx.Done(): + return + case job, ok := <-j.jobCh: + if !ok { + return + } + if job.Replicate != nil { + if err := job.Replicate.Start(j.ctx, j.objLayer, *job); err != nil { + if !isErrBucketNotFound(err) { + logger.LogIf(j.ctx, err) + continue + } + // Bucket not found proceed to delete such a job. 
+ } + } + job.delete(j.ctx, j.objLayer) + case <-j.workerKillCh: + return + } + } +} + +// ResizeWorkers sets replication workers pool to new size +func (j *BatchJobPool) ResizeWorkers(n int) { + if j == nil { + return + } + + j.mu.Lock() + defer j.mu.Unlock() + + for j.workerSize < n { + j.workerSize++ + j.workerWg.Add(1) + go j.AddWorker() + } + for j.workerSize > n { + j.workerSize-- + go func() { j.workerKillCh <- struct{}{} }() + } +} + +func (j *BatchJobPool) queueJob(req *BatchJobRequest) error { + if j == nil { + return errInvalidArgument + } + select { + case <-j.ctx.Done(): + j.once.Do(func() { + close(j.jobCh) + }) + case j.jobCh <- req: + default: + return fmt.Errorf("batch job queue is currently full please try again later %#v", req) + } + return nil +} + +//msgp:ignore batchJobMetrics +type batchJobMetrics struct { + sync.RWMutex + metrics map[string]batchJobInfo +} + +var globalBatchJobsMetrics = batchJobMetrics{ + metrics: make(map[string]batchJobInfo), +} + +//msgp:ignore batchReplicationMetric +//go:generate stringer -type=batchReplicationMetric -trimprefix=batchReplicationMetric $GOFILE +type batchReplicationMetric uint8 + +const ( + batchReplicationMetricObject batchReplicationMetric = iota +) + +func batchReplicationTrace(d batchReplicationMetric, job string, startTime time.Time, duration time.Duration, info ObjectInfo, attempts int, err error) madmin.TraceInfo { + var errStr string + if err != nil { + errStr = err.Error() + } + funcName := fmt.Sprintf("batchReplication.%s (job-name=%s)", d.String(), job) + if attempts > 0 { + funcName = fmt.Sprintf("batchReplication.%s (job-name=%s,attempts=%s)", d.String(), job, humanize.Ordinal(attempts)) + } + return madmin.TraceInfo{ + TraceType: madmin.TraceBatchReplication, + Time: startTime, + NodeName: globalLocalNodeName, + FuncName: funcName, + Duration: duration, + Path: info.Name, + Error: errStr, + } +} + +func (m *batchJobMetrics) report(jobID string) (metrics *madmin.BatchJobMetrics) { + metrics 
= &madmin.BatchJobMetrics{CollectedAt: time.Now(), Jobs: make(map[string]madmin.JobMetric)} + m.RLock() + defer m.RUnlock() + for id, job := range m.metrics { + match := jobID != "" && id == jobID + metrics.Jobs[id] = madmin.JobMetric{ + JobID: job.JobID, + JobType: job.JobType, + StartTime: job.StartTime, + LastUpdate: job.LastUpdate, + RetryAttempts: job.RetryAttempts, + Complete: job.Complete, + Failed: job.Failed, + Replicate: &madmin.ReplicateInfo{ + Bucket: job.Bucket, + Object: job.Object, + Objects: job.Objects, + ObjectsFailed: job.ObjectsFailed, + BytesTransferred: job.BytesTransferred, + BytesFailed: job.BytesFailed, + }, + } + if match { + break + } + } + return metrics +} + +func (m *batchJobMetrics) delete(jobID string) { + m.Lock() + defer m.Unlock() + + delete(m.metrics, jobID) +} + +func (m *batchJobMetrics) save(jobID string, ri batchJobInfo) { + m.Lock() + defer m.Unlock() + + m.metrics[jobID] = ri +} + +func (m *batchJobMetrics) trace(d batchReplicationMetric, job string, attempts int, info ObjectInfo) func(err error) { + startTime := time.Now() + return func(err error) { + duration := time.Since(startTime) + if globalTrace.NumSubscribers(madmin.TraceBatchReplication) > 0 { + globalTrace.Publish(batchReplicationTrace(d, job, startTime, duration, info, attempts, err)) + } + } +} diff --git a/cmd/batch-handlers_gen.go b/cmd/batch-handlers_gen.go new file mode 100644 index 000000000..39054c790 --- /dev/null +++ b/cmd/batch-handlers_gen.go @@ -0,0 +1,2876 @@ +package cmd + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+ +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *BatchJobReplicateCredentials) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "AccessKey": + z.AccessKey, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "AccessKey") + return + } + case "SecretKey": + z.SecretKey, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "SecretKey") + return + } + case "SessionToken": + z.SessionToken, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "SessionToken") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z BatchJobReplicateCredentials) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 3 + // write "AccessKey" + err = en.Append(0x83, 0xa9, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79) + if err != nil { + return + } + err = en.WriteString(z.AccessKey) + if err != nil { + err = msgp.WrapError(err, "AccessKey") + return + } + // write "SecretKey" + err = en.Append(0xa9, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79) + if err != nil { + return + } + err = en.WriteString(z.SecretKey) + if err != nil { + err = msgp.WrapError(err, "SecretKey") + return + } + // write "SessionToken" + err = en.Append(0xac, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.SessionToken) + if err != nil { + err = msgp.WrapError(err, "SessionToken") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z BatchJobReplicateCredentials) MarshalMsg(b []byte) (o 
[]byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 3 + // string "AccessKey" + o = append(o, 0x83, 0xa9, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79) + o = msgp.AppendString(o, z.AccessKey) + // string "SecretKey" + o = append(o, 0xa9, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79) + o = msgp.AppendString(o, z.SecretKey) + // string "SessionToken" + o = append(o, 0xac, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e) + o = msgp.AppendString(o, z.SessionToken) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *BatchJobReplicateCredentials) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "AccessKey": + z.AccessKey, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AccessKey") + return + } + case "SecretKey": + z.SecretKey, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SecretKey") + return + } + case "SessionToken": + z.SessionToken, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SessionToken") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z BatchJobReplicateCredentials) Msgsize() (s int) { + s = 1 + 10 + msgp.StringPrefixSize + len(z.AccessKey) + 10 + msgp.StringPrefixSize + len(z.SecretKey) + 13 + msgp.StringPrefixSize + len(z.SessionToken) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *BatchJobReplicateFlags) DecodeMsg(dc 
*msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Filter": + err = z.Filter.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Filter") + return + } + case "Notify": + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Notify") + return + } + for zb0002 > 0 { + zb0002-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "Notify") + return + } + switch msgp.UnsafeString(field) { + case "Endpoint": + z.Notify.Endpoint, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Notify", "Endpoint") + return + } + case "Token": + z.Notify.Token, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Notify", "Token") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "Notify") + return + } + } + } + case "Retry": + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Retry") + return + } + for zb0003 > 0 { + zb0003-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "Retry") + return + } + switch msgp.UnsafeString(field) { + case "Attempts": + z.Retry.Attempts, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "Retry", "Attempts") + return + } + case "Delay": + z.Retry.Delay, err = dc.ReadDuration() + if err != nil { + err = msgp.WrapError(err, "Retry", "Delay") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "Retry") + return + } + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z 
*BatchJobReplicateFlags) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 3 + // write "Filter" + err = en.Append(0x83, 0xa6, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72) + if err != nil { + return + } + err = z.Filter.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Filter") + return + } + // write "Notify" + err = en.Append(0xa6, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79) + if err != nil { + return + } + // map header, size 2 + // write "Endpoint" + err = en.Append(0x82, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74) + if err != nil { + return + } + err = en.WriteString(z.Notify.Endpoint) + if err != nil { + err = msgp.WrapError(err, "Notify", "Endpoint") + return + } + // write "Token" + err = en.Append(0xa5, 0x54, 0x6f, 0x6b, 0x65, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.Notify.Token) + if err != nil { + err = msgp.WrapError(err, "Notify", "Token") + return + } + // write "Retry" + err = en.Append(0xa5, 0x52, 0x65, 0x74, 0x72, 0x79) + if err != nil { + return + } + // map header, size 2 + // write "Attempts" + err = en.Append(0x82, 0xa8, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteInt(z.Retry.Attempts) + if err != nil { + err = msgp.WrapError(err, "Retry", "Attempts") + return + } + // write "Delay" + err = en.Append(0xa5, 0x44, 0x65, 0x6c, 0x61, 0x79) + if err != nil { + return + } + err = en.WriteDuration(z.Retry.Delay) + if err != nil { + err = msgp.WrapError(err, "Retry", "Delay") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *BatchJobReplicateFlags) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 3 + // string "Filter" + o = append(o, 0x83, 0xa6, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72) + o, err = z.Filter.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Filter") + return + } + // string "Notify" + o = append(o, 0xa6, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79) + // map header, size 2 
+ // string "Endpoint" + o = append(o, 0x82, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74) + o = msgp.AppendString(o, z.Notify.Endpoint) + // string "Token" + o = append(o, 0xa5, 0x54, 0x6f, 0x6b, 0x65, 0x6e) + o = msgp.AppendString(o, z.Notify.Token) + // string "Retry" + o = append(o, 0xa5, 0x52, 0x65, 0x74, 0x72, 0x79) + // map header, size 2 + // string "Attempts" + o = append(o, 0x82, 0xa8, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73) + o = msgp.AppendInt(o, z.Retry.Attempts) + // string "Delay" + o = append(o, 0xa5, 0x44, 0x65, 0x6c, 0x61, 0x79) + o = msgp.AppendDuration(o, z.Retry.Delay) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *BatchJobReplicateFlags) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Filter": + bts, err = z.Filter.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Filter") + return + } + case "Notify": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Notify") + return + } + for zb0002 > 0 { + zb0002-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Notify") + return + } + switch msgp.UnsafeString(field) { + case "Endpoint": + z.Notify.Endpoint, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Notify", "Endpoint") + return + } + case "Token": + z.Notify.Token, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Notify", "Token") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Notify") + return + } + } + } + case "Retry": + var zb0003 
uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Retry") + return + } + for zb0003 > 0 { + zb0003-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Retry") + return + } + switch msgp.UnsafeString(field) { + case "Attempts": + z.Retry.Attempts, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Retry", "Attempts") + return + } + case "Delay": + z.Retry.Delay, bts, err = msgp.ReadDurationBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Retry", "Delay") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Retry") + return + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *BatchJobReplicateFlags) Msgsize() (s int) { + s = 1 + 7 + z.Filter.Msgsize() + 7 + 1 + 9 + msgp.StringPrefixSize + len(z.Notify.Endpoint) + 6 + msgp.StringPrefixSize + len(z.Notify.Token) + 6 + 1 + 9 + msgp.IntSize + 6 + msgp.DurationSize + return +} + +// DecodeMsg implements msgp.Decodable +func (z *BatchJobReplicateKV) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Key": + z.Key, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Key") + return + } + case "Value": + z.Value, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Value") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements 
msgp.Encodable +func (z BatchJobReplicateKV) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 2 + // write "Key" + err = en.Append(0x82, 0xa3, 0x4b, 0x65, 0x79) + if err != nil { + return + } + err = en.WriteString(z.Key) + if err != nil { + err = msgp.WrapError(err, "Key") + return + } + // write "Value" + err = en.Append(0xa5, 0x56, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Value) + if err != nil { + err = msgp.WrapError(err, "Value") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z BatchJobReplicateKV) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 2 + // string "Key" + o = append(o, 0x82, 0xa3, 0x4b, 0x65, 0x79) + o = msgp.AppendString(o, z.Key) + // string "Value" + o = append(o, 0xa5, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendString(o, z.Value) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *BatchJobReplicateKV) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Key": + z.Key, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Key") + return + } + case "Value": + z.Value, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Value") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z BatchJobReplicateKV) Msgsize() (s int) { + s = 1 + 4 + msgp.StringPrefixSize + len(z.Key) + 6 + msgp.StringPrefixSize + len(z.Value) 
+ return +} + +// DecodeMsg implements msgp.Decodable +func (z *BatchJobReplicateResourceType) DecodeMsg(dc *msgp.Reader) (err error) { + { + var zb0001 string + zb0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = BatchJobReplicateResourceType(zb0001) + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z BatchJobReplicateResourceType) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteString(string(z)) + if err != nil { + err = msgp.WrapError(err) + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z BatchJobReplicateResourceType) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendString(o, string(z)) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *BatchJobReplicateResourceType) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var zb0001 string + zb0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = BatchJobReplicateResourceType(zb0001) + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z BatchJobReplicateResourceType) Msgsize() (s int) { + s = msgp.StringPrefixSize + len(string(z)) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *BatchJobReplicateSource) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Type": + { + var zb0002 string + zb0002, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + z.Type = BatchJobReplicateResourceType(zb0002) + } + case "Bucket": + z.Bucket, err = dc.ReadString() + if err != nil { + 
err = msgp.WrapError(err, "Bucket") + return + } + case "Prefix": + z.Prefix, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Prefix") + return + } + case "Endpoint": + z.Endpoint, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Endpoint") + return + } + case "Creds": + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Creds") + return + } + for zb0003 > 0 { + zb0003-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "Creds") + return + } + switch msgp.UnsafeString(field) { + case "AccessKey": + z.Creds.AccessKey, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Creds", "AccessKey") + return + } + case "SecretKey": + z.Creds.SecretKey, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Creds", "SecretKey") + return + } + case "SessionToken": + z.Creds.SessionToken, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Creds", "SessionToken") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "Creds") + return + } + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *BatchJobReplicateSource) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 5 + // write "Type" + err = en.Append(0x85, 0xa4, 0x54, 0x79, 0x70, 0x65) + if err != nil { + return + } + err = en.WriteString(string(z.Type)) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + // write "Bucket" + err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74) + if err != nil { + return + } + err = en.WriteString(z.Bucket) + if err != nil { + err = msgp.WrapError(err, "Bucket") + return + } + // write "Prefix" + err = en.Append(0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78) + if err != nil { + return + } + err = en.WriteString(z.Prefix) + if err != nil { + err = 
msgp.WrapError(err, "Prefix") + return + } + // write "Endpoint" + err = en.Append(0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74) + if err != nil { + return + } + err = en.WriteString(z.Endpoint) + if err != nil { + err = msgp.WrapError(err, "Endpoint") + return + } + // write "Creds" + err = en.Append(0xa5, 0x43, 0x72, 0x65, 0x64, 0x73) + if err != nil { + return + } + // map header, size 3 + // write "AccessKey" + err = en.Append(0x83, 0xa9, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79) + if err != nil { + return + } + err = en.WriteString(z.Creds.AccessKey) + if err != nil { + err = msgp.WrapError(err, "Creds", "AccessKey") + return + } + // write "SecretKey" + err = en.Append(0xa9, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79) + if err != nil { + return + } + err = en.WriteString(z.Creds.SecretKey) + if err != nil { + err = msgp.WrapError(err, "Creds", "SecretKey") + return + } + // write "SessionToken" + err = en.Append(0xac, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.Creds.SessionToken) + if err != nil { + err = msgp.WrapError(err, "Creds", "SessionToken") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *BatchJobReplicateSource) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 5 + // string "Type" + o = append(o, 0x85, 0xa4, 0x54, 0x79, 0x70, 0x65) + o = msgp.AppendString(o, string(z.Type)) + // string "Bucket" + o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74) + o = msgp.AppendString(o, z.Bucket) + // string "Prefix" + o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78) + o = msgp.AppendString(o, z.Prefix) + // string "Endpoint" + o = append(o, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74) + o = msgp.AppendString(o, z.Endpoint) + // string "Creds" + o = append(o, 0xa5, 0x43, 0x72, 0x65, 0x64, 0x73) + // map header, size 3 + // string "AccessKey" + o = 
append(o, 0x83, 0xa9, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79) + o = msgp.AppendString(o, z.Creds.AccessKey) + // string "SecretKey" + o = append(o, 0xa9, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79) + o = msgp.AppendString(o, z.Creds.SecretKey) + // string "SessionToken" + o = append(o, 0xac, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e) + o = msgp.AppendString(o, z.Creds.SessionToken) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *BatchJobReplicateSource) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Type": + { + var zb0002 string + zb0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + z.Type = BatchJobReplicateResourceType(zb0002) + } + case "Bucket": + z.Bucket, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Bucket") + return + } + case "Prefix": + z.Prefix, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Prefix") + return + } + case "Endpoint": + z.Endpoint, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Endpoint") + return + } + case "Creds": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Creds") + return + } + for zb0003 > 0 { + zb0003-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Creds") + return + } + switch msgp.UnsafeString(field) { + case "AccessKey": + z.Creds.AccessKey, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Creds", 
"AccessKey") + return + } + case "SecretKey": + z.Creds.SecretKey, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Creds", "SecretKey") + return + } + case "SessionToken": + z.Creds.SessionToken, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Creds", "SessionToken") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Creds") + return + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *BatchJobReplicateSource) Msgsize() (s int) { + s = 1 + 5 + msgp.StringPrefixSize + len(string(z.Type)) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 6 + 1 + 10 + msgp.StringPrefixSize + len(z.Creds.AccessKey) + 10 + msgp.StringPrefixSize + len(z.Creds.SecretKey) + 13 + msgp.StringPrefixSize + len(z.Creds.SessionToken) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *BatchJobReplicateTarget) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Type": + { + var zb0002 string + zb0002, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + z.Type = BatchJobReplicateResourceType(zb0002) + } + case "Bucket": + z.Bucket, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Bucket") + return + } + case "Prefix": + z.Prefix, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Prefix") + return + } + case 
"Endpoint": + z.Endpoint, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Endpoint") + return + } + case "Creds": + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Creds") + return + } + for zb0003 > 0 { + zb0003-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "Creds") + return + } + switch msgp.UnsafeString(field) { + case "AccessKey": + z.Creds.AccessKey, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Creds", "AccessKey") + return + } + case "SecretKey": + z.Creds.SecretKey, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Creds", "SecretKey") + return + } + case "SessionToken": + z.Creds.SessionToken, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Creds", "SessionToken") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "Creds") + return + } + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *BatchJobReplicateTarget) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 5 + // write "Type" + err = en.Append(0x85, 0xa4, 0x54, 0x79, 0x70, 0x65) + if err != nil { + return + } + err = en.WriteString(string(z.Type)) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + // write "Bucket" + err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74) + if err != nil { + return + } + err = en.WriteString(z.Bucket) + if err != nil { + err = msgp.WrapError(err, "Bucket") + return + } + // write "Prefix" + err = en.Append(0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78) + if err != nil { + return + } + err = en.WriteString(z.Prefix) + if err != nil { + err = msgp.WrapError(err, "Prefix") + return + } + // write "Endpoint" + err = en.Append(0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74) + if err != nil { + return + } + err = 
en.WriteString(z.Endpoint) + if err != nil { + err = msgp.WrapError(err, "Endpoint") + return + } + // write "Creds" + err = en.Append(0xa5, 0x43, 0x72, 0x65, 0x64, 0x73) + if err != nil { + return + } + // map header, size 3 + // write "AccessKey" + err = en.Append(0x83, 0xa9, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79) + if err != nil { + return + } + err = en.WriteString(z.Creds.AccessKey) + if err != nil { + err = msgp.WrapError(err, "Creds", "AccessKey") + return + } + // write "SecretKey" + err = en.Append(0xa9, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79) + if err != nil { + return + } + err = en.WriteString(z.Creds.SecretKey) + if err != nil { + err = msgp.WrapError(err, "Creds", "SecretKey") + return + } + // write "SessionToken" + err = en.Append(0xac, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.Creds.SessionToken) + if err != nil { + err = msgp.WrapError(err, "Creds", "SessionToken") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *BatchJobReplicateTarget) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 5 + // string "Type" + o = append(o, 0x85, 0xa4, 0x54, 0x79, 0x70, 0x65) + o = msgp.AppendString(o, string(z.Type)) + // string "Bucket" + o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74) + o = msgp.AppendString(o, z.Bucket) + // string "Prefix" + o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78) + o = msgp.AppendString(o, z.Prefix) + // string "Endpoint" + o = append(o, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74) + o = msgp.AppendString(o, z.Endpoint) + // string "Creds" + o = append(o, 0xa5, 0x43, 0x72, 0x65, 0x64, 0x73) + // map header, size 3 + // string "AccessKey" + o = append(o, 0x83, 0xa9, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79) + o = msgp.AppendString(o, z.Creds.AccessKey) + // string "SecretKey" + o = append(o, 0xa9, 0x53, 
0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79) + o = msgp.AppendString(o, z.Creds.SecretKey) + // string "SessionToken" + o = append(o, 0xac, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e) + o = msgp.AppendString(o, z.Creds.SessionToken) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *BatchJobReplicateTarget) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Type": + { + var zb0002 string + zb0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + z.Type = BatchJobReplicateResourceType(zb0002) + } + case "Bucket": + z.Bucket, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Bucket") + return + } + case "Prefix": + z.Prefix, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Prefix") + return + } + case "Endpoint": + z.Endpoint, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Endpoint") + return + } + case "Creds": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Creds") + return + } + for zb0003 > 0 { + zb0003-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Creds") + return + } + switch msgp.UnsafeString(field) { + case "AccessKey": + z.Creds.AccessKey, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Creds", "AccessKey") + return + } + case "SecretKey": + z.Creds.SecretKey, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Creds", "SecretKey") + return + 
} + case "SessionToken": + z.Creds.SessionToken, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Creds", "SessionToken") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Creds") + return + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *BatchJobReplicateTarget) Msgsize() (s int) { + s = 1 + 5 + msgp.StringPrefixSize + len(string(z.Type)) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 6 + 1 + 10 + msgp.StringPrefixSize + len(z.Creds.AccessKey) + 10 + msgp.StringPrefixSize + len(z.Creds.SecretKey) + 13 + msgp.StringPrefixSize + len(z.Creds.SessionToken) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *BatchJobReplicateV1) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "APIVersion": + z.APIVersion, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "APIVersion") + return + } + case "Flags": + err = z.Flags.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Flags") + return + } + case "Target": + err = z.Target.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Target") + return + } + case "Source": + err = z.Source.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Source") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable 
+func (z *BatchJobReplicateV1) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 4 + // write "APIVersion" + err = en.Append(0x84, 0xaa, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.APIVersion) + if err != nil { + err = msgp.WrapError(err, "APIVersion") + return + } + // write "Flags" + err = en.Append(0xa5, 0x46, 0x6c, 0x61, 0x67, 0x73) + if err != nil { + return + } + err = z.Flags.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Flags") + return + } + // write "Target" + err = en.Append(0xa6, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74) + if err != nil { + return + } + err = z.Target.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Target") + return + } + // write "Source" + err = en.Append(0xa6, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65) + if err != nil { + return + } + err = z.Source.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Source") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *BatchJobReplicateV1) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 4 + // string "APIVersion" + o = append(o, 0x84, 0xaa, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.APIVersion) + // string "Flags" + o = append(o, 0xa5, 0x46, 0x6c, 0x61, 0x67, 0x73) + o, err = z.Flags.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Flags") + return + } + // string "Target" + o = append(o, 0xa6, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74) + o, err = z.Target.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Target") + return + } + // string "Source" + o = append(o, 0xa6, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65) + o, err = z.Source.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Source") + return + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *BatchJobReplicateV1) UnmarshalMsg(bts []byte) (o []byte, err error) { + 
var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "APIVersion": + z.APIVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "APIVersion") + return + } + case "Flags": + bts, err = z.Flags.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Flags") + return + } + case "Target": + bts, err = z.Target.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Target") + return + } + case "Source": + bts, err = z.Source.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Source") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *BatchJobReplicateV1) Msgsize() (s int) { + s = 1 + 11 + msgp.StringPrefixSize + len(z.APIVersion) + 6 + z.Flags.Msgsize() + 7 + z.Target.Msgsize() + 7 + z.Source.Msgsize() + return +} + +// DecodeMsg implements msgp.Decodable +func (z *BatchJobRequest) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "ID": + z.ID, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "ID") + return + } + case "User": + z.User, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "User") + return + } + case "Started": + z.Started, err = dc.ReadTime() + if err != nil { + err = 
msgp.WrapError(err, "Started") + return + } + case "Location": + z.Location, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Location") + return + } + case "Replicate": + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "Replicate") + return + } + z.Replicate = nil + } else { + if z.Replicate == nil { + z.Replicate = new(BatchJobReplicateV1) + } + err = z.Replicate.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Replicate") + return + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *BatchJobRequest) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 5 + // write "ID" + err = en.Append(0x85, 0xa2, 0x49, 0x44) + if err != nil { + return + } + err = en.WriteString(z.ID) + if err != nil { + err = msgp.WrapError(err, "ID") + return + } + // write "User" + err = en.Append(0xa4, 0x55, 0x73, 0x65, 0x72) + if err != nil { + return + } + err = en.WriteString(z.User) + if err != nil { + err = msgp.WrapError(err, "User") + return + } + // write "Started" + err = en.Append(0xa7, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64) + if err != nil { + return + } + err = en.WriteTime(z.Started) + if err != nil { + err = msgp.WrapError(err, "Started") + return + } + // write "Location" + err = en.Append(0xa8, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.Location) + if err != nil { + err = msgp.WrapError(err, "Location") + return + } + // write "Replicate" + err = en.Append(0xa9, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65) + if err != nil { + return + } + if z.Replicate == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.Replicate.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Replicate") + return + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *BatchJobRequest) 
MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 5 + // string "ID" + o = append(o, 0x85, 0xa2, 0x49, 0x44) + o = msgp.AppendString(o, z.ID) + // string "User" + o = append(o, 0xa4, 0x55, 0x73, 0x65, 0x72) + o = msgp.AppendString(o, z.User) + // string "Started" + o = append(o, 0xa7, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64) + o = msgp.AppendTime(o, z.Started) + // string "Location" + o = append(o, 0xa8, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.Location) + // string "Replicate" + o = append(o, 0xa9, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65) + if z.Replicate == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Replicate.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Replicate") + return + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *BatchJobRequest) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "ID": + z.ID, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ID") + return + } + case "User": + z.User, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "User") + return + } + case "Started": + z.Started, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Started") + return + } + case "Location": + z.Location, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Location") + return + } + case "Replicate": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Replicate = nil + } else { + if z.Replicate == nil { + z.Replicate = 
new(BatchJobReplicateV1) + } + bts, err = z.Replicate.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Replicate") + return + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *BatchJobRequest) Msgsize() (s int) { + s = 1 + 3 + msgp.StringPrefixSize + len(z.ID) + 5 + msgp.StringPrefixSize + len(z.User) + 8 + msgp.TimeSize + 9 + msgp.StringPrefixSize + len(z.Location) + 10 + if z.Replicate == nil { + s += msgp.NilSize + } else { + s += z.Replicate.Msgsize() + } + return +} + +// DecodeMsg implements msgp.Decodable +func (z *BatchReplicateFilter) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "NewerThan": + z.NewerThan, err = dc.ReadDuration() + if err != nil { + err = msgp.WrapError(err, "NewerThan") + return + } + case "OlderThan": + z.OlderThan, err = dc.ReadDuration() + if err != nil { + err = msgp.WrapError(err, "OlderThan") + return + } + case "CreatedAfter": + z.CreatedAfter, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "CreatedAfter") + return + } + case "CreatedBefore": + z.CreatedBefore, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "CreatedBefore") + return + } + case "Tags": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + if cap(z.Tags) >= int(zb0002) { + z.Tags = (z.Tags)[:zb0002] + } else { + z.Tags = make([]BatchJobReplicateKV, zb0002) + } + for za0001 := range z.Tags { + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + 
if err != nil { + err = msgp.WrapError(err, "Tags", za0001) + return + } + for zb0003 > 0 { + zb0003-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "Tags", za0001) + return + } + switch msgp.UnsafeString(field) { + case "Key": + z.Tags[za0001].Key, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Tags", za0001, "Key") + return + } + case "Value": + z.Tags[za0001].Value, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Tags", za0001, "Value") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "Tags", za0001) + return + } + } + } + } + case "Metadata": + var zb0004 uint32 + zb0004, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Metadata") + return + } + if cap(z.Metadata) >= int(zb0004) { + z.Metadata = (z.Metadata)[:zb0004] + } else { + z.Metadata = make([]BatchJobReplicateKV, zb0004) + } + for za0002 := range z.Metadata { + var zb0005 uint32 + zb0005, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Metadata", za0002) + return + } + for zb0005 > 0 { + zb0005-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "Metadata", za0002) + return + } + switch msgp.UnsafeString(field) { + case "Key": + z.Metadata[za0002].Key, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Metadata", za0002, "Key") + return + } + case "Value": + z.Metadata[za0002].Value, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Metadata", za0002, "Value") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "Metadata", za0002) + return + } + } + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *BatchReplicateFilter) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 6 + // write "NewerThan" + err = 
en.Append(0x86, 0xa9, 0x4e, 0x65, 0x77, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e) + if err != nil { + return + } + err = en.WriteDuration(z.NewerThan) + if err != nil { + err = msgp.WrapError(err, "NewerThan") + return + } + // write "OlderThan" + err = en.Append(0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e) + if err != nil { + return + } + err = en.WriteDuration(z.OlderThan) + if err != nil { + err = msgp.WrapError(err, "OlderThan") + return + } + // write "CreatedAfter" + err = en.Append(0xac, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72) + if err != nil { + return + } + err = en.WriteTime(z.CreatedAfter) + if err != nil { + err = msgp.WrapError(err, "CreatedAfter") + return + } + // write "CreatedBefore" + err = en.Append(0xad, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65) + if err != nil { + return + } + err = en.WriteTime(z.CreatedBefore) + if err != nil { + err = msgp.WrapError(err, "CreatedBefore") + return + } + // write "Tags" + err = en.Append(0xa4, 0x54, 0x61, 0x67, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Tags))) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + for za0001 := range z.Tags { + // map header, size 2 + // write "Key" + err = en.Append(0x82, 0xa3, 0x4b, 0x65, 0x79) + if err != nil { + return + } + err = en.WriteString(z.Tags[za0001].Key) + if err != nil { + err = msgp.WrapError(err, "Tags", za0001, "Key") + return + } + // write "Value" + err = en.Append(0xa5, 0x56, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Tags[za0001].Value) + if err != nil { + err = msgp.WrapError(err, "Tags", za0001, "Value") + return + } + } + // write "Metadata" + err = en.Append(0xa8, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Metadata))) + if err != nil { + err = msgp.WrapError(err, "Metadata") + return + } + for za0002 := range 
z.Metadata { + // map header, size 2 + // write "Key" + err = en.Append(0x82, 0xa3, 0x4b, 0x65, 0x79) + if err != nil { + return + } + err = en.WriteString(z.Metadata[za0002].Key) + if err != nil { + err = msgp.WrapError(err, "Metadata", za0002, "Key") + return + } + // write "Value" + err = en.Append(0xa5, 0x56, 0x61, 0x6c, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Metadata[za0002].Value) + if err != nil { + err = msgp.WrapError(err, "Metadata", za0002, "Value") + return + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *BatchReplicateFilter) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 6 + // string "NewerThan" + o = append(o, 0x86, 0xa9, 0x4e, 0x65, 0x77, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e) + o = msgp.AppendDuration(o, z.NewerThan) + // string "OlderThan" + o = append(o, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e) + o = msgp.AppendDuration(o, z.OlderThan) + // string "CreatedAfter" + o = append(o, 0xac, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72) + o = msgp.AppendTime(o, z.CreatedAfter) + // string "CreatedBefore" + o = append(o, 0xad, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65) + o = msgp.AppendTime(o, z.CreatedBefore) + // string "Tags" + o = append(o, 0xa4, 0x54, 0x61, 0x67, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Tags))) + for za0001 := range z.Tags { + // map header, size 2 + // string "Key" + o = append(o, 0x82, 0xa3, 0x4b, 0x65, 0x79) + o = msgp.AppendString(o, z.Tags[za0001].Key) + // string "Value" + o = append(o, 0xa5, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendString(o, z.Tags[za0001].Value) + } + // string "Metadata" + o = append(o, 0xa8, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61) + o = msgp.AppendArrayHeader(o, uint32(len(z.Metadata))) + for za0002 := range z.Metadata { + // map header, size 2 + // string "Key" + o = append(o, 0x82, 0xa3, 0x4b, 0x65, 
0x79) + o = msgp.AppendString(o, z.Metadata[za0002].Key) + // string "Value" + o = append(o, 0xa5, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendString(o, z.Metadata[za0002].Value) + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *BatchReplicateFilter) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "NewerThan": + z.NewerThan, bts, err = msgp.ReadDurationBytes(bts) + if err != nil { + err = msgp.WrapError(err, "NewerThan") + return + } + case "OlderThan": + z.OlderThan, bts, err = msgp.ReadDurationBytes(bts) + if err != nil { + err = msgp.WrapError(err, "OlderThan") + return + } + case "CreatedAfter": + z.CreatedAfter, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "CreatedAfter") + return + } + case "CreatedBefore": + z.CreatedBefore, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "CreatedBefore") + return + } + case "Tags": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + if cap(z.Tags) >= int(zb0002) { + z.Tags = (z.Tags)[:zb0002] + } else { + z.Tags = make([]BatchJobReplicateKV, zb0002) + } + for za0001 := range z.Tags { + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags", za0001) + return + } + for zb0003 > 0 { + zb0003-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Tags", za0001) + return + } + switch msgp.UnsafeString(field) { + case "Key": + z.Tags[za0001].Key, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = 
msgp.WrapError(err, "Tags", za0001, "Key") + return + } + case "Value": + z.Tags[za0001].Value, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags", za0001, "Value") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Tags", za0001) + return + } + } + } + } + case "Metadata": + var zb0004 uint32 + zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Metadata") + return + } + if cap(z.Metadata) >= int(zb0004) { + z.Metadata = (z.Metadata)[:zb0004] + } else { + z.Metadata = make([]BatchJobReplicateKV, zb0004) + } + for za0002 := range z.Metadata { + var zb0005 uint32 + zb0005, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Metadata", za0002) + return + } + for zb0005 > 0 { + zb0005-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Metadata", za0002) + return + } + switch msgp.UnsafeString(field) { + case "Key": + z.Metadata[za0002].Key, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Metadata", za0002, "Key") + return + } + case "Value": + z.Metadata[za0002].Value, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Metadata", za0002, "Value") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Metadata", za0002) + return + } + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *BatchReplicateFilter) Msgsize() (s int) { + s = 1 + 10 + msgp.DurationSize + 10 + msgp.DurationSize + 13 + msgp.TimeSize + 14 + msgp.TimeSize + 5 + msgp.ArrayHeaderSize + for za0001 := range z.Tags { + s += 1 + 4 + msgp.StringPrefixSize + len(z.Tags[za0001].Key) + 6 + 
msgp.StringPrefixSize + len(z.Tags[za0001].Value) + } + s += 9 + msgp.ArrayHeaderSize + for za0002 := range z.Metadata { + s += 1 + 4 + msgp.StringPrefixSize + len(z.Metadata[za0002].Key) + 6 + msgp.StringPrefixSize + len(z.Metadata[za0002].Value) + } + return +} + +// DecodeMsg implements msgp.Decodable +func (z *BatchReplicateNotification) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Endpoint": + z.Endpoint, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Endpoint") + return + } + case "Token": + z.Token, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Token") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z BatchReplicateNotification) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 2 + // write "Endpoint" + err = en.Append(0x82, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74) + if err != nil { + return + } + err = en.WriteString(z.Endpoint) + if err != nil { + err = msgp.WrapError(err, "Endpoint") + return + } + // write "Token" + err = en.Append(0xa5, 0x54, 0x6f, 0x6b, 0x65, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.Token) + if err != nil { + err = msgp.WrapError(err, "Token") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z BatchReplicateNotification) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 2 + // string "Endpoint" + o = append(o, 0x82, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74) + o = msgp.AppendString(o, z.Endpoint) + // string "Token" + o = append(o, 
0xa5, 0x54, 0x6f, 0x6b, 0x65, 0x6e) + o = msgp.AppendString(o, z.Token) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *BatchReplicateNotification) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Endpoint": + z.Endpoint, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Endpoint") + return + } + case "Token": + z.Token, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Token") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z BatchReplicateNotification) Msgsize() (s int) { + s = 1 + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 6 + msgp.StringPrefixSize + len(z.Token) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *BatchReplicateRetry) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Attempts": + z.Attempts, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "Attempts") + return + } + case "Delay": + z.Delay, err = dc.ReadDuration() + if err != nil { + err = msgp.WrapError(err, "Delay") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// 
EncodeMsg implements msgp.Encodable +func (z BatchReplicateRetry) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 2 + // write "Attempts" + err = en.Append(0x82, 0xa8, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteInt(z.Attempts) + if err != nil { + err = msgp.WrapError(err, "Attempts") + return + } + // write "Delay" + err = en.Append(0xa5, 0x44, 0x65, 0x6c, 0x61, 0x79) + if err != nil { + return + } + err = en.WriteDuration(z.Delay) + if err != nil { + err = msgp.WrapError(err, "Delay") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z BatchReplicateRetry) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 2 + // string "Attempts" + o = append(o, 0x82, 0xa8, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73) + o = msgp.AppendInt(o, z.Attempts) + // string "Delay" + o = append(o, 0xa5, 0x44, 0x65, 0x6c, 0x61, 0x79) + o = msgp.AppendDuration(o, z.Delay) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *BatchReplicateRetry) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Attempts": + z.Attempts, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Attempts") + return + } + case "Delay": + z.Delay, bts, err = msgp.ReadDurationBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Delay") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z 
BatchReplicateRetry) Msgsize() (s int) { + s = 1 + 9 + msgp.IntSize + 6 + msgp.DurationSize + return +} + +// DecodeMsg implements msgp.Decodable +func (z *batchJobInfo) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "v": + z.Version, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + case "jid": + z.JobID, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "JobID") + return + } + case "jt": + z.JobType, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "JobType") + return + } + case "st": + z.StartTime, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "StartTime") + return + } + case "lu": + z.LastUpdate, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "LastUpdate") + return + } + case "ra": + z.RetryAttempts, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "RetryAttempts") + return + } + case "cmp": + z.Complete, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "Complete") + return + } + case "fld": + z.Failed, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "Failed") + return + } + case "lbkt": + z.Bucket, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Bucket") + return + } + case "lobj": + z.Object, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Object") + return + } + case "ob": + z.Objects, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "Objects") + return + } + case "obf": + z.ObjectsFailed, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "ObjectsFailed") + return + } + case "bt": + z.BytesTransferred, err = 
dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "BytesTransferred") + return + } + case "bf": + z.BytesFailed, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "BytesFailed") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 14 + // write "v" + err = en.Append(0x8e, 0xa1, 0x76) + if err != nil { + return + } + err = en.WriteInt(z.Version) + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + // write "jid" + err = en.Append(0xa3, 0x6a, 0x69, 0x64) + if err != nil { + return + } + err = en.WriteString(z.JobID) + if err != nil { + err = msgp.WrapError(err, "JobID") + return + } + // write "jt" + err = en.Append(0xa2, 0x6a, 0x74) + if err != nil { + return + } + err = en.WriteString(z.JobType) + if err != nil { + err = msgp.WrapError(err, "JobType") + return + } + // write "st" + err = en.Append(0xa2, 0x73, 0x74) + if err != nil { + return + } + err = en.WriteTime(z.StartTime) + if err != nil { + err = msgp.WrapError(err, "StartTime") + return + } + // write "lu" + err = en.Append(0xa2, 0x6c, 0x75) + if err != nil { + return + } + err = en.WriteTime(z.LastUpdate) + if err != nil { + err = msgp.WrapError(err, "LastUpdate") + return + } + // write "ra" + err = en.Append(0xa2, 0x72, 0x61) + if err != nil { + return + } + err = en.WriteInt(z.RetryAttempts) + if err != nil { + err = msgp.WrapError(err, "RetryAttempts") + return + } + // write "cmp" + err = en.Append(0xa3, 0x63, 0x6d, 0x70) + if err != nil { + return + } + err = en.WriteBool(z.Complete) + if err != nil { + err = msgp.WrapError(err, "Complete") + return + } + // write "fld" + err = en.Append(0xa3, 0x66, 0x6c, 0x64) + if err != nil { + return + } + err = en.WriteBool(z.Failed) + if err != nil { + err = msgp.WrapError(err, "Failed") + return + } + // write "lbkt" + 
err = en.Append(0xa4, 0x6c, 0x62, 0x6b, 0x74) + if err != nil { + return + } + err = en.WriteString(z.Bucket) + if err != nil { + err = msgp.WrapError(err, "Bucket") + return + } + // write "lobj" + err = en.Append(0xa4, 0x6c, 0x6f, 0x62, 0x6a) + if err != nil { + return + } + err = en.WriteString(z.Object) + if err != nil { + err = msgp.WrapError(err, "Object") + return + } + // write "ob" + err = en.Append(0xa2, 0x6f, 0x62) + if err != nil { + return + } + err = en.WriteInt64(z.Objects) + if err != nil { + err = msgp.WrapError(err, "Objects") + return + } + // write "obf" + err = en.Append(0xa3, 0x6f, 0x62, 0x66) + if err != nil { + return + } + err = en.WriteInt64(z.ObjectsFailed) + if err != nil { + err = msgp.WrapError(err, "ObjectsFailed") + return + } + // write "bt" + err = en.Append(0xa2, 0x62, 0x74) + if err != nil { + return + } + err = en.WriteInt64(z.BytesTransferred) + if err != nil { + err = msgp.WrapError(err, "BytesTransferred") + return + } + // write "bf" + err = en.Append(0xa2, 0x62, 0x66) + if err != nil { + return + } + err = en.WriteInt64(z.BytesFailed) + if err != nil { + err = msgp.WrapError(err, "BytesFailed") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *batchJobInfo) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 14 + // string "v" + o = append(o, 0x8e, 0xa1, 0x76) + o = msgp.AppendInt(o, z.Version) + // string "jid" + o = append(o, 0xa3, 0x6a, 0x69, 0x64) + o = msgp.AppendString(o, z.JobID) + // string "jt" + o = append(o, 0xa2, 0x6a, 0x74) + o = msgp.AppendString(o, z.JobType) + // string "st" + o = append(o, 0xa2, 0x73, 0x74) + o = msgp.AppendTime(o, z.StartTime) + // string "lu" + o = append(o, 0xa2, 0x6c, 0x75) + o = msgp.AppendTime(o, z.LastUpdate) + // string "ra" + o = append(o, 0xa2, 0x72, 0x61) + o = msgp.AppendInt(o, z.RetryAttempts) + // string "cmp" + o = append(o, 0xa3, 0x63, 0x6d, 0x70) + o = msgp.AppendBool(o, z.Complete) + // string 
"fld" + o = append(o, 0xa3, 0x66, 0x6c, 0x64) + o = msgp.AppendBool(o, z.Failed) + // string "lbkt" + o = append(o, 0xa4, 0x6c, 0x62, 0x6b, 0x74) + o = msgp.AppendString(o, z.Bucket) + // string "lobj" + o = append(o, 0xa4, 0x6c, 0x6f, 0x62, 0x6a) + o = msgp.AppendString(o, z.Object) + // string "ob" + o = append(o, 0xa2, 0x6f, 0x62) + o = msgp.AppendInt64(o, z.Objects) + // string "obf" + o = append(o, 0xa3, 0x6f, 0x62, 0x66) + o = msgp.AppendInt64(o, z.ObjectsFailed) + // string "bt" + o = append(o, 0xa2, 0x62, 0x74) + o = msgp.AppendInt64(o, z.BytesTransferred) + // string "bf" + o = append(o, 0xa2, 0x62, 0x66) + o = msgp.AppendInt64(o, z.BytesFailed) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *batchJobInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "v": + z.Version, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + case "jid": + z.JobID, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "JobID") + return + } + case "jt": + z.JobType, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "JobType") + return + } + case "st": + z.StartTime, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "StartTime") + return + } + case "lu": + z.LastUpdate, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "LastUpdate") + return + } + case "ra": + z.RetryAttempts, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "RetryAttempts") + return + } + case "cmp": + z.Complete, bts, err = msgp.ReadBoolBytes(bts) + if 
err != nil { + err = msgp.WrapError(err, "Complete") + return + } + case "fld": + z.Failed, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Failed") + return + } + case "lbkt": + z.Bucket, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Bucket") + return + } + case "lobj": + z.Object, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Object") + return + } + case "ob": + z.Objects, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Objects") + return + } + case "obf": + z.ObjectsFailed, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ObjectsFailed") + return + } + case "bt": + z.BytesTransferred, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "BytesTransferred") + return + } + case "bf": + z.BytesFailed, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "BytesFailed") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *batchJobInfo) Msgsize() (s int) { + s = 1 + 2 + msgp.IntSize + 4 + msgp.StringPrefixSize + len(z.JobID) + 3 + msgp.StringPrefixSize + len(z.JobType) + 3 + msgp.TimeSize + 3 + msgp.TimeSize + 3 + msgp.IntSize + 4 + msgp.BoolSize + 4 + msgp.BoolSize + 5 + msgp.StringPrefixSize + len(z.Bucket) + 5 + msgp.StringPrefixSize + len(z.Object) + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 3 + msgp.Int64Size + return +} diff --git a/cmd/batch-handlers_gen_test.go b/cmd/batch-handlers_gen_test.go new file mode 100644 index 000000000..d9f7dfe4e --- /dev/null +++ b/cmd/batch-handlers_gen_test.go @@ -0,0 +1,1253 @@ +package cmd + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+ +import ( + "bytes" + "testing" + + "github.com/tinylib/msgp/msgp" +) + +func TestMarshalUnmarshalBatchJobReplicateCredentials(t *testing.T) { + v := BatchJobReplicateCredentials{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgBatchJobReplicateCredentials(b *testing.B) { + v := BatchJobReplicateCredentials{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgBatchJobReplicateCredentials(b *testing.B) { + v := BatchJobReplicateCredentials{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalBatchJobReplicateCredentials(b *testing.B) { + v := BatchJobReplicateCredentials{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeBatchJobReplicateCredentials(t *testing.T) { + v := BatchJobReplicateCredentials{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeBatchJobReplicateCredentials Msgsize() is inaccurate") + } + + vn := BatchJobReplicateCredentials{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeBatchJobReplicateCredentials(b *testing.B) { 
+ v := BatchJobReplicateCredentials{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeBatchJobReplicateCredentials(b *testing.B) { + v := BatchJobReplicateCredentials{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalBatchJobReplicateFlags(t *testing.T) { + v := BatchJobReplicateFlags{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgBatchJobReplicateFlags(b *testing.B) { + v := BatchJobReplicateFlags{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgBatchJobReplicateFlags(b *testing.B) { + v := BatchJobReplicateFlags{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalBatchJobReplicateFlags(b *testing.B) { + v := BatchJobReplicateFlags{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeBatchJobReplicateFlags(t 
*testing.T) { + v := BatchJobReplicateFlags{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeBatchJobReplicateFlags Msgsize() is inaccurate") + } + + vn := BatchJobReplicateFlags{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeBatchJobReplicateFlags(b *testing.B) { + v := BatchJobReplicateFlags{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeBatchJobReplicateFlags(b *testing.B) { + v := BatchJobReplicateFlags{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalBatchJobReplicateKV(t *testing.T) { + v := BatchJobReplicateKV{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgBatchJobReplicateKV(b *testing.B) { + v := BatchJobReplicateKV{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgBatchJobReplicateKV(b *testing.B) { + v := BatchJobReplicateKV{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + 
b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalBatchJobReplicateKV(b *testing.B) { + v := BatchJobReplicateKV{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeBatchJobReplicateKV(t *testing.T) { + v := BatchJobReplicateKV{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeBatchJobReplicateKV Msgsize() is inaccurate") + } + + vn := BatchJobReplicateKV{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeBatchJobReplicateKV(b *testing.B) { + v := BatchJobReplicateKV{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeBatchJobReplicateKV(b *testing.B) { + v := BatchJobReplicateKV{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalBatchJobReplicateSource(t *testing.T) { + v := BatchJobReplicateSource{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if 
len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgBatchJobReplicateSource(b *testing.B) { + v := BatchJobReplicateSource{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgBatchJobReplicateSource(b *testing.B) { + v := BatchJobReplicateSource{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalBatchJobReplicateSource(b *testing.B) { + v := BatchJobReplicateSource{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeBatchJobReplicateSource(t *testing.T) { + v := BatchJobReplicateSource{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeBatchJobReplicateSource Msgsize() is inaccurate") + } + + vn := BatchJobReplicateSource{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeBatchJobReplicateSource(b *testing.B) { + v := BatchJobReplicateSource{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeBatchJobReplicateSource(b *testing.B) { + v := BatchJobReplicateSource{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < 
b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalBatchJobReplicateTarget(t *testing.T) { + v := BatchJobReplicateTarget{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgBatchJobReplicateTarget(b *testing.B) { + v := BatchJobReplicateTarget{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgBatchJobReplicateTarget(b *testing.B) { + v := BatchJobReplicateTarget{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalBatchJobReplicateTarget(b *testing.B) { + v := BatchJobReplicateTarget{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeBatchJobReplicateTarget(t *testing.T) { + v := BatchJobReplicateTarget{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeBatchJobReplicateTarget Msgsize() is inaccurate") + } + + vn := BatchJobReplicateTarget{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeBatchJobReplicateTarget(b *testing.B) { + v := BatchJobReplicateTarget{} + var buf bytes.Buffer + 
msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeBatchJobReplicateTarget(b *testing.B) { + v := BatchJobReplicateTarget{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalBatchJobReplicateV1(t *testing.T) { + v := BatchJobReplicateV1{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgBatchJobReplicateV1(b *testing.B) { + v := BatchJobReplicateV1{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgBatchJobReplicateV1(b *testing.B) { + v := BatchJobReplicateV1{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalBatchJobReplicateV1(b *testing.B) { + v := BatchJobReplicateV1{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeBatchJobReplicateV1(t *testing.T) { + v := BatchJobReplicateV1{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := 
v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeBatchJobReplicateV1 Msgsize() is inaccurate") + } + + vn := BatchJobReplicateV1{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeBatchJobReplicateV1(b *testing.B) { + v := BatchJobReplicateV1{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeBatchJobReplicateV1(b *testing.B) { + v := BatchJobReplicateV1{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalBatchJobRequest(t *testing.T) { + v := BatchJobRequest{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgBatchJobRequest(b *testing.B) { + v := BatchJobRequest{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgBatchJobRequest(b *testing.B) { + v := BatchJobRequest{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func 
BenchmarkUnmarshalBatchJobRequest(b *testing.B) { + v := BatchJobRequest{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeBatchJobRequest(t *testing.T) { + v := BatchJobRequest{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeBatchJobRequest Msgsize() is inaccurate") + } + + vn := BatchJobRequest{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeBatchJobRequest(b *testing.B) { + v := BatchJobRequest{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeBatchJobRequest(b *testing.B) { + v := BatchJobRequest{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalBatchReplicateFilter(t *testing.T) { + v := BatchReplicateFilter{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgBatchReplicateFilter(b *testing.B) { + v := BatchReplicateFilter{} + 
b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgBatchReplicateFilter(b *testing.B) { + v := BatchReplicateFilter{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalBatchReplicateFilter(b *testing.B) { + v := BatchReplicateFilter{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeBatchReplicateFilter(t *testing.T) { + v := BatchReplicateFilter{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeBatchReplicateFilter Msgsize() is inaccurate") + } + + vn := BatchReplicateFilter{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeBatchReplicateFilter(b *testing.B) { + v := BatchReplicateFilter{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeBatchReplicateFilter(b *testing.B) { + v := BatchReplicateFilter{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalBatchReplicateNotification(t *testing.T) { + v := BatchReplicateNotification{} + bts, err := v.MarshalMsg(nil) + if err 
!= nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgBatchReplicateNotification(b *testing.B) { + v := BatchReplicateNotification{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgBatchReplicateNotification(b *testing.B) { + v := BatchReplicateNotification{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalBatchReplicateNotification(b *testing.B) { + v := BatchReplicateNotification{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeBatchReplicateNotification(t *testing.T) { + v := BatchReplicateNotification{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeBatchReplicateNotification Msgsize() is inaccurate") + } + + vn := BatchReplicateNotification{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeBatchReplicateNotification(b *testing.B) { + v := BatchReplicateNotification{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + 
en.Flush() +} + +func BenchmarkDecodeBatchReplicateNotification(b *testing.B) { + v := BatchReplicateNotification{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalBatchReplicateRetry(t *testing.T) { + v := BatchReplicateRetry{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgBatchReplicateRetry(b *testing.B) { + v := BatchReplicateRetry{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgBatchReplicateRetry(b *testing.B) { + v := BatchReplicateRetry{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalBatchReplicateRetry(b *testing.B) { + v := BatchReplicateRetry{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeBatchReplicateRetry(t *testing.T) { + v := BatchReplicateRetry{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeBatchReplicateRetry Msgsize() is inaccurate") + } + + vn := BatchReplicateRetry{} + err := msgp.Decode(&buf, &vn) 
+ if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeBatchReplicateRetry(b *testing.B) { + v := BatchReplicateRetry{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeBatchReplicateRetry(b *testing.B) { + v := BatchReplicateRetry{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalbatchJobInfo(t *testing.T) { + v := batchJobInfo{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgbatchJobInfo(b *testing.B) { + v := batchJobInfo{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgbatchJobInfo(b *testing.B) { + v := batchJobInfo{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalbatchJobInfo(b *testing.B) { + v := batchJobInfo{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := 
v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodebatchJobInfo(t *testing.T) { + v := batchJobInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodebatchJobInfo Msgsize() is inaccurate") + } + + vn := batchJobInfo{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodebatchJobInfo(b *testing.B) { + v := batchJobInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodebatchJobInfo(b *testing.B) { + v := batchJobInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/cmd/batchreplicationmetric_string.go b/cmd/batchreplicationmetric_string.go new file mode 100644 index 000000000..84631e383 --- /dev/null +++ b/cmd/batchreplicationmetric_string.go @@ -0,0 +1,23 @@ +// Code generated by "stringer -type=batchReplicationMetric -trimprefix=batchReplicationMetric batch-handlers.go"; DO NOT EDIT. + +package cmd + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[batchReplicationMetricObject-0] +} + +const _batchReplicationMetric_name = "Object" + +var _batchReplicationMetric_index = [...]uint8{0, 6} + +func (i batchReplicationMetric) String() string { + if i >= batchReplicationMetric(len(_batchReplicationMetric_index)-1) { + return "batchReplicationMetric(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _batchReplicationMetric_name[_batchReplicationMetric_index[i]:_batchReplicationMetric_index[i+1]] +} diff --git a/cmd/erasure-server-pool.go b/cmd/erasure-server-pool.go index b087b8893..88f2cc16a 100644 --- a/cmd/erasure-server-pool.go +++ b/cmd/erasure-server-pool.go @@ -1875,12 +1875,22 @@ func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, re versionsSorter(fivs.Versions).reverse() for _, version := range fivs.Versions { + send := true + if opts.WalkFilter != nil && !opts.WalkFilter(version) { + send = false + } + + if !send { + continue + } + versioned := vcfg != nil && vcfg.Versioned(version.Name) + objInfo := version.ToObjectInfo(bucket, version.Name, versioned) select { case <-ctx.Done(): return - case results <- version.ToObjectInfo(bucket, version.Name, versioned): + case results <- objInfo: } } } @@ -1904,7 +1914,7 @@ func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, re path: path, filterPrefix: filterPrefix, recursive: true, - forwardTo: "", + forwardTo: opts.WalkMarker, minDisks: 1, reportNotFound: false, agreed: loadEntry, diff --git a/cmd/erasure-single-drive.go b/cmd/erasure-single-drive.go index 5a19487f7..af9fe6c6c 100644 --- a/cmd/erasure-single-drive.go +++ b/cmd/erasure-single-drive.go @@ -3050,12 +3050,22 @@ func (es *erasureSingle) Walk(ctx context.Context, bucket, prefix string, result versionsSorter(fivs.Versions).reverse() for _, version := range fivs.Versions { + send := true + if opts.WalkFilter != nil && !opts.WalkFilter(version) { + send = false + } + + if !send { + continue + } + versioned := vcfg != nil && 
vcfg.Versioned(version.Name) + objInfo := version.ToObjectInfo(bucket, version.Name, versioned) select { case <-ctx.Done(): return - case results <- version.ToObjectInfo(bucket, version.Name, versioned): + case results <- objInfo: } } } @@ -3079,7 +3089,7 @@ func (es *erasureSingle) Walk(ctx context.Context, bucket, prefix string, result path: path, filterPrefix: filterPrefix, recursive: true, - forwardTo: "", + forwardTo: opts.WalkMarker, minDisks: 1, reportNotFound: false, agreed: loadEntry, diff --git a/cmd/metrics-realtime.go b/cmd/metrics-realtime.go index 4f948da23..c43111c5e 100644 --- a/cmd/metrics-realtime.go +++ b/cmd/metrics-realtime.go @@ -25,23 +25,29 @@ import ( "github.com/minio/minio/internal/disk" ) -func collectLocalMetrics(types madmin.MetricType, hosts map[string]struct{}, disks map[string]struct{}) (m madmin.RealtimeMetrics) { +type collectMetricsOpts struct { + hosts map[string]struct{} + disks map[string]struct{} + jobID string +} + +func collectLocalMetrics(types madmin.MetricType, opts collectMetricsOpts) (m madmin.RealtimeMetrics) { if types == madmin.MetricsNone { return } - if len(hosts) > 0 { - if _, ok := hosts[globalMinioAddr]; !ok { + if len(opts.hosts) > 0 { + if _, ok := opts.hosts[globalMinioAddr]; !ok { return } } - if types.Contains(madmin.MetricsDisk) && !globalIsGateway { + if types.Contains(madmin.MetricsDisk) { m.ByDisk = make(map[string]madmin.DiskMetric) aggr := madmin.DiskMetric{ CollectedAt: time.Now(), } - for name, disk := range collectLocalDisksMetrics(disks) { + for name, disk := range collectLocalDisksMetrics(opts.disks) { m.ByDisk[name] = disk aggr.Merge(&disk) } @@ -56,6 +62,10 @@ func collectLocalMetrics(types madmin.MetricType, hosts map[string]struct{}, dis metrics := globalOSMetrics.report() m.Aggregated.OS = &metrics } + if types.Contains(madmin.MetricsBatchJobs) { + m.Aggregated.BatchJobs = globalBatchJobsMetrics.report(opts.jobID) + } + // Add types... 
// ByHost is a shallow reference, so careful about sharing. @@ -143,11 +153,11 @@ func collectLocalDisksMetrics(disks map[string]struct{}) map[string]madmin.DiskM return metrics } -func collectRemoteMetrics(ctx context.Context, types madmin.MetricType, hosts map[string]struct{}, disks map[string]struct{}) (m madmin.RealtimeMetrics) { +func collectRemoteMetrics(ctx context.Context, types madmin.MetricType, opts collectMetricsOpts) (m madmin.RealtimeMetrics) { if !globalIsDistErasure { return } - all := globalNotificationSys.GetMetrics(ctx, types, hosts, disks) + all := globalNotificationSys.GetMetrics(ctx, types, opts) for _, remote := range all { m.Merge(&remote) } diff --git a/cmd/notification.go b/cmd/notification.go index 2474d5934..2e34f1c7d 100644 --- a/cmd/notification.go +++ b/cmd/notification.go @@ -731,7 +731,7 @@ func (sys *NotificationSys) GetOSInfo(ctx context.Context) []madmin.OSInfo { } // GetMetrics - Get metrics from all peers. -func (sys *NotificationSys) GetMetrics(ctx context.Context, t madmin.MetricType, hosts map[string]struct{}, disks map[string]struct{}) []madmin.RealtimeMetrics { +func (sys *NotificationSys) GetMetrics(ctx context.Context, t madmin.MetricType, opts collectMetricsOpts) []madmin.RealtimeMetrics { reply := make([]madmin.RealtimeMetrics, len(sys.peerClients)) g := errgroup.WithNErrs(len(sys.peerClients)) @@ -740,8 +740,8 @@ func (sys *NotificationSys) GetMetrics(ctx context.Context, t madmin.MetricType, continue } host := client.host.String() - if len(hosts) > 0 { - if _, ok := hosts[host]; !ok { + if len(opts.hosts) > 0 { + if _, ok := opts.hosts[host]; !ok { continue } } @@ -749,7 +749,7 @@ func (sys *NotificationSys) GetMetrics(ctx context.Context, t madmin.MetricType, index := index g.Go(func() error { var err error - reply[index], err = sys.peerClients[index].GetMetrics(ctx, t, disks) + reply[index], err = sys.peerClients[index].GetMetrics(ctx, t, opts) return err }, index) } diff --git a/cmd/object-api-interface.go 
b/cmd/object-api-interface.go index ef4e36230..2c2673a70 100644 --- a/cmd/object-api-interface.go +++ b/cmd/object-api-interface.go @@ -85,6 +85,8 @@ type ObjectOptions struct { // mainly set for certain WRITE operations. SkipDecommissioned bool + WalkFilter func(info FileInfo) bool // WalkFilter returns 'true' to include the object, 'false' to skip it + WalkMarker string // set to skip until this object PrefixEnabledFn func(prefix string) bool // function which returns true if versioning is enabled on prefix // IndexCB will return any index created but the compression. diff --git a/cmd/peer-rest-client.go b/cmd/peer-rest-client.go index 03e91e04f..943bc7215 100644 --- a/cmd/peer-rest-client.go +++ b/cmd/peer-rest-client.go @@ -195,12 +195,13 @@ func (client *peerRESTClient) GetMemInfo(ctx context.Context) (info madmin.MemIn } // GetMetrics - fetch metrics from a remote node. -func (client *peerRESTClient) GetMetrics(ctx context.Context, t madmin.MetricType, diskMap map[string]struct{}) (info madmin.RealtimeMetrics, err error) { +func (client *peerRESTClient) GetMetrics(ctx context.Context, t madmin.MetricType, opts collectMetricsOpts) (info madmin.RealtimeMetrics, err error) { values := make(url.Values) - values.Set(peerRESTTypes, strconv.FormatUint(uint64(t), 10)) - for disk := range diskMap { + values.Set(peerRESTMetricsTypes, strconv.FormatUint(uint64(t), 10)) + for disk := range opts.disks { values.Set(peerRESTDisk, disk) } + values.Set(peerRESTJobID, opts.jobID) respBody, err := client.callWithContext(ctx, peerRESTMethodMetrics, values, nil, -1) if err != nil { return diff --git a/cmd/peer-rest-common.go b/cmd/peer-rest-common.go index f0ba0c89b..28901c4bc 100644 --- a/cmd/peer-rest-common.go +++ b/cmd/peer-rest-common.go @@ -91,8 +91,9 @@ const ( peerRESTConcurrent = "concurrent" peerRESTDuration = "duration" peerRESTStorageClass = "storage-class" - peerRESTTypes = "types" + peerRESTMetricsTypes = "types" peerRESTDisk = "disk" + peerRESTJobID = "job-id" peerRESTListenBucket = "bucket" 
peerRESTListenPrefix = "prefix" diff --git a/cmd/peer-rest-server.go b/cmd/peer-rest-server.go index 27764865b..0af707fcc 100644 --- a/cmd/peer-rest-server.go +++ b/cmd/peer-rest-server.go @@ -425,23 +425,25 @@ func (s *peerRESTServer) GetMetricsHandler(w http.ResponseWriter, r *http.Reques } var types madmin.MetricType - if t, _ := strconv.ParseUint(r.Form.Get(peerRESTTypes), 10, 64); t != 0 { + if t, _ := strconv.ParseUint(r.Form.Get(peerRESTMetricsTypes), 10, 64); t != 0 { types = madmin.MetricType(t) } else { types = madmin.MetricsAll } diskMap := make(map[string]struct{}) - if r.Form != nil { - for _, disk := range r.Form[peerRESTDisk] { - diskMap[disk] = struct{}{} - } + for _, disk := range r.Form[peerRESTDisk] { + diskMap[disk] = struct{}{} } + jobID := r.Form.Get(peerRESTJobID) ctx, cancel := context.WithCancel(r.Context()) defer cancel() - info := collectLocalMetrics(types, nil, diskMap) + info := collectLocalMetrics(types, collectMetricsOpts{ + disks: diskMap, + jobID: jobID, + }) logger.LogIf(ctx, gob.NewEncoder(w).Encode(info)) } @@ -1308,7 +1310,7 @@ func registerPeerRESTHandlers(router *mux.Router) { subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodServerInfo).HandlerFunc(httpTraceHdrs(server.ServerInfoHandler)) subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodProcInfo).HandlerFunc(httpTraceHdrs(server.GetProcInfoHandler)) subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMemInfo).HandlerFunc(httpTraceHdrs(server.GetMemInfoHandler)) - subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMetrics).HandlerFunc(httpTraceHdrs(server.GetMetricsHandler)).Queries(restQueries(peerRESTTypes)...) + subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMetrics).HandlerFunc(httpTraceHdrs(server.GetMetricsHandler)).Queries(restQueries(peerRESTMetricsTypes)...) 
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSysErrors).HandlerFunc(httpTraceHdrs(server.GetSysErrorsHandler)) subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSysServices).HandlerFunc(httpTraceHdrs(server.GetSysServicesHandler)) subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSysConfig).HandlerFunc(httpTraceHdrs(server.GetSysConfigHandler)) diff --git a/cmd/server-main.go b/cmd/server-main.go index 808355f14..00fe35537 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -601,6 +601,8 @@ func serverMain(ctx *cli.Context) { initBackgroundReplication(GlobalContext, newObject) initBackgroundTransition(GlobalContext, newObject) + globalBatchJobPool = newBatchJobPool(GlobalContext, newObject, 100) + go func() { err := globalTierConfigMgr.Init(GlobalContext, newObject) if err != nil { diff --git a/cmd/server-rlimit.go b/cmd/server-rlimit.go index 5a42bcf3a..c273779a3 100644 --- a/cmd/server-rlimit.go +++ b/cmd/server-rlimit.go @@ -45,8 +45,8 @@ func oldLinux() bool { func setMaxResources() (err error) { // Set the Go runtime max threads threshold to 90% of kernel setting. - sysMaxThreads, mErr := sys.GetMaxThreads() - if mErr == nil { + sysMaxThreads, err := sys.GetMaxThreads() + if err == nil { minioMaxThreads := (sysMaxThreads * 90) / 100 // Only set max threads if it is greater than the default one if minioMaxThreads > 10000 { diff --git a/docs/batch-jobs/README.md b/docs/batch-jobs/README.md new file mode 100644 index 000000000..ccb690435 --- /dev/null +++ b/docs/batch-jobs/README.md @@ -0,0 +1,152 @@ +# MinIO Batch Job +MinIO Batch Jobs is a MinIO object management feature that lets you manage objects at scale. Jobs currently supported by MinIO: + +- Replicate objects between buckets on multiple sites + +Upcoming Jobs + +- Copy objects from NAS to MinIO +- Copy objects from HDFS to MinIO + +## Replication Job +To perform replication via batch jobs, you create a job. 
The job consists of a job description YAML that describes + +- Source location from where the objects must be copied +- Target location to where the objects must be copied +- Fine-grained filtering to pick the relevant objects to copy from the source + +MinIO batch jobs framework also provides + +- Retrying a failed job automatically driven by user input +- Monitoring job progress in real-time +- Sending notifications upon completion or failure to a user configured target + +Following YAML describes the structure of a replication job, each value is documented and self-describing. + +```yaml +replicate: + apiVersion: v1 + # source of the objects to be replicated + source: + type: TYPE # valid values are "minio" + bucket: BUCKET + prefix: PREFIX + # NOTE: if source is remote then target must be "local" + # endpoint: ENDPOINT + # credentials: + # accessKey: ACCESS-KEY + # secretKey: SECRET-KEY + # sessionToken: SESSION-TOKEN # Available when rotating credentials are used + + # target where the objects must be replicated + target: + type: TYPE # valid values are "minio" + bucket: BUCKET + prefix: PREFIX + # NOTE: if target is remote then source must be "local" + # endpoint: ENDPOINT + # credentials: + # accessKey: ACCESS-KEY + # secretKey: SECRET-KEY + # sessionToken: SESSION-TOKEN # Available when rotating credentials are used + + # optional flags based filtering criteria + # for all source objects + flags: + filter: + newerThan: "7d" # match objects newer than this value (e.g. 7d10h31s) + olderThan: "7d" # match objects older than this value (e.g. 7d10h31s) + createdAfter: "date" # match objects created after "date" + createdBefore: "date" # match objects created before "date" + + ## NOTE: tags are not supported when "source" is remote. + # tags: + # - key: "name" + # value: "pick*" # match objects with tag 'name', with all values starting with 'pick' + + ## NOTE: metadata filter not supported when "source" is non-MinIO. 
+ # metadata: + # - key: "content-type" + # value: "image/*" # match objects with 'content-type', with all values starting with 'image/' + + notify: + endpoint: "https://notify.endpoint" # notification endpoint to receive job status events + token: "Bearer xxxxx" # optional authentication token for the notification endpoint + + retry: + attempts: 10 # number of retries for the job before giving up + delay: "500ms" # least amount of delay between each retry +``` + +You can create and run multiple 'replication' jobs at a time; there are no predefined limits set. + +## Batch Jobs Terminology + +### Job +A job is the basic unit of work for MinIO Batch Job. A job is a self-describing YAML; once this YAML is submitted and evaluated, MinIO performs the requested actions on each of the objects obtained under the described criteria in the job YAML file. + +### Type +Type describes the job type, such as replicating objects between MinIO sites. Each job performs a single type of operation across all objects that match the job description criteria. + +## Batch Jobs via Commandline +[mc](http://github.com/minio/mc) provides 'mc batch' command to create, start and manage submitted jobs. + +``` +NAME: + mc batch - manage batch jobs + +USAGE: + mc batch COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...] 
+ +COMMANDS: + generate generate a new batch job definition + start start a new batch job + list, ls list all current batch jobs + status summarize job events on MinIO server in real-time + describe describe job definition for a job +``` + +### Generate a job yaml +``` +mc batch generate alias/ replicate +``` + +### Start the batch job (returns back the JID) +``` +mc batch start alias/ ./replicate.yaml +Successfully start 'replicate' job `E24HH4nNMcgY5taynaPfxu` on '2022-09-26 17:19:06.296974771 -0700 PDT' +``` + +### List all batch jobs +``` +mc batch list alias/ +ID TYPE USER STARTED +E24HH4nNMcgY5taynaPfxu replicate minioadmin 1 minute ago +``` + +### List all 'replicate' batch jobs +``` +mc batch list alias/ --type replicate +ID TYPE USER STARTED +E24HH4nNMcgY5taynaPfxu replicate minioadmin 1 minute ago +``` + +### Real-time 'status' for a batch job +``` +mc batch status myminio/ E24HH4nNMcgY5taynaPfxu +●∙∙ +Objects: 28766 +Versions: 28766 +Throughput: 3.0 MiB/s +Transferred: 406 MiB +Elapsed: 2m14.227222868s +CurrObjName: share/doc/xml-core/examples/foo.xmlcatalogs +``` + +### 'describe' the batch job yaml. +``` +mc batch describe myminio/ E24HH4nNMcgY5taynaPfxu +replicate: + apiVersion: v1 +... 
+``` diff --git a/go.mod b/go.mod index 2d063df64..4944b750d 100644 --- a/go.mod +++ b/go.mod @@ -40,6 +40,7 @@ require ( github.com/klauspost/readahead v1.4.0 github.com/klauspost/reedsolomon v1.11.0 github.com/lib/pq v1.10.7 + github.com/lithammer/shortuuid/v4 v4.0.0 github.com/miekg/dns v1.1.50 github.com/minio/cli v1.24.0 github.com/minio/console v0.20.6-0.20221003060434-413870e995a9 @@ -74,7 +75,7 @@ require ( github.com/secure-io/sio-go v0.3.1 github.com/shirou/gopsutil/v3 v3.22.8 github.com/streadway/amqp v1.0.0 - github.com/tinylib/msgp v1.1.7-0.20211026165309-e818a1881b0e + github.com/tinylib/msgp v1.1.7-0.20220719154719-f3635b96e483 github.com/valyala/bytebufferpool v1.0.0 github.com/xdg/scram v1.0.5 github.com/yargevad/filepathx v1.0.0 @@ -88,6 +89,7 @@ require ( golang.org/x/sys v0.0.0-20220915200043-7b5979e65e41 golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 google.golang.org/api v0.96.0 + gopkg.in/yaml.v2 v2.4.0 ) require ( @@ -222,6 +224,5 @@ require ( gopkg.in/h2non/filetype.v1 v1.0.5 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index e8b097de6..ce4930a1a 100644 --- a/go.sum +++ b/go.sum @@ -584,6 +584,8 @@ github.com/lestrrat-go/pdebug/v3 v3.0.1/go.mod h1:za+m+Ve24yCxTEhR59N7UlnJomWwCi github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lithammer/shortuuid/v4 v4.0.0 h1:QRbbVkfgNippHOS8PXDkti4NaWeyYfcBTHtw7k08o4c= +github.com/lithammer/shortuuid/v4 v4.0.0/go.mod h1:Zs8puNcrvf2rV9rTH51ZLLcj7ZXqQI3lv67aw4KiB1Y= github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= 
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= @@ -899,8 +901,8 @@ github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tinylib/msgp v1.1.3/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= -github.com/tinylib/msgp v1.1.7-0.20211026165309-e818a1881b0e h1:P5tyWbssToKowBPTA1/EzqPXwrZNc8ZeNPdjgpcDEoI= -github.com/tinylib/msgp v1.1.7-0.20211026165309-e818a1881b0e/go.mod h1:g7jEyb18KPe65d9RRhGw+ThaJr5duyBH8eaFgBUor7Y= +github.com/tinylib/msgp v1.1.7-0.20220719154719-f3635b96e483 h1:dV39KLgsNZGri7Hn5QhHwRQzGf7kHOki2vZujFXDFhI= +github.com/tinylib/msgp v1.1.7-0.20220719154719-f3635b96e483/go.mod h1:g7jEyb18KPe65d9RRhGw+ThaJr5duyBH8eaFgBUor7Y= github.com/tklauser/go-sysconf v0.3.6/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= diff --git a/internal/auth/credentials.go b/internal/auth/credentials.go index 654f79da5..a5eb4b29b 100644 --- a/internal/auth/credentials.go +++ b/internal/auth/credentials.go @@ -94,10 +94,10 @@ const ( // Credentials holds access and secret keys. 
type Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` - SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty" yaml:"accessKey"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty" yaml:"secretKey"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty" yaml:"sessionToken"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty" yaml:"-"` Status string `xml:"-" json:"status,omitempty"` ParentUser string `xml:"-" json:"parentUser,omitempty"` Groups []string `xml:"-" json:"groups,omitempty"`