diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go
index 9d29c9c8c..666df99c6 100644
--- a/cmd/admin-handlers_test.go
+++ b/cmd/admin-handlers_test.go
@@ -182,7 +182,7 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
adminTestBed, err := prepareAdminErasureTestBed(ctx)
if err != nil {
- t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.")
+ t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.", err)
}
defer adminTestBed.TearDown()
@@ -253,7 +253,7 @@ func TestAdminServerInfo(t *testing.T) {
adminTestBed, err := prepareAdminErasureTestBed(ctx)
if err != nil {
- t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.")
+ t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.", err)
}
defer adminTestBed.TearDown()
diff --git a/cmd/api-errors.go b/cmd/api-errors.go
index d14f7996c..6ab738f91 100644
--- a/cmd/api-errors.go
+++ b/cmd/api-errors.go
@@ -231,7 +231,6 @@ const (
// MinIO extended errors.
ErrReadQuorum
ErrWriteQuorum
- ErrParentIsObject
ErrStorageFull
ErrRequestBodyParse
ErrObjectExistsAsDirectory
@@ -1122,11 +1121,6 @@ var errorCodes = errorCodeMap{
Description: "Storage backend has reached its minimum free disk threshold. Please delete a few objects to proceed.",
HTTPStatusCode: http.StatusInsufficientStorage,
},
- ErrParentIsObject: {
- Code: "XMinioParentIsObject",
- Description: "Object-prefix is already an object, please choose a different object-prefix name.",
- HTTPStatusCode: http.StatusBadRequest,
- },
ErrRequestBodyParse: {
Code: "XMinioRequestBodyParse",
Description: "The request body failed to parse.",
@@ -1899,8 +1893,6 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrObjectExistsAsDirectory
case PrefixAccessDenied:
apiErr = ErrAccessDenied
- case ParentIsObject:
- apiErr = ErrParentIsObject
case BucketNameInvalid:
apiErr = ErrInvalidBucketName
case BucketNotFound:
diff --git a/cmd/apierrorcode_string.go b/cmd/apierrorcode_string.go
index 314d0f910..04043d3ca 100644
--- a/cmd/apierrorcode_string.go
+++ b/cmd/apierrorcode_string.go
@@ -154,142 +154,141 @@ func _() {
_ = x[ErrContentSHA256Mismatch-143]
_ = x[ErrReadQuorum-144]
_ = x[ErrWriteQuorum-145]
- _ = x[ErrParentIsObject-146]
- _ = x[ErrStorageFull-147]
- _ = x[ErrRequestBodyParse-148]
- _ = x[ErrObjectExistsAsDirectory-149]
- _ = x[ErrInvalidObjectName-150]
- _ = x[ErrInvalidObjectNamePrefixSlash-151]
- _ = x[ErrInvalidResourceName-152]
- _ = x[ErrServerNotInitialized-153]
- _ = x[ErrOperationTimedOut-154]
- _ = x[ErrClientDisconnected-155]
- _ = x[ErrOperationMaxedOut-156]
- _ = x[ErrInvalidRequest-157]
- _ = x[ErrTransitionStorageClassNotFoundError-158]
- _ = x[ErrInvalidStorageClass-159]
- _ = x[ErrBackendDown-160]
- _ = x[ErrMalformedJSON-161]
- _ = x[ErrAdminNoSuchUser-162]
- _ = x[ErrAdminNoSuchGroup-163]
- _ = x[ErrAdminGroupNotEmpty-164]
- _ = x[ErrAdminNoSuchPolicy-165]
- _ = x[ErrAdminInvalidArgument-166]
- _ = x[ErrAdminInvalidAccessKey-167]
- _ = x[ErrAdminInvalidSecretKey-168]
- _ = x[ErrAdminConfigNoQuorum-169]
- _ = x[ErrAdminConfigTooLarge-170]
- _ = x[ErrAdminConfigBadJSON-171]
- _ = x[ErrAdminConfigDuplicateKeys-172]
- _ = x[ErrAdminCredentialsMismatch-173]
- _ = x[ErrInsecureClientRequest-174]
- _ = x[ErrObjectTampered-175]
- _ = x[ErrAdminBucketQuotaExceeded-176]
- _ = x[ErrAdminNoSuchQuotaConfiguration-177]
- _ = x[ErrHealNotImplemented-178]
- _ = x[ErrHealNoSuchProcess-179]
- _ = x[ErrHealInvalidClientToken-180]
- _ = x[ErrHealMissingBucket-181]
- _ = x[ErrHealAlreadyRunning-182]
- _ = x[ErrHealOverlappingPaths-183]
- _ = x[ErrIncorrectContinuationToken-184]
- _ = x[ErrEmptyRequestBody-185]
- _ = x[ErrUnsupportedFunction-186]
- _ = x[ErrInvalidExpressionType-187]
- _ = x[ErrBusy-188]
- _ = x[ErrUnauthorizedAccess-189]
- _ = x[ErrExpressionTooLong-190]
- _ = x[ErrIllegalSQLFunctionArgument-191]
- _ = x[ErrInvalidKeyPath-192]
- _ = x[ErrInvalidCompressionFormat-193]
- _ = x[ErrInvalidFileHeaderInfo-194]
- _ = x[ErrInvalidJSONType-195]
- _ = x[ErrInvalidQuoteFields-196]
- _ = x[ErrInvalidRequestParameter-197]
- _ = x[ErrInvalidDataType-198]
- _ = x[ErrInvalidTextEncoding-199]
- _ = x[ErrInvalidDataSource-200]
- _ = x[ErrInvalidTableAlias-201]
- _ = x[ErrMissingRequiredParameter-202]
- _ = x[ErrObjectSerializationConflict-203]
- _ = x[ErrUnsupportedSQLOperation-204]
- _ = x[ErrUnsupportedSQLStructure-205]
- _ = x[ErrUnsupportedSyntax-206]
- _ = x[ErrUnsupportedRangeHeader-207]
- _ = x[ErrLexerInvalidChar-208]
- _ = x[ErrLexerInvalidOperator-209]
- _ = x[ErrLexerInvalidLiteral-210]
- _ = x[ErrLexerInvalidIONLiteral-211]
- _ = x[ErrParseExpectedDatePart-212]
- _ = x[ErrParseExpectedKeyword-213]
- _ = x[ErrParseExpectedTokenType-214]
- _ = x[ErrParseExpected2TokenTypes-215]
- _ = x[ErrParseExpectedNumber-216]
- _ = x[ErrParseExpectedRightParenBuiltinFunctionCall-217]
- _ = x[ErrParseExpectedTypeName-218]
- _ = x[ErrParseExpectedWhenClause-219]
- _ = x[ErrParseUnsupportedToken-220]
- _ = x[ErrParseUnsupportedLiteralsGroupBy-221]
- _ = x[ErrParseExpectedMember-222]
- _ = x[ErrParseUnsupportedSelect-223]
- _ = x[ErrParseUnsupportedCase-224]
- _ = x[ErrParseUnsupportedCaseClause-225]
- _ = x[ErrParseUnsupportedAlias-226]
- _ = x[ErrParseUnsupportedSyntax-227]
- _ = x[ErrParseUnknownOperator-228]
- _ = x[ErrParseMissingIdentAfterAt-229]
- _ = x[ErrParseUnexpectedOperator-230]
- _ = x[ErrParseUnexpectedTerm-231]
- _ = x[ErrParseUnexpectedToken-232]
- _ = x[ErrParseUnexpectedKeyword-233]
- _ = x[ErrParseExpectedExpression-234]
- _ = x[ErrParseExpectedLeftParenAfterCast-235]
- _ = x[ErrParseExpectedLeftParenValueConstructor-236]
- _ = x[ErrParseExpectedLeftParenBuiltinFunctionCall-237]
- _ = x[ErrParseExpectedArgumentDelimiter-238]
- _ = x[ErrParseCastArity-239]
- _ = x[ErrParseInvalidTypeParam-240]
- _ = x[ErrParseEmptySelect-241]
- _ = x[ErrParseSelectMissingFrom-242]
- _ = x[ErrParseExpectedIdentForGroupName-243]
- _ = x[ErrParseExpectedIdentForAlias-244]
- _ = x[ErrParseUnsupportedCallWithStar-245]
- _ = x[ErrParseNonUnaryAgregateFunctionCall-246]
- _ = x[ErrParseMalformedJoin-247]
- _ = x[ErrParseExpectedIdentForAt-248]
- _ = x[ErrParseAsteriskIsNotAloneInSelectList-249]
- _ = x[ErrParseCannotMixSqbAndWildcardInSelectList-250]
- _ = x[ErrParseInvalidContextForWildcardInSelectList-251]
- _ = x[ErrIncorrectSQLFunctionArgumentType-252]
- _ = x[ErrValueParseFailure-253]
- _ = x[ErrEvaluatorInvalidArguments-254]
- _ = x[ErrIntegerOverflow-255]
- _ = x[ErrLikeInvalidInputs-256]
- _ = x[ErrCastFailed-257]
- _ = x[ErrInvalidCast-258]
- _ = x[ErrEvaluatorInvalidTimestampFormatPattern-259]
- _ = x[ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing-260]
- _ = x[ErrEvaluatorTimestampFormatPatternDuplicateFields-261]
- _ = x[ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch-262]
- _ = x[ErrEvaluatorUnterminatedTimestampFormatPatternToken-263]
- _ = x[ErrEvaluatorInvalidTimestampFormatPatternToken-264]
- _ = x[ErrEvaluatorInvalidTimestampFormatPatternSymbol-265]
- _ = x[ErrEvaluatorBindingDoesNotExist-266]
- _ = x[ErrMissingHeaders-267]
- _ = x[ErrInvalidColumnIndex-268]
- _ = x[ErrAdminConfigNotificationTargetsFailed-269]
- _ = x[ErrAdminProfilerNotEnabled-270]
- _ = x[ErrInvalidDecompressedSize-271]
- _ = x[ErrAddUserInvalidArgument-272]
- _ = x[ErrAdminAccountNotEligible-273]
- _ = x[ErrAccountNotEligible-274]
- _ = x[ErrAdminServiceAccountNotFound-275]
- _ = x[ErrPostPolicyConditionInvalidFormat-276]
+ _ = x[ErrStorageFull-146]
+ _ = x[ErrRequestBodyParse-147]
+ _ = x[ErrObjectExistsAsDirectory-148]
+ _ = x[ErrInvalidObjectName-149]
+ _ = x[ErrInvalidObjectNamePrefixSlash-150]
+ _ = x[ErrInvalidResourceName-151]
+ _ = x[ErrServerNotInitialized-152]
+ _ = x[ErrOperationTimedOut-153]
+ _ = x[ErrClientDisconnected-154]
+ _ = x[ErrOperationMaxedOut-155]
+ _ = x[ErrInvalidRequest-156]
+ _ = x[ErrTransitionStorageClassNotFoundError-157]
+ _ = x[ErrInvalidStorageClass-158]
+ _ = x[ErrBackendDown-159]
+ _ = x[ErrMalformedJSON-160]
+ _ = x[ErrAdminNoSuchUser-161]
+ _ = x[ErrAdminNoSuchGroup-162]
+ _ = x[ErrAdminGroupNotEmpty-163]
+ _ = x[ErrAdminNoSuchPolicy-164]
+ _ = x[ErrAdminInvalidArgument-165]
+ _ = x[ErrAdminInvalidAccessKey-166]
+ _ = x[ErrAdminInvalidSecretKey-167]
+ _ = x[ErrAdminConfigNoQuorum-168]
+ _ = x[ErrAdminConfigTooLarge-169]
+ _ = x[ErrAdminConfigBadJSON-170]
+ _ = x[ErrAdminConfigDuplicateKeys-171]
+ _ = x[ErrAdminCredentialsMismatch-172]
+ _ = x[ErrInsecureClientRequest-173]
+ _ = x[ErrObjectTampered-174]
+ _ = x[ErrAdminBucketQuotaExceeded-175]
+ _ = x[ErrAdminNoSuchQuotaConfiguration-176]
+ _ = x[ErrHealNotImplemented-177]
+ _ = x[ErrHealNoSuchProcess-178]
+ _ = x[ErrHealInvalidClientToken-179]
+ _ = x[ErrHealMissingBucket-180]
+ _ = x[ErrHealAlreadyRunning-181]
+ _ = x[ErrHealOverlappingPaths-182]
+ _ = x[ErrIncorrectContinuationToken-183]
+ _ = x[ErrEmptyRequestBody-184]
+ _ = x[ErrUnsupportedFunction-185]
+ _ = x[ErrInvalidExpressionType-186]
+ _ = x[ErrBusy-187]
+ _ = x[ErrUnauthorizedAccess-188]
+ _ = x[ErrExpressionTooLong-189]
+ _ = x[ErrIllegalSQLFunctionArgument-190]
+ _ = x[ErrInvalidKeyPath-191]
+ _ = x[ErrInvalidCompressionFormat-192]
+ _ = x[ErrInvalidFileHeaderInfo-193]
+ _ = x[ErrInvalidJSONType-194]
+ _ = x[ErrInvalidQuoteFields-195]
+ _ = x[ErrInvalidRequestParameter-196]
+ _ = x[ErrInvalidDataType-197]
+ _ = x[ErrInvalidTextEncoding-198]
+ _ = x[ErrInvalidDataSource-199]
+ _ = x[ErrInvalidTableAlias-200]
+ _ = x[ErrMissingRequiredParameter-201]
+ _ = x[ErrObjectSerializationConflict-202]
+ _ = x[ErrUnsupportedSQLOperation-203]
+ _ = x[ErrUnsupportedSQLStructure-204]
+ _ = x[ErrUnsupportedSyntax-205]
+ _ = x[ErrUnsupportedRangeHeader-206]
+ _ = x[ErrLexerInvalidChar-207]
+ _ = x[ErrLexerInvalidOperator-208]
+ _ = x[ErrLexerInvalidLiteral-209]
+ _ = x[ErrLexerInvalidIONLiteral-210]
+ _ = x[ErrParseExpectedDatePart-211]
+ _ = x[ErrParseExpectedKeyword-212]
+ _ = x[ErrParseExpectedTokenType-213]
+ _ = x[ErrParseExpected2TokenTypes-214]
+ _ = x[ErrParseExpectedNumber-215]
+ _ = x[ErrParseExpectedRightParenBuiltinFunctionCall-216]
+ _ = x[ErrParseExpectedTypeName-217]
+ _ = x[ErrParseExpectedWhenClause-218]
+ _ = x[ErrParseUnsupportedToken-219]
+ _ = x[ErrParseUnsupportedLiteralsGroupBy-220]
+ _ = x[ErrParseExpectedMember-221]
+ _ = x[ErrParseUnsupportedSelect-222]
+ _ = x[ErrParseUnsupportedCase-223]
+ _ = x[ErrParseUnsupportedCaseClause-224]
+ _ = x[ErrParseUnsupportedAlias-225]
+ _ = x[ErrParseUnsupportedSyntax-226]
+ _ = x[ErrParseUnknownOperator-227]
+ _ = x[ErrParseMissingIdentAfterAt-228]
+ _ = x[ErrParseUnexpectedOperator-229]
+ _ = x[ErrParseUnexpectedTerm-230]
+ _ = x[ErrParseUnexpectedToken-231]
+ _ = x[ErrParseUnexpectedKeyword-232]
+ _ = x[ErrParseExpectedExpression-233]
+ _ = x[ErrParseExpectedLeftParenAfterCast-234]
+ _ = x[ErrParseExpectedLeftParenValueConstructor-235]
+ _ = x[ErrParseExpectedLeftParenBuiltinFunctionCall-236]
+ _ = x[ErrParseExpectedArgumentDelimiter-237]
+ _ = x[ErrParseCastArity-238]
+ _ = x[ErrParseInvalidTypeParam-239]
+ _ = x[ErrParseEmptySelect-240]
+ _ = x[ErrParseSelectMissingFrom-241]
+ _ = x[ErrParseExpectedIdentForGroupName-242]
+ _ = x[ErrParseExpectedIdentForAlias-243]
+ _ = x[ErrParseUnsupportedCallWithStar-244]
+ _ = x[ErrParseNonUnaryAgregateFunctionCall-245]
+ _ = x[ErrParseMalformedJoin-246]
+ _ = x[ErrParseExpectedIdentForAt-247]
+ _ = x[ErrParseAsteriskIsNotAloneInSelectList-248]
+ _ = x[ErrParseCannotMixSqbAndWildcardInSelectList-249]
+ _ = x[ErrParseInvalidContextForWildcardInSelectList-250]
+ _ = x[ErrIncorrectSQLFunctionArgumentType-251]
+ _ = x[ErrValueParseFailure-252]
+ _ = x[ErrEvaluatorInvalidArguments-253]
+ _ = x[ErrIntegerOverflow-254]
+ _ = x[ErrLikeInvalidInputs-255]
+ _ = x[ErrCastFailed-256]
+ _ = x[ErrInvalidCast-257]
+ _ = x[ErrEvaluatorInvalidTimestampFormatPattern-258]
+ _ = x[ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing-259]
+ _ = x[ErrEvaluatorTimestampFormatPatternDuplicateFields-260]
+ _ = x[ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch-261]
+ _ = x[ErrEvaluatorUnterminatedTimestampFormatPatternToken-262]
+ _ = x[ErrEvaluatorInvalidTimestampFormatPatternToken-263]
+ _ = x[ErrEvaluatorInvalidTimestampFormatPatternSymbol-264]
+ _ = x[ErrEvaluatorBindingDoesNotExist-265]
+ _ = x[ErrMissingHeaders-266]
+ _ = x[ErrInvalidColumnIndex-267]
+ _ = x[ErrAdminConfigNotificationTargetsFailed-268]
+ _ = x[ErrAdminProfilerNotEnabled-269]
+ _ = x[ErrInvalidDecompressedSize-270]
+ _ = x[ErrAddUserInvalidArgument-271]
+ _ = x[ErrAdminAccountNotEligible-272]
+ _ = x[ErrAccountNotEligible-273]
+ _ = x[ErrAdminServiceAccountNotFound-274]
+ _ = x[ErrPostPolicyConditionInvalidFormat-275]
}
-const _APIErrorCode_name = "NoneAccessDeniedBadDigestEntityTooSmallEntityTooLargePolicyTooLargeIncompleteBodyInternalErrorInvalidAccessKeyIDInvalidBucketNameInvalidDigestInvalidRangeInvalidRangePartNumberInvalidCopyPartRangeInvalidCopyPartRangeSourceInvalidMaxKeysInvalidEncodingMethodInvalidMaxUploadsInvalidMaxPartsInvalidPartNumberMarkerInvalidPartNumberInvalidRequestBodyInvalidCopySourceInvalidMetadataDirectiveInvalidCopyDestInvalidPolicyDocumentInvalidObjectStateMalformedXMLMissingContentLengthMissingContentMD5MissingRequestBodyErrorMissingSecurityHeaderNoSuchBucketNoSuchBucketPolicyNoSuchBucketLifecycleNoSuchLifecycleConfigurationNoSuchBucketSSEConfigNoSuchCORSConfigurationNoSuchWebsiteConfigurationReplicationConfigurationNotFoundErrorRemoteDestinationNotFoundErrorReplicationDestinationMissingLockRemoteTargetNotFoundErrorReplicationRemoteConnectionErrorReplicationBandwidthLimitErrorBucketRemoteIdenticalToSourceBucketRemoteAlreadyExistsBucketRemoteLabelInUseBucketRemoteArnTypeInvalidBucketRemoteArnInvalidBucketRemoteRemoveDisallowedRemoteTargetNotVersionedErrorReplicationSourceNotVersionedErrorReplicationNeedsVersioningErrorReplicationBucketNeedsVersioningErrorReplicationNoMatchingRuleErrorObjectRestoreAlreadyInProgressNoSuchKeyNoSuchUploadInvalidVersionIDNoSuchVersionNotImplementedPreconditionFailedRequestTimeTooSkewedSignatureDoesNotMatchMethodNotAllowedInvalidPartInvalidPartOrderAuthorizationHeaderMalformedMalformedPOSTRequestPOSTFileRequiredSignatureVersionNotSupportedBucketNotEmptyAllAccessDisabledMalformedPolicyMissingFieldsMissingCredTagCredMalformedInvalidRegionInvalidServiceS3InvalidServiceSTSInvalidRequestVersionMissingSignTagMissingSignHeadersTagMalformedDateMalformedPresignedDateMalformedCredentialDateMalformedCredentialRegionMalformedExpiresNegativeExpiresAuthHeaderEmptyExpiredPresignRequestRequestNotReadyYetUnsignedHeadersMissingDateHeaderInvalidQuerySignatureAlgoInvalidQueryParamsBucketAlreadyOwnedByYouInvalidDurationBucketAlreadyExistsMetadataToo
LargeUnsupportedMetadataMaximumExpiresSlowDownInvalidPrefixMarkerBadRequestKeyTooLongErrorInvalidBucketObjectLockConfigurationObjectLockConfigurationNotFoundObjectLockConfigurationNotAllowedNoSuchObjectLockConfigurationObjectLockedInvalidRetentionDatePastObjectLockRetainDateUnknownWORMModeDirectiveBucketTaggingNotFoundObjectLockInvalidHeadersInvalidTagDirectiveInvalidEncryptionMethodInsecureSSECustomerRequestSSEMultipartEncryptedSSEEncryptedObjectInvalidEncryptionParametersInvalidSSECustomerAlgorithmInvalidSSECustomerKeyMissingSSECustomerKeyMissingSSECustomerKeyMD5SSECustomerKeyMD5MismatchInvalidSSECustomerParametersIncompatibleEncryptionMethodKMSNotConfiguredNoAccessKeyInvalidTokenEventNotificationARNNotificationRegionNotificationOverlappingFilterNotificationFilterNameInvalidFilterNamePrefixFilterNameSuffixFilterValueInvalidOverlappingConfigsUnsupportedNotificationContentSHA256MismatchReadQuorumWriteQuorumParentIsObjectStorageFullRequestBodyParseObjectExistsAsDirectoryInvalidObjectNameInvalidObjectNamePrefixSlashInvalidResourceNameServerNotInitializedOperationTimedOutClientDisconnectedOperationMaxedOutInvalidRequestTransitionStorageClassNotFoundErrorInvalidStorageClassBackendDownMalformedJSONAdminNoSuchUserAdminNoSuchGroupAdminGroupNotEmptyAdminNoSuchPolicyAdminInvalidArgumentAdminInvalidAccessKeyAdminInvalidSecretKeyAdminConfigNoQuorumAdminConfigTooLargeAdminConfigBadJSONAdminConfigDuplicateKeysAdminCredentialsMismatchInsecureClientRequestObjectTamperedAdminBucketQuotaExceededAdminNoSuchQuotaConfigurationHealNotImplementedHealNoSuchProcessHealInvalidClientTokenHealMissingBucketHealAlreadyRunningHealOverlappingPathsIncorrectContinuationTokenEmptyRequestBodyUnsupportedFunctionInvalidExpressionTypeBusyUnauthorizedAccessExpressionTooLongIllegalSQLFunctionArgumentInvalidKeyPathInvalidCompressionFormatInvalidFileHeaderInfoInvalidJSONTypeInvalidQuoteFieldsInvalidRequestParameterInvalidDataTypeInvalidTextEncodingInvalidDataSourceInvalidTableAliasMissingRequiredParameterOb
jectSerializationConflictUnsupportedSQLOperationUnsupportedSQLStructureUnsupportedSyntaxUnsupportedRangeHeaderLexerInvalidCharLexerInvalidOperatorLexerInvalidLiteralLexerInvalidIONLiteralParseExpectedDatePartParseExpectedKeywordParseExpectedTokenTypeParseExpected2TokenTypesParseExpectedNumberParseExpectedRightParenBuiltinFunctionCallParseExpectedTypeNameParseExpectedWhenClauseParseUnsupportedTokenParseUnsupportedLiteralsGroupByParseExpectedMemberParseUnsupportedSelectParseUnsupportedCaseParseUnsupportedCaseClauseParseUnsupportedAliasParseUnsupportedSyntaxParseUnknownOperatorParseMissingIdentAfterAtParseUnexpectedOperatorParseUnexpectedTermParseUnexpectedTokenParseUnexpectedKeywordParseExpectedExpressionParseExpectedLeftParenAfterCastParseExpectedLeftParenValueConstructorParseExpectedLeftParenBuiltinFunctionCallParseExpectedArgumentDelimiterParseCastArityParseInvalidTypeParamParseEmptySelectParseSelectMissingFromParseExpectedIdentForGroupNameParseExpectedIdentForAliasParseUnsupportedCallWithStarParseNonUnaryAgregateFunctionCallParseMalformedJoinParseExpectedIdentForAtParseAsteriskIsNotAloneInSelectListParseCannotMixSqbAndWildcardInSelectListParseInvalidContextForWildcardInSelectListIncorrectSQLFunctionArgumentTypeValueParseFailureEvaluatorInvalidArgumentsIntegerOverflowLikeInvalidInputsCastFailedInvalidCastEvaluatorInvalidTimestampFormatPatternEvaluatorInvalidTimestampFormatPatternSymbolForParsingEvaluatorTimestampFormatPatternDuplicateFieldsEvaluatorTimestampFormatPatternHourClockAmPmMismatchEvaluatorUnterminatedTimestampFormatPatternTokenEvaluatorInvalidTimestampFormatPatternTokenEvaluatorInvalidTimestampFormatPatternSymbolEvaluatorBindingDoesNotExistMissingHeadersInvalidColumnIndexAdminConfigNotificationTargetsFailedAdminProfilerNotEnabledInvalidDecompressedSizeAddUserInvalidArgumentAdminAccountNotEligibleAccountNotEligibleAdminServiceAccountNotFoundPostPolicyConditionInvalidFormat"
+const _APIErrorCode_name = "NoneAccessDeniedBadDigestEntityTooSmallEntityTooLargePolicyTooLargeIncompleteBodyInternalErrorInvalidAccessKeyIDInvalidBucketNameInvalidDigestInvalidRangeInvalidRangePartNumberInvalidCopyPartRangeInvalidCopyPartRangeSourceInvalidMaxKeysInvalidEncodingMethodInvalidMaxUploadsInvalidMaxPartsInvalidPartNumberMarkerInvalidPartNumberInvalidRequestBodyInvalidCopySourceInvalidMetadataDirectiveInvalidCopyDestInvalidPolicyDocumentInvalidObjectStateMalformedXMLMissingContentLengthMissingContentMD5MissingRequestBodyErrorMissingSecurityHeaderNoSuchBucketNoSuchBucketPolicyNoSuchBucketLifecycleNoSuchLifecycleConfigurationNoSuchBucketSSEConfigNoSuchCORSConfigurationNoSuchWebsiteConfigurationReplicationConfigurationNotFoundErrorRemoteDestinationNotFoundErrorReplicationDestinationMissingLockRemoteTargetNotFoundErrorReplicationRemoteConnectionErrorReplicationBandwidthLimitErrorBucketRemoteIdenticalToSourceBucketRemoteAlreadyExistsBucketRemoteLabelInUseBucketRemoteArnTypeInvalidBucketRemoteArnInvalidBucketRemoteRemoveDisallowedRemoteTargetNotVersionedErrorReplicationSourceNotVersionedErrorReplicationNeedsVersioningErrorReplicationBucketNeedsVersioningErrorReplicationNoMatchingRuleErrorObjectRestoreAlreadyInProgressNoSuchKeyNoSuchUploadInvalidVersionIDNoSuchVersionNotImplementedPreconditionFailedRequestTimeTooSkewedSignatureDoesNotMatchMethodNotAllowedInvalidPartInvalidPartOrderAuthorizationHeaderMalformedMalformedPOSTRequestPOSTFileRequiredSignatureVersionNotSupportedBucketNotEmptyAllAccessDisabledMalformedPolicyMissingFieldsMissingCredTagCredMalformedInvalidRegionInvalidServiceS3InvalidServiceSTSInvalidRequestVersionMissingSignTagMissingSignHeadersTagMalformedDateMalformedPresignedDateMalformedCredentialDateMalformedCredentialRegionMalformedExpiresNegativeExpiresAuthHeaderEmptyExpiredPresignRequestRequestNotReadyYetUnsignedHeadersMissingDateHeaderInvalidQuerySignatureAlgoInvalidQueryParamsBucketAlreadyOwnedByYouInvalidDurationBucketAlreadyExistsMetadataToo
LargeUnsupportedMetadataMaximumExpiresSlowDownInvalidPrefixMarkerBadRequestKeyTooLongErrorInvalidBucketObjectLockConfigurationObjectLockConfigurationNotFoundObjectLockConfigurationNotAllowedNoSuchObjectLockConfigurationObjectLockedInvalidRetentionDatePastObjectLockRetainDateUnknownWORMModeDirectiveBucketTaggingNotFoundObjectLockInvalidHeadersInvalidTagDirectiveInvalidEncryptionMethodInsecureSSECustomerRequestSSEMultipartEncryptedSSEEncryptedObjectInvalidEncryptionParametersInvalidSSECustomerAlgorithmInvalidSSECustomerKeyMissingSSECustomerKeyMissingSSECustomerKeyMD5SSECustomerKeyMD5MismatchInvalidSSECustomerParametersIncompatibleEncryptionMethodKMSNotConfiguredNoAccessKeyInvalidTokenEventNotificationARNNotificationRegionNotificationOverlappingFilterNotificationFilterNameInvalidFilterNamePrefixFilterNameSuffixFilterValueInvalidOverlappingConfigsUnsupportedNotificationContentSHA256MismatchReadQuorumWriteQuorumStorageFullRequestBodyParseObjectExistsAsDirectoryInvalidObjectNameInvalidObjectNamePrefixSlashInvalidResourceNameServerNotInitializedOperationTimedOutClientDisconnectedOperationMaxedOutInvalidRequestTransitionStorageClassNotFoundErrorInvalidStorageClassBackendDownMalformedJSONAdminNoSuchUserAdminNoSuchGroupAdminGroupNotEmptyAdminNoSuchPolicyAdminInvalidArgumentAdminInvalidAccessKeyAdminInvalidSecretKeyAdminConfigNoQuorumAdminConfigTooLargeAdminConfigBadJSONAdminConfigDuplicateKeysAdminCredentialsMismatchInsecureClientRequestObjectTamperedAdminBucketQuotaExceededAdminNoSuchQuotaConfigurationHealNotImplementedHealNoSuchProcessHealInvalidClientTokenHealMissingBucketHealAlreadyRunningHealOverlappingPathsIncorrectContinuationTokenEmptyRequestBodyUnsupportedFunctionInvalidExpressionTypeBusyUnauthorizedAccessExpressionTooLongIllegalSQLFunctionArgumentInvalidKeyPathInvalidCompressionFormatInvalidFileHeaderInfoInvalidJSONTypeInvalidQuoteFieldsInvalidRequestParameterInvalidDataTypeInvalidTextEncodingInvalidDataSourceInvalidTableAliasMissingRequiredParameterObjectSerializat
ionConflictUnsupportedSQLOperationUnsupportedSQLStructureUnsupportedSyntaxUnsupportedRangeHeaderLexerInvalidCharLexerInvalidOperatorLexerInvalidLiteralLexerInvalidIONLiteralParseExpectedDatePartParseExpectedKeywordParseExpectedTokenTypeParseExpected2TokenTypesParseExpectedNumberParseExpectedRightParenBuiltinFunctionCallParseExpectedTypeNameParseExpectedWhenClauseParseUnsupportedTokenParseUnsupportedLiteralsGroupByParseExpectedMemberParseUnsupportedSelectParseUnsupportedCaseParseUnsupportedCaseClauseParseUnsupportedAliasParseUnsupportedSyntaxParseUnknownOperatorParseMissingIdentAfterAtParseUnexpectedOperatorParseUnexpectedTermParseUnexpectedTokenParseUnexpectedKeywordParseExpectedExpressionParseExpectedLeftParenAfterCastParseExpectedLeftParenValueConstructorParseExpectedLeftParenBuiltinFunctionCallParseExpectedArgumentDelimiterParseCastArityParseInvalidTypeParamParseEmptySelectParseSelectMissingFromParseExpectedIdentForGroupNameParseExpectedIdentForAliasParseUnsupportedCallWithStarParseNonUnaryAgregateFunctionCallParseMalformedJoinParseExpectedIdentForAtParseAsteriskIsNotAloneInSelectListParseCannotMixSqbAndWildcardInSelectListParseInvalidContextForWildcardInSelectListIncorrectSQLFunctionArgumentTypeValueParseFailureEvaluatorInvalidArgumentsIntegerOverflowLikeInvalidInputsCastFailedInvalidCastEvaluatorInvalidTimestampFormatPatternEvaluatorInvalidTimestampFormatPatternSymbolForParsingEvaluatorTimestampFormatPatternDuplicateFieldsEvaluatorTimestampFormatPatternHourClockAmPmMismatchEvaluatorUnterminatedTimestampFormatPatternTokenEvaluatorInvalidTimestampFormatPatternTokenEvaluatorInvalidTimestampFormatPatternSymbolEvaluatorBindingDoesNotExistMissingHeadersInvalidColumnIndexAdminConfigNotificationTargetsFailedAdminProfilerNotEnabledInvalidDecompressedSizeAddUserInvalidArgumentAdminAccountNotEligibleAccountNotEligibleAdminServiceAccountNotFoundPostPolicyConditionInvalidFormat"
-var _APIErrorCode_index = [...]uint16{0, 4, 16, 25, 39, 53, 67, 81, 94, 112, 129, 142, 154, 176, 196, 222, 236, 257, 274, 289, 312, 329, 347, 364, 388, 403, 424, 442, 454, 474, 491, 514, 535, 547, 565, 586, 614, 635, 658, 684, 721, 751, 784, 809, 841, 871, 900, 925, 947, 973, 995, 1023, 1052, 1086, 1117, 1154, 1184, 1214, 1223, 1235, 1251, 1264, 1278, 1296, 1316, 1337, 1353, 1364, 1380, 1408, 1428, 1444, 1472, 1486, 1503, 1518, 1531, 1545, 1558, 1571, 1587, 1604, 1625, 1639, 1660, 1673, 1695, 1718, 1743, 1759, 1774, 1789, 1810, 1828, 1843, 1860, 1885, 1903, 1926, 1941, 1960, 1976, 1995, 2009, 2017, 2036, 2046, 2061, 2097, 2128, 2161, 2190, 2202, 2222, 2246, 2270, 2291, 2315, 2334, 2357, 2383, 2404, 2422, 2449, 2476, 2497, 2518, 2542, 2567, 2595, 2623, 2639, 2650, 2662, 2679, 2694, 2712, 2741, 2758, 2774, 2790, 2808, 2826, 2849, 2870, 2880, 2891, 2905, 2916, 2932, 2955, 2972, 3000, 3019, 3039, 3056, 3074, 3091, 3105, 3140, 3159, 3170, 3183, 3198, 3214, 3232, 3249, 3269, 3290, 3311, 3330, 3349, 3367, 3391, 3415, 3436, 3450, 3474, 3503, 3521, 3538, 3560, 3577, 3595, 3615, 3641, 3657, 3676, 3697, 3701, 3719, 3736, 3762, 3776, 3800, 3821, 3836, 3854, 3877, 3892, 3911, 3928, 3945, 3969, 3996, 4019, 4042, 4059, 4081, 4097, 4117, 4136, 4158, 4179, 4199, 4221, 4245, 4264, 4306, 4327, 4350, 4371, 4402, 4421, 4443, 4463, 4489, 4510, 4532, 4552, 4576, 4599, 4618, 4638, 4660, 4683, 4714, 4752, 4793, 4823, 4837, 4858, 4874, 4896, 4926, 4952, 4980, 5013, 5031, 5054, 5089, 5129, 5171, 5203, 5220, 5245, 5260, 5277, 5287, 5298, 5336, 5390, 5436, 5488, 5536, 5579, 5623, 5651, 5665, 5683, 5719, 5742, 5765, 5787, 5810, 5828, 5855, 5887}
+var _APIErrorCode_index = [...]uint16{0, 4, 16, 25, 39, 53, 67, 81, 94, 112, 129, 142, 154, 176, 196, 222, 236, 257, 274, 289, 312, 329, 347, 364, 388, 403, 424, 442, 454, 474, 491, 514, 535, 547, 565, 586, 614, 635, 658, 684, 721, 751, 784, 809, 841, 871, 900, 925, 947, 973, 995, 1023, 1052, 1086, 1117, 1154, 1184, 1214, 1223, 1235, 1251, 1264, 1278, 1296, 1316, 1337, 1353, 1364, 1380, 1408, 1428, 1444, 1472, 1486, 1503, 1518, 1531, 1545, 1558, 1571, 1587, 1604, 1625, 1639, 1660, 1673, 1695, 1718, 1743, 1759, 1774, 1789, 1810, 1828, 1843, 1860, 1885, 1903, 1926, 1941, 1960, 1976, 1995, 2009, 2017, 2036, 2046, 2061, 2097, 2128, 2161, 2190, 2202, 2222, 2246, 2270, 2291, 2315, 2334, 2357, 2383, 2404, 2422, 2449, 2476, 2497, 2518, 2542, 2567, 2595, 2623, 2639, 2650, 2662, 2679, 2694, 2712, 2741, 2758, 2774, 2790, 2808, 2826, 2849, 2870, 2880, 2891, 2902, 2918, 2941, 2958, 2986, 3005, 3025, 3042, 3060, 3077, 3091, 3126, 3145, 3156, 3169, 3184, 3200, 3218, 3235, 3255, 3276, 3297, 3316, 3335, 3353, 3377, 3401, 3422, 3436, 3460, 3489, 3507, 3524, 3546, 3563, 3581, 3601, 3627, 3643, 3662, 3683, 3687, 3705, 3722, 3748, 3762, 3786, 3807, 3822, 3840, 3863, 3878, 3897, 3914, 3931, 3955, 3982, 4005, 4028, 4045, 4067, 4083, 4103, 4122, 4144, 4165, 4185, 4207, 4231, 4250, 4292, 4313, 4336, 4357, 4388, 4407, 4429, 4449, 4475, 4496, 4518, 4538, 4562, 4585, 4604, 4624, 4646, 4669, 4700, 4738, 4779, 4809, 4823, 4844, 4860, 4882, 4912, 4938, 4966, 4999, 5017, 5040, 5075, 5115, 5157, 5189, 5206, 5231, 5246, 5263, 5273, 5284, 5322, 5376, 5422, 5474, 5522, 5565, 5609, 5637, 5651, 5669, 5705, 5728, 5751, 5773, 5796, 5814, 5841, 5873}
func (i APIErrorCode) String() string {
if i < 0 || i >= APIErrorCode(len(_APIErrorCode_index)-1) {
diff --git a/cmd/erasure-common.go b/cmd/erasure-common.go
index 1f9ddf4ca..106e23068 100644
--- a/cmd/erasure-common.go
+++ b/cmd/erasure-common.go
@@ -20,8 +20,6 @@ package cmd
import (
"context"
"sync"
-
- "github.com/minio/minio/internal/sync/errgroup"
)
func (er erasureObjects) getLocalDisks() (localDisks []StorageAPI) {
@@ -101,34 +99,3 @@ func (er erasureObjects) getLoadBalancedDisks(optimized bool) []StorageAPI {
// Return disks which have maximum disk usage common.
return newDisks[max]
}
-
-// This function does the following check, suppose
-// object is "a/b/c/d", stat makes sure that objects
-// - "a/b/c"
-// - "a/b"
-// - "a"
-// do not exist on the namespace.
-func (er erasureObjects) parentDirIsObject(ctx context.Context, bucket, parent string) bool {
- storageDisks := er.getDisks()
-
- g := errgroup.WithNErrs(len(storageDisks))
-
- for index := range storageDisks {
- index := index
- g.Go(func() error {
- if storageDisks[index] == nil {
- return errDiskNotFound
- }
- // Check if 'prefix' is an object on this 'disk', else continue the check the next disk
- return storageDisks[index].CheckFile(ctx, bucket, parent)
- }, index)
- }
-
- // NOTE: Observe we are not trying to read `xl.meta` and figure out the actual
- // quorum intentionally, but rely on the default case scenario. Actual quorum
- // verification will happen by top layer by using getObjectInfo() and will be
- // ignored if necessary.
- readQuorum := getReadQuorum(len(storageDisks))
-
- return reduceReadQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, readQuorum) == nil
-}
diff --git a/cmd/erasure-common_test.go b/cmd/erasure-common_test.go
deleted file mode 100644
index e325ee283..000000000
--- a/cmd/erasure-common_test.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import (
- "bytes"
- "context"
- "os"
- "testing"
-)
-
-// Tests for if parent directory is object
-func TestErasureParentDirIsObject(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- obj, fsDisks, err := prepareErasureSets32(ctx)
- if err != nil {
- t.Fatalf("Unable to initialize 'Erasure' object layer.")
- }
- defer obj.Shutdown(context.Background())
-
- // Remove all disks.
- for _, disk := range fsDisks {
- defer os.RemoveAll(disk)
- }
-
- bucketName := "testbucket"
- objectName := "object"
-
- if err = obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil {
- t.Fatal(err)
- }
-
- objectContent := "12345"
- _, err = obj.PutObject(GlobalContext, bucketName, objectName,
- mustGetPutObjReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), ObjectOptions{})
- if err != nil {
- t.Fatal(err)
- }
-
- testCases := []struct {
- expectedErr bool
- objectName string
- }{
- {
- expectedErr: true,
- objectName: pathJoin(objectName, "parent-is-object"),
- },
- {
- expectedErr: false,
- objectName: pathJoin("no-parent", "object"),
- },
- }
-
- for _, testCase := range testCases {
- t.Run("", func(t *testing.T) {
- _, err = obj.PutObject(GlobalContext, bucketName, testCase.objectName,
- mustGetPutObjReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), ObjectOptions{})
- if testCase.expectedErr && err == nil {
- t.Error("Expected error but got nil")
- }
- if !testCase.expectedErr && err != nil {
- t.Errorf("Expected nil but got %v", err)
- }
- })
- }
-}
diff --git a/cmd/erasure-healing.go b/cmd/erasure-healing.go
index 35b6fa75c..7128a99d9 100644
--- a/cmd/erasure-healing.go
+++ b/cmd/erasure-healing.go
@@ -783,11 +783,9 @@ func (er erasureObjects) purgeObjectDangling(ctx context.Context, bucket, object
var err error
var returnNotFound bool
if !opts.DryRun && opts.Remove {
- if versionID == "" {
- err = er.deleteObject(ctx, bucket, object, writeQuorum)
- } else {
- err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, FileInfo{VersionID: versionID}, false)
- }
+ err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, FileInfo{
+ VersionID: versionID,
+ }, false)
// If Delete was successful, make sure to return the appropriate error
// and heal result appropriate with delete's error messages
diff --git a/cmd/erasure-healing_test.go b/cmd/erasure-healing_test.go
index 9c99904d4..62c6d873b 100644
--- a/cmd/erasure-healing_test.go
+++ b/cmd/erasure-healing_test.go
@@ -221,7 +221,7 @@ func TestHealObjectCorrupted(t *testing.T) {
t.Fatalf("Failed to getLatestFileInfo - %v", err)
}
- if err = firstDisk.CheckFile(context.Background(), bucket, object); err != nil {
+ if _, err = firstDisk.StatInfoFile(context.Background(), bucket, object+"/"+xlStorageFormatFile); err != nil {
t.Errorf("Expected er.meta file to be present but stat failed - %v", err)
}
@@ -365,7 +365,7 @@ func TestHealObjectErasure(t *testing.T) {
t.Fatalf("Failed to heal object - %v", err)
}
- if err = firstDisk.CheckFile(context.Background(), bucket, object); err != nil {
+ if _, err = firstDisk.StatInfoFile(context.Background(), bucket, object+"/"+xlStorageFormatFile); err != nil {
t.Errorf("Expected er.meta file to be present but stat failed - %v", err)
}
diff --git a/cmd/erasure-multipart.go b/cmd/erasure-multipart.go
index c24da8a71..1790cff19 100644
--- a/cmd/erasure-multipart.go
+++ b/cmd/erasure-multipart.go
@@ -782,12 +782,6 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
return oi, toObjectErr(err, bucket, object, uploadID)
}
- // Check if an object is present as one of the parent dir.
- // -- FIXME. (needs a new kind of lock).
- if opts.ParentIsObject != nil && opts.ParentIsObject(rctx, bucket, path.Dir(object)) {
- return oi, toObjectErr(errFileParentIsFile, bucket, object)
- }
-
// Calculate s3 compatible md5sum for complete multipart.
s3MD5 := getCompleteMultipartMD5(parts)
diff --git a/cmd/erasure-object.go b/cmd/erasure-object.go
index 58d55df74..f964ca2a0 100644
--- a/cmd/erasure-object.go
+++ b/cmd/erasure-object.go
@@ -660,13 +660,6 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
return ObjectInfo{}, toObjectErr(errInvalidArgument)
}
- // Check if an object is present as one of the parent dir.
- // -- FIXME. (needs a new kind of lock).
- // -- FIXME (this also causes performance issue when disks are down).
- if opts.ParentIsObject != nil && opts.ParentIsObject(ctx, bucket, path.Dir(object)) {
- return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object)
- }
-
// Initialize parts metadata
partsMetadata := make([]FileInfo, len(storageDisks))
diff --git a/cmd/erasure-sets.go b/cmd/erasure-sets.go
index a1d3fd0fb..5fcf91f66 100644
--- a/cmd/erasure-sets.go
+++ b/cmd/erasure-sets.go
@@ -878,18 +878,10 @@ func (s *erasureSets) GetObjectNInfo(ctx context.Context, bucket, object string,
return set.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
}
-func (s *erasureSets) parentDirIsObject(ctx context.Context, bucket, parent string) bool {
- if parent == "." {
- return false
- }
- return s.getHashedSet(parent).parentDirIsObject(ctx, bucket, parent)
-}
-
// PutObject - writes an object to hashedSet based on the object name.
func (s *erasureSets) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
set := s.getHashedSet(object)
auditObjectErasureSet(ctx, object, set)
- opts.ParentIsObject = s.parentDirIsObject
return set.PutObject(ctx, bucket, object, data, opts)
}
@@ -1074,7 +1066,6 @@ func (s *erasureSets) AbortMultipartUpload(ctx context.Context, bucket, object,
func (s *erasureSets) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
set := s.getHashedSet(object)
auditObjectErasureSet(ctx, object, set)
- opts.ParentIsObject = s.parentDirIsObject
return set.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
}
diff --git a/cmd/fs-v1-multipart.go b/cmd/fs-v1-multipart.go
index ef1bc3319..73382b66e 100644
--- a/cmd/fs-v1-multipart.go
+++ b/cmd/fs-v1-multipart.go
@@ -24,7 +24,6 @@ import (
"fmt"
"io/ioutil"
"os"
- pathutil "path"
"sort"
"strconv"
"strings"
@@ -550,11 +549,6 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
return oi, toObjectErr(err)
}
- // Check if an object is present as one of the parent dir.
- if fs.parentDirIsObject(ctx, bucket, pathutil.Dir(object)) {
- return oi, toObjectErr(errFileParentIsFile, bucket, object)
- }
-
if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return oi, toObjectErr(err, bucket)
}
diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go
index dc550d4fb..64813ded6 100644
--- a/cmd/fs-v1.go
+++ b/cmd/fs-v1.go
@@ -1006,26 +1006,6 @@ func (fs *FSObjects) GetObjectInfo(ctx context.Context, bucket, object string, o
return oi, toObjectErr(err, bucket, object)
}
-// This function does the following check, suppose
-// object is "a/b/c/d", stat makes sure that objects ""a/b/c""
-// "a/b" and "a" do not exist.
-func (fs *FSObjects) parentDirIsObject(ctx context.Context, bucket, parent string) bool {
- var isParentDirObject func(string) bool
- isParentDirObject = func(p string) bool {
- if p == "." || p == SlashSeparator {
- return false
- }
- if fsIsFile(ctx, pathJoin(fs.fsPath, bucket, p)) {
- // If there is already a file at prefix "p", return true.
- return true
- }
-
- // Check if there is a file as one of the parent paths.
- return isParentDirObject(path.Dir(p))
- }
- return isParentDirObject(parent)
-}
-
// PutObject - creates an object upon reading from the input stream
// until EOF, writes data directly to configured filesystem path.
// Additionally writes `fs.json` which carries the necessary metadata
@@ -1079,10 +1059,6 @@ func (fs *FSObjects) putObject(ctx context.Context, bucket string, object string
// with a slash separator, we treat it like a valid operation
// and return success.
if isObjectDir(object, data.Size()) {
- // Check if an object is present as one of the parent dir.
- if fs.parentDirIsObject(ctx, bucket, path.Dir(object)) {
- return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object)
- }
if err = mkdirAll(pathJoin(fs.fsPath, bucket, object), 0777); err != nil {
logger.LogIf(ctx, err)
return ObjectInfo{}, toObjectErr(err, bucket, object)
@@ -1094,11 +1070,6 @@ func (fs *FSObjects) putObject(ctx context.Context, bucket string, object string
return fsMeta.ToObjectInfo(bucket, object, fi), nil
}
- // Check if an object is present as one of the parent dir.
- if fs.parentDirIsObject(ctx, bucket, path.Dir(object)) {
- return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object)
- }
-
// Validate input data size and it can never be less than zero.
if data.Size() < -1 {
logger.LogIf(ctx, errInvalidArgument, logger.Application)
diff --git a/cmd/fs-v1_test.go b/cmd/fs-v1_test.go
index 4adea4f5b..d77fdac32 100644
--- a/cmd/fs-v1_test.go
+++ b/cmd/fs-v1_test.go
@@ -26,71 +26,6 @@ import (
"github.com/minio/madmin-go"
)
-// Tests for if parent directory is object
-func TestFSParentDirIsObject(t *testing.T) {
- obj, disk, err := prepareFS()
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(disk)
-
- bucketName := "testbucket"
- objectName := "object"
-
- if err = obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil {
- t.Fatal(err)
- }
- objectContent := "12345"
- objInfo, err := obj.PutObject(GlobalContext, bucketName, objectName,
- mustGetPutObjReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), ObjectOptions{})
- if err != nil {
- t.Fatal(err)
- }
- if objInfo.Name != objectName {
- t.Fatalf("Unexpected object name returned got %s, expected %s", objInfo.Name, objectName)
- }
-
- fs := obj.(*FSObjects)
- testCases := []struct {
- parentIsObject bool
- objectName string
- }{
- // parentIsObject is true if object is available.
- {
- parentIsObject: true,
- objectName: objectName,
- },
- {
- parentIsObject: false,
- objectName: "",
- },
- {
- parentIsObject: false,
- objectName: ".",
- },
- // Should not cause infinite loop.
- {
- parentIsObject: false,
- objectName: SlashSeparator,
- },
- {
- parentIsObject: false,
- objectName: "\\",
- },
- // Should not cause infinite loop with double forward slash.
- {
- parentIsObject: false,
- objectName: "//",
- },
- }
- for i, testCase := range testCases {
- gotValue := fs.parentDirIsObject(GlobalContext, bucketName, testCase.objectName)
- if testCase.parentIsObject != gotValue {
- t.Errorf("Test %d: Unexpected value returned got %t, expected %t", i+1, gotValue, testCase.parentIsObject)
- }
- }
-}
-
// TestNewFS - tests initialization of all input disks
// and constructs a valid `FS` object layer.
func TestNewFS(t *testing.T) {
@@ -226,35 +161,6 @@ func TestFSPutObject(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _, err = obj.PutObject(GlobalContext, bucketName, objectName+"/1", mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{})
- if err == nil {
- t.Fatal("Unexpected should fail here, backend corruption occurred")
- }
- if nerr, ok := err.(ParentIsObject); !ok {
- t.Fatalf("Expected ParentIsObject, got %#v", err)
- } else {
- if nerr.Bucket != "bucket" {
- t.Fatalf("Expected 'bucket', got %s", nerr.Bucket)
- }
- if nerr.Object != "1/2/3/4/object/1" {
- t.Fatalf("Expected '1/2/3/4/object/1', got %s", nerr.Object)
- }
- }
-
- _, err = obj.PutObject(GlobalContext, bucketName, objectName+"/1/", mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), ObjectOptions{})
- if err == nil {
- t.Fatal("Unexpected should fail here, backned corruption occurred")
- }
- if nerr, ok := err.(ParentIsObject); !ok {
- t.Fatalf("Expected ParentIsObject, got %#v", err)
- } else {
- if nerr.Bucket != "bucket" {
- t.Fatalf("Expected 'bucket', got %s", nerr.Bucket)
- }
- if nerr.Object != "1/2/3/4/object/1/" {
- t.Fatalf("Expected '1/2/3/4/object/1/', got %s", nerr.Object)
- }
- }
}
// TestFSDeleteObject - test fs.DeleteObject() with healthy and corrupted disks
diff --git a/cmd/metacache-set.go b/cmd/metacache-set.go
index 4aa05721b..6286f81e7 100644
--- a/cmd/metacache-set.go
+++ b/cmd/metacache-set.go
@@ -649,9 +649,8 @@ func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCache
logger.LogIf(ctx, err)
custom := b.headerKV()
_, err = er.putObject(ctx, minioMetaBucket, o.objectPath(b.n), NewPutObjReader(r), ObjectOptions{
- UserDefined: custom,
- NoLock: true, // No need to hold namespace lock, each prefix caches uniquely.
- ParentIsObject: nil,
+ UserDefined: custom,
+ NoLock: true, // No need to hold namespace lock, each prefix caches uniquely.
})
if err != nil {
mc.setErr(err.Error())
diff --git a/cmd/naughty-disk_test.go b/cmd/naughty-disk_test.go
index a7be10a1c..e59806361 100644
--- a/cmd/naughty-disk_test.go
+++ b/cmd/naughty-disk_test.go
@@ -218,13 +218,6 @@ func (d *naughtyDisk) CheckParts(ctx context.Context, volume string, path string
return d.disk.CheckParts(ctx, volume, path, fi)
}
-func (d *naughtyDisk) CheckFile(ctx context.Context, volume string, path string) (err error) {
- if err := d.calcError(); err != nil {
- return err
- }
- return d.disk.CheckFile(ctx, volume, path)
-}
-
func (d *naughtyDisk) Delete(ctx context.Context, volume string, path string, recursive bool) (err error) {
if err := d.calcError(); err != nil {
return err
diff --git a/cmd/object-api-errors.go b/cmd/object-api-errors.go
index 73afd3ee0..effca57c0 100644
--- a/cmd/object-api-errors.go
+++ b/cmd/object-api-errors.go
@@ -22,7 +22,6 @@ import (
"errors"
"fmt"
"io"
- "path"
)
// Converts underlying storage error. Convenience function written to
@@ -64,15 +63,6 @@ func toObjectErr(err error, params ...string) error {
apiErr.Object = decodeDirObject(params[1])
}
return apiErr
- case errFileParentIsFile.Error():
- apiErr := ParentIsObject{}
- if len(params) >= 1 {
- apiErr.Bucket = params[0]
- }
- if len(params) >= 2 {
- apiErr.Object = decodeDirObject(params[1])
- }
- return apiErr
case errIsNotRegular.Error():
apiErr := ObjectExistsAsDirectory{}
if len(params) >= 1 {
@@ -322,13 +312,6 @@ func (e PrefixAccessDenied) Error() string {
return "Prefix access is denied: " + e.Bucket + SlashSeparator + e.Object
}
-// ParentIsObject object access is denied.
-type ParentIsObject GenericError
-
-func (e ParentIsObject) Error() string {
- return "Parent is object " + e.Bucket + SlashSeparator + path.Dir(e.Object)
-}
-
// BucketExists bucket exists.
type BucketExists GenericError
diff --git a/cmd/object-api-interface.go b/cmd/object-api-interface.go
index bb5bd3a96..dd8b9111d 100644
--- a/cmd/object-api-interface.go
+++ b/cmd/object-api-interface.go
@@ -53,10 +53,9 @@ type ObjectOptions struct {
VersionPurgeStatus VersionPurgeStatusType // Is only set in DELETE operations for delete marker version to be permanently deleted.
Transition TransitionOptions
- NoLock bool // indicates to lower layers if the caller is expecting to hold locks.
- ProxyRequest bool // only set for GET/HEAD in active-active replication scenario
- ProxyHeaderSet bool // only set for GET/HEAD in active-active replication scenario
- ParentIsObject func(ctx context.Context, bucket, parent string) bool // Used to verify if parent is an object.
+ NoLock bool // indicates to lower layers if the caller is expecting to hold locks.
+ ProxyRequest bool // only set for GET/HEAD in active-active replication scenario
+ ProxyHeaderSet bool // only set for GET/HEAD in active-active replication scenario
DeletePrefix bool // set true to enforce a prefix deletion, only application for DeleteObject API,
diff --git a/cmd/storage-errors.go b/cmd/storage-errors.go
index 3c52c714c..f310e87ef 100644
--- a/cmd/storage-errors.go
+++ b/cmd/storage-errors.go
@@ -88,9 +88,6 @@ var errFileAccessDenied = StorageErr("file access denied")
// errFileCorrupt - file has an unexpected size, or is not readable
var errFileCorrupt = StorageErr("file is corrupted")
-// errFileParentIsFile - cannot have overlapping objects, parent is already a file.
-var errFileParentIsFile = StorageErr("parent is a file")
-
// errBitrotHashAlgoInvalid - the algo for bit-rot hash
// verification is empty or invalid.
var errBitrotHashAlgoInvalid = StorageErr("bit-rot hash algorithm is invalid")
diff --git a/cmd/storage-interface.go b/cmd/storage-interface.go
index eb0b30167..99a6566e5 100644
--- a/cmd/storage-interface.go
+++ b/cmd/storage-interface.go
@@ -71,7 +71,6 @@ type StorageAPI interface {
ReadFileStream(ctx context.Context, volume, path string, offset, length int64) (io.ReadCloser, error)
RenameFile(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string) error
CheckParts(ctx context.Context, volume string, path string, fi FileInfo) error
- CheckFile(ctx context.Context, volume string, path string) (err error)
Delete(ctx context.Context, volume string, path string, recursive bool) (err error)
VerifyFile(ctx context.Context, volume, path string, fi FileInfo) error
StatInfoFile(ctx context.Context, volume, path string) (stat StatInfo, err error)
diff --git a/cmd/storage-rest-client.go b/cmd/storage-rest-client.go
index 55f511176..f1f1af552 100644
--- a/cmd/storage-rest-client.go
+++ b/cmd/storage-rest-client.go
@@ -434,16 +434,6 @@ func (client *storageRESTClient) WriteAll(ctx context.Context, volume string, pa
return err
}
-// CheckFile - stat a file metadata.
-func (client *storageRESTClient) CheckFile(ctx context.Context, volume string, path string) error {
- values := make(url.Values)
- values.Set(storageRESTVolume, volume)
- values.Set(storageRESTFilePath, path)
- respBody, err := client.call(ctx, storageRESTMethodCheckFile, values, nil, -1)
- defer xhttp.DrainBody(respBody)
- return err
-}
-
// CheckParts - stat all file parts.
func (client *storageRESTClient) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) error {
values := make(url.Values)
diff --git a/cmd/storage-rest-common.go b/cmd/storage-rest-common.go
index 7e0cb3943..27a97c42a 100644
--- a/cmd/storage-rest-common.go
+++ b/cmd/storage-rest-common.go
@@ -18,7 +18,7 @@
package cmd
const (
- storageRESTVersion = "v37" // cleanup behavior change at storage layer.
+ storageRESTVersion = "v38" // Remove CheckFile API
storageRESTVersionPrefix = SlashSeparator + storageRESTVersion
storageRESTPrefix = minioReservedBucketPath + "/storage"
)
@@ -42,7 +42,6 @@ const (
storageRESTMethodReadVersion = "/readversion"
storageRESTMethodRenameData = "/renamedata"
storageRESTMethodCheckParts = "/checkparts"
- storageRESTMethodCheckFile = "/checkfile"
storageRESTMethodReadAll = "/readall"
storageRESTMethodReadFile = "/readfile"
storageRESTMethodReadFileStream = "/readfilestream"
diff --git a/cmd/storage-rest-server.go b/cmd/storage-rest-server.go
index 7308af8bb..c47f044ab 100644
--- a/cmd/storage-rest-server.go
+++ b/cmd/storage-rest-server.go
@@ -487,20 +487,6 @@ func (s *storageRESTServer) CheckPartsHandler(w http.ResponseWriter, r *http.Req
}
}
-// CheckFileHandler - check if a file metadata exists.
-func (s *storageRESTServer) CheckFileHandler(w http.ResponseWriter, r *http.Request) {
- if !s.IsValid(w, r) {
- return
- }
- vars := mux.Vars(r)
- volume := vars[storageRESTVolume]
- filePath := vars[storageRESTFilePath]
-
- if err := s.storage.CheckFile(r.Context(), volume, filePath); err != nil {
- s.writeErrorResponse(w, err)
- }
-}
-
// ReadAllHandler - read all the contents of a file.
func (s *storageRESTServer) ReadAllHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
@@ -1121,8 +1107,6 @@ func registerStorageRESTHandlers(router *mux.Router, endpointServerPools Endpoin
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCreateFile).HandlerFunc(httpTraceHdrs(server.CreateFileHandler)).
Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTLength)...)
- subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCheckFile).HandlerFunc(httpTraceHdrs(server.CheckFileHandler)).
- Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCheckParts).HandlerFunc(httpTraceHdrs(server.CheckPartsHandler)).
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
diff --git a/cmd/storage-rest_test.go b/cmd/storage-rest_test.go
index cae6d43dd..3966d190d 100644
--- a/cmd/storage-rest_test.go
+++ b/cmd/storage-rest_test.go
@@ -166,7 +166,7 @@ func testStorageAPIDeleteVol(t *testing.T, storage StorageAPI) {
}
}
-func testStorageAPICheckFile(t *testing.T, storage StorageAPI) {
+func testStorageAPIStatInfoFile(t *testing.T, storage StorageAPI) {
err := storage.MakeVol(context.Background(), "foo")
if err != nil {
t.Fatalf("unexpected error %v", err)
@@ -187,7 +187,7 @@ func testStorageAPICheckFile(t *testing.T, storage StorageAPI) {
}
for i, testCase := range testCases {
- err := storage.CheckFile(context.Background(), testCase.volumeName, testCase.objectName)
+ _, err := storage.StatInfoFile(context.Background(), testCase.volumeName, testCase.objectName+"/"+xlStorageFormatFile)
expectErr := (err != nil)
if expectErr != testCase.expectErr {
@@ -515,7 +515,7 @@ func TestStorageRESTClientDeleteVol(t *testing.T) {
testStorageAPIDeleteVol(t, restClient)
}
-func TestStorageRESTClientCheckFile(t *testing.T) {
+func TestStorageRESTClientStatInfoFile(t *testing.T) {
httpServer, restClient, prevGlobalServerConfig, endpointPath := newStorageRESTHTTPServerClient(t)
defer httpServer.Close()
defer func() {
@@ -523,7 +523,7 @@ func TestStorageRESTClientCheckFile(t *testing.T) {
}()
defer os.RemoveAll(endpointPath)
- testStorageAPICheckFile(t, restClient)
+ testStorageAPIStatInfoFile(t, restClient)
}
func TestStorageRESTClientListDir(t *testing.T) {
diff --git a/cmd/storagemetric_string.go b/cmd/storagemetric_string.go
index 1acc69441..9e5d7563f 100644
--- a/cmd/storagemetric_string.go
+++ b/cmd/storagemetric_string.go
@@ -22,23 +22,22 @@ func _() {
_ = x[storageMetricRenameFile-11]
_ = x[storageMetricRenameData-12]
_ = x[storageMetricCheckParts-13]
- _ = x[storageMetricCheckFile-14]
- _ = x[storageMetricDelete-15]
- _ = x[storageMetricDeleteVersions-16]
- _ = x[storageMetricVerifyFile-17]
- _ = x[storageMetricWriteAll-18]
- _ = x[storageMetricDeleteVersion-19]
- _ = x[storageMetricWriteMetadata-20]
- _ = x[storageMetricUpdateMetadata-21]
- _ = x[storageMetricReadVersion-22]
- _ = x[storageMetricReadAll-23]
- _ = x[storageStatInfoFile-24]
- _ = x[storageMetricLast-25]
+ _ = x[storageMetricDelete-14]
+ _ = x[storageMetricDeleteVersions-15]
+ _ = x[storageMetricVerifyFile-16]
+ _ = x[storageMetricWriteAll-17]
+ _ = x[storageMetricDeleteVersion-18]
+ _ = x[storageMetricWriteMetadata-19]
+ _ = x[storageMetricUpdateMetadata-20]
+ _ = x[storageMetricReadVersion-21]
+ _ = x[storageMetricReadAll-22]
+ _ = x[storageStatInfoFile-23]
+ _ = x[storageMetricLast-24]
}
-const _storageMetric_name = "MakeVolBulkMakeVolListVolsStatVolDeleteVolWalkDirListDirReadFileAppendFileCreateFileReadFileStreamRenameFileRenameDataCheckPartsCheckFileDeleteDeleteVersionsVerifyFileWriteAllDeleteVersionWriteMetadataUpdateMetadataReadVersionReadAllstorageStatInfoFileLast"
+const _storageMetric_name = "MakeVolBulkMakeVolListVolsStatVolDeleteVolWalkDirListDirReadFileAppendFileCreateFileReadFileStreamRenameFileRenameDataCheckPartsDeleteDeleteVersionsVerifyFileWriteAllDeleteVersionWriteMetadataUpdateMetadataReadVersionReadAllstorageStatInfoFileLast"
-var _storageMetric_index = [...]uint16{0, 11, 18, 26, 33, 42, 49, 56, 64, 74, 84, 98, 108, 118, 128, 137, 143, 157, 167, 175, 188, 201, 215, 226, 233, 252, 256}
+var _storageMetric_index = [...]uint8{0, 11, 18, 26, 33, 42, 49, 56, 64, 74, 84, 98, 108, 118, 128, 134, 148, 158, 166, 179, 192, 206, 217, 224, 243, 247}
func (i storageMetric) String() string {
if i >= storageMetric(len(_storageMetric_index)-1) {
diff --git a/cmd/xl-storage-disk-id-check.go b/cmd/xl-storage-disk-id-check.go
index 36c3447bf..07cbde406 100644
--- a/cmd/xl-storage-disk-id-check.go
+++ b/cmd/xl-storage-disk-id-check.go
@@ -48,7 +48,6 @@ const (
storageMetricRenameFile
storageMetricRenameData
storageMetricCheckParts
- storageMetricCheckFile
storageMetricDelete
storageMetricDeleteVersions
storageMetricVerifyFile
@@ -436,22 +435,6 @@ func (p *xlStorageDiskIDCheck) CheckParts(ctx context.Context, volume string, pa
return p.storage.CheckParts(ctx, volume, path, fi)
}
-func (p *xlStorageDiskIDCheck) CheckFile(ctx context.Context, volume string, path string) (err error) {
- defer p.updateStorageMetrics(storageMetricCheckFile, volume, path)()
-
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- if err = p.checkDiskStale(); err != nil {
- return err
- }
-
- return p.storage.CheckFile(ctx, volume, path)
-}
-
func (p *xlStorageDiskIDCheck) Delete(ctx context.Context, volume string, path string, recursive bool) (err error) {
defer p.updateStorageMetrics(storageMetricDelete, volume, path)()
diff --git a/cmd/xl-storage.go b/cmd/xl-storage.go
index 1cd3658f9..97301ceec 100644
--- a/cmd/xl-storage.go
+++ b/cmd/xl-storage.go
@@ -37,7 +37,6 @@ import (
"time"
"github.com/dustin/go-humanize"
- "github.com/google/uuid"
jsoniter "github.com/json-iterator/go"
"github.com/klauspost/readahead"
"github.com/minio/minio/internal/bucket/lifecycle"
@@ -880,16 +879,16 @@ func (s *xlStorage) DeleteVersion(ctx context.Context, volume, path string, fi F
// PR #11758 used DataDir, preserve it
// for users who might have used master
// branch
- xlMeta.data.remove(versionID, dataDir)
- filePath := pathJoin(volumeDir, path, dataDir)
- if err = checkPathLength(filePath); err != nil {
- return err
- }
-
- if err = renameAll(filePath, pathutil.Join(s.diskPath, minioMetaTmpDeletedBucket, mustGetUUID())); err != nil {
- if err != errFileNotFound {
+ if !xlMeta.data.remove(versionID, dataDir) {
+ filePath := pathJoin(volumeDir, path, dataDir)
+ if err = checkPathLength(filePath); err != nil {
return err
}
+ if err = renameAll(filePath, pathutil.Join(s.diskPath, minioMetaTmpDeletedBucket, mustGetUUID())); err != nil {
+ if err != errFileNotFound {
+ return err
+ }
+ }
}
}
if !lastVersion {
@@ -901,19 +900,16 @@ func (s *xlStorage) DeleteVersion(ctx context.Context, volume, path string, fi F
return s.WriteAll(ctx, volume, pathJoin(path, xlStorageFormatFile), buf)
}
- // Move everything to trash.
- filePath := retainSlash(pathJoin(volumeDir, path))
+ // Move xl.meta to trash
+ filePath := pathJoin(volumeDir, path, xlStorageFormatFile)
if err = checkPathLength(filePath); err != nil {
return err
}
- err = renameAll(filePath, pathutil.Join(s.diskPath, minioMetaTmpDeletedBucket, mustGetUUID()))
- // Delete parents if needed.
- filePath = retainSlash(pathutil.Dir(pathJoin(volumeDir, path)))
- if filePath == retainSlash(volumeDir) {
- return err
+ err = Rename(filePath, pathutil.Join(s.diskPath, minioMetaTmpDeletedBucket, mustGetUUID()))
+ if err == nil || err == errFileNotFound {
+ s.deleteFile(volumeDir, pathJoin(volumeDir, path), false)
}
- s.deleteFile(volumeDir, filePath, false)
return err
}
@@ -1701,65 +1697,6 @@ func (s *xlStorage) CheckParts(ctx context.Context, volume string, path string,
return nil
}
-// CheckFile check if path has necessary metadata.
-// This function does the following check, suppose
-// you are creating a metadata file at "a/b/c/d/xl.meta",
-// makes sure that there is no `xl.meta` at
-// - "a/b/c/"
-// - "a/b/"
-// - "a/"
-func (s *xlStorage) CheckFile(ctx context.Context, volume string, path string) error {
- volumeDir, err := s.getVolDir(volume)
- if err != nil {
- return err
- }
- s.RLock()
- formatLegacy := s.formatLegacy
- s.RUnlock()
-
- var checkFile func(p string) error
- checkFile = func(p string) error {
- if p == "." || p == SlashSeparator {
- return errPathNotFound
- }
-
- filePath := pathJoin(volumeDir, p, xlStorageFormatFile)
- if err := checkPathLength(filePath); err != nil {
- return err
- }
- st, _ := Lstat(filePath)
- if st == nil {
-
- if !formatLegacy {
- return errPathNotFound
- }
-
- filePathOld := pathJoin(volumeDir, p, xlStorageFormatFileV1)
- if err := checkPathLength(filePathOld); err != nil {
- return err
- }
-
- st, _ = Lstat(filePathOld)
- if st == nil {
- return errPathNotFound
- }
- }
-
- if st != nil {
- if !st.Mode().IsRegular() {
- // not a regular file return error.
- return errFileNotFound
- }
- // Success fully found
- return nil
- }
-
- return checkFile(pathutil.Dir(p))
- }
-
- return checkFile(path)
-}
-
// deleteFile deletes a file or a directory if its empty unless recursive
// is set to true. If the target is successfully deleted, it will recursively
// move up the tree, deleting empty parent directories until it finds one
@@ -1907,6 +1844,15 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
dstBuf, err := xioutil.ReadFile(dstFilePath)
if err != nil {
+ // handle situations when dstFilePath is 'file'
+ // for example such as someone is trying to
+ // upload an object such as `prefix/object/xl.meta`
+ // where `prefix/object` is already an object
+ if isSysErrNotDir(err) && runtime.GOOS != globalWindowsOSName {
+ // NOTE: On windows the error happens at
+ // next line and returns appropriate error.
+ return errFileAccessDenied
+ }
if !osIsNotExist(err) {
return osErrToFileErr(err)
}
@@ -1921,38 +1867,6 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
return osErrToFileErr(err)
}
}
- if err == errFileNotFound {
- // Verification to ensure that we
- // don't have objects already created
- // at this location, verify that resultant
- // directories don't have any unexpected
- // directories that we do not understand
- // or expect. If its already there we should
- // make sure to reject further renames
- // for such objects.
- //
- // This elaborate check is necessary to avoid
- // scenarios such as these.
- //
- // bucket1/name1/obj1/xl.meta
- // bucket1/name1/xl.meta --> this should never
- // be allowed.
- {
- entries, err := readDirN(pathutil.Dir(dstFilePath), 1)
- if err != nil && err != errFileNotFound {
- return err
- }
- if len(entries) > 0 {
- entry := pathutil.Clean(entries[0])
- if entry != legacyDataDir {
- _, uerr := uuid.Parse(entry)
- if uerr != nil {
- return errFileParentIsFile
- }
- }
- }
- }
- }
}
var xlMeta xlMetaV2
diff --git a/cmd/xl-storage_test.go b/cmd/xl-storage_test.go
index 83fc5735c..8bdb1880b 100644
--- a/cmd/xl-storage_test.go
+++ b/cmd/xl-storage_test.go
@@ -1628,8 +1628,8 @@ func TestXLStorageRenameFile(t *testing.T) {
}
}
-// TestXLStorage xlStorage.CheckFile()
-func TestXLStorageCheckFile(t *testing.T) {
+// TestXLStorage xlStorage.StatInfoFile()
+func TestXLStorageStatInfoFile(t *testing.T) {
// create xlStorage test setup
xlStorage, path, err := newXLStorageTestSetup()
if err != nil {
@@ -1699,19 +1699,20 @@ func TestXLStorageCheckFile(t *testing.T) {
{
srcVol: "non-existent-vol",
srcPath: "success-file",
- expectedErr: errPathNotFound,
+ expectedErr: errVolumeNotFound,
},
// TestXLStorage case - 7.
// TestXLStorage case with file with directory.
{
srcVol: "success-vol",
srcPath: "path/to",
- expectedErr: errFileNotFound,
+ expectedErr: nil,
},
}
for i, testCase := range testCases {
- if err := xlStorage.CheckFile(context.Background(), testCase.srcVol, testCase.srcPath); err != testCase.expectedErr {
+ _, err := xlStorage.StatInfoFile(context.Background(), testCase.srcVol, testCase.srcPath+"/"+xlStorageFormatFile)
+ if err != testCase.expectedErr {
t.Errorf("TestXLStorage case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
}
}
diff --git a/cmd/xl-storage_unix_test.go b/cmd/xl-storage_unix_test.go
index 00c7dadcf..5c5bebb01 100644
--- a/cmd/xl-storage_unix_test.go
+++ b/cmd/xl-storage_unix_test.go
@@ -113,7 +113,7 @@ func TestIsValidUmaskFile(t *testing.T) {
}
// CheckFile - stat the file.
- if err := disk.CheckFile(context.Background(), testCase.volName, "hello-world.txt"); err != nil {
+ if _, err := disk.StatInfoFile(context.Background(), testCase.volName, "hello-world.txt/"+xlStorageFormatFile); err != nil {
t.Fatalf("Stat failed with %s expected to pass.", err)
}
}
diff --git a/docs/minio-limits.md b/docs/minio-limits.md
index 9b1bfc9c7..1c3b50ea6 100644
--- a/docs/minio-limits.md
+++ b/docs/minio-limits.md
@@ -1,4 +1,5 @@
## MinIO Server Limits Per Tenant
+For the best deployment experience MinIO recommends the operating systems RHEL/CentOS 8.x or later, or Ubuntu 18.04 LTS or later. These operating systems package the latest 'xfsprogs', which supports large-scale deployments.
### Erasure Code (Multiple Drives / Servers)
@@ -49,6 +50,16 @@ We found the following APIs to be redundant or less useful outside of AWS S3. If
- ObjectTorrent
### Object name restrictions on MinIO
-Object names that contain characters `^*|\/&";` are unsupported on Windows and other file systems which do not support filenames with these characters. NOTE: This list is not an exhaustive, it depends on the operating system and filesystem under use.
+- Object names that contain characters `^*|\/&";` are unsupported on the Windows platform or any other file systems that do not support filenames with special characters. **This list is non-exhaustive; it depends on the operating system and filesystem under use - please consult your operating system vendor**. MinIO recommends using Linux based deployments for production workloads.
-For best experience we recommend that you use distributions that ship fairly recent Linux kernel such as CentOS 8, Ubuntu 18.04 LTS along with XFS as the choice for your backend filesystem.
+- Objects must not have conflicting objects as parents; applications relying on this behavior should change to use proper unique keys. For example, the following conflicting key patterns are not supported.
+
+```
+PUT /a/b/1.txt
+PUT /a/b
+```
+
+```
+PUT /a/b
+PUT /a/b/1.txt
+```