Mirror of https://github.com/minio/minio.git
Add proper custom errors object creations (#7387)
In scenario 1, `bucket/object-prefix` already exists as an object and a PUT of `bucket/object-prefix/object` is attempted:

```
- bucket/object-prefix
- bucket/object-prefix/object
```

The server responds with `XMinioParentIsObject`.

In scenario 2, `bucket/object-prefix/object` already exists and a PUT of `bucket/object-prefix` is attempted:

```
- bucket/object-prefix/object
- bucket/object-prefix
```

The server responds with `XMinioObjectExistsAsDirectory`.

Fixes #6566
Parent: 12b79d9f3b
Commit: c184038b6a
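The two error codes correspond to two ways object keys can overlap like filesystem paths; the server detects them through `parentDirIsObject` and the directory checks changed in the hunks below. As a rough illustration only (not the server's code; the `conflict` helper and its return strings are hypothetical), this sketch classifies the two scenarios against a plain directory tree:

```go
// Minimal sketch, not MinIO's implementation: classify an overlapping-key
// conflict on a filesystem-style backend. Helper names are hypothetical.
package main

import (
	"fmt"
	"os"
	"path"
)

// conflict reports which overlap error a PUT of `object` under `bucketDir`
// would hit, or "" if there is no conflict.
func conflict(bucketDir, object string) string {
	// Scenario 2: the key itself is already a directory (a prefix with
	// objects underneath it).
	if fi, err := os.Stat(path.Join(bucketDir, object)); err == nil && fi.IsDir() {
		return "ObjectExistsAsDirectory"
	}
	// Scenario 1: some parent of the key is already a regular file (an object).
	for p := path.Dir(object); p != "." && p != "/"; p = path.Dir(p) {
		if fi, err := os.Stat(path.Join(bucketDir, p)); err == nil && !fi.IsDir() {
			return "ParentIsObject"
		}
	}
	return ""
}

func main() {
	dir, _ := os.MkdirTemp("", "bucket")
	defer os.RemoveAll(dir)

	// Scenario 1: "object-prefix" exists as an object (a file).
	os.WriteFile(path.Join(dir, "object-prefix"), []byte("data"), 0644)
	fmt.Println(conflict(dir, "object-prefix/object")) // ParentIsObject

	// Scenario 2: "object-prefix/object" exists, then PUT "object-prefix".
	os.Remove(path.Join(dir, "object-prefix"))
	os.MkdirAll(path.Join(dir, "object-prefix"), 0755)
	os.WriteFile(path.Join(dir, "object-prefix/object"), []byte("data"), 0644)
	fmt.Println(conflict(dir, "object-prefix")) // ObjectExistsAsDirectory
}
```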
```diff
@@ -180,6 +180,7 @@ const (
     // Minio extended errors.
     ErrReadQuorum
     ErrWriteQuorum
+    ErrParentIsObject
     ErrStorageFull
     ErrRequestBodyParse
     ErrObjectExistsAsDirectory
```
```diff
@@ -858,6 +859,11 @@ var errorCodes = errorCodeMap{
         Description:    "Storage backend has reached its minimum free disk threshold. Please delete a few objects to proceed.",
         HTTPStatusCode: http.StatusInsufficientStorage,
     },
+    ErrParentIsObject: {
+        Code:           "XMinioParentIsObject",
+        Description:    "Object-prefix is already an object, please choose a different object-prefix name.",
+        HTTPStatusCode: http.StatusBadRequest,
+    },
     ErrRequestBodyParse: {
         Code:           "XMinioRequestBodyParse",
         Description:    "The request body failed to parse.",
```
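For context, a client sees the new code inside the usual S3-style XML error envelope alongside the 400 status registered above. The struct below is a hypothetical stand-in for illustration only, not MinIO's actual response type:

```go
// Illustrative only: marshal an S3-style error body carrying the new code.
// The struct here is a hypothetical stand-in, not MinIO's response type.
package main

import (
	"encoding/xml"
	"fmt"
)

type s3Error struct {
	XMLName xml.Name `xml:"Error"`
	Code    string   `xml:"Code"`
	Message string   `xml:"Message"`
	Key     string   `xml:"Key"`
	Bucket  string   `xml:"BucketName"`
}

func main() {
	body, _ := xml.MarshalIndent(s3Error{
		Code:    "XMinioParentIsObject",
		Message: "Object-prefix is already an object, please choose a different object-prefix name.",
		Key:     "object-prefix/object",
		Bucket:  "bucket",
	}, "", "  ")
	fmt.Println(string(body)) // sent with HTTP 400 Bad Request
}
```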
```diff
@@ -1561,6 +1567,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
         apiErr = ErrObjectExistsAsDirectory
     case PrefixAccessDenied:
         apiErr = ErrAccessDenied
+    case ParentIsObject:
+        apiErr = ErrParentIsObject
     case BucketNameInvalid:
         apiErr = ErrInvalidBucketName
     case BucketNotFound:
```
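Combined with the object-layer changes further down, the translation chain is: the storage sentinel `errFileParentIsFile` becomes the typed `ParentIsObject` error in `toObjectErr`, which `toAPIErrorCode` maps to `ErrParentIsObject` and hence HTTP 400. The sketch below re-implements that chain with simplified stand-in types; only the identifier names come from the diff, and the real functions have different signatures and many more cases:

```go
// Minimal sketch of the error-translation chain with simplified stand-ins.
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// Storage-layer sentinel (mirrors errFileParentIsFile in the diff).
var errFileParentIsFile = errors.New("parent is a file")

// Object-layer typed error (stand-in for the real ParentIsObject/GenericError).
type ParentIsObject struct{ Bucket, Object string }

func (e ParentIsObject) Error() string {
	return "parent is an object: " + e.Bucket + "/" + e.Object
}

// toObjectErr: storage sentinel -> typed object-layer error.
func toObjectErr(err error, bucket, object string) error {
	if err == errFileParentIsFile {
		return ParentIsObject{Bucket: bucket, Object: object}
	}
	return err
}

// toAPIErrorCode: typed object-layer error -> S3 error code and HTTP status.
func toAPIErrorCode(err error) (code string, status int) {
	switch err.(type) {
	case ParentIsObject:
		return "XMinioParentIsObject", http.StatusBadRequest
	default:
		return "InternalError", http.StatusInternalServerError
	}
}

func main() {
	err := toObjectErr(errFileParentIsFile, "bucket", "object-prefix/object")
	code, status := toAPIErrorCode(err)
	fmt.Println(code, status) // XMinioParentIsObject 400
}
```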
```diff
@@ -332,7 +332,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
     if isObjectDir(object, data.Size()) {
         // Check if an object is present as one of the parent dir.
         if fs.parentDirIsObject(ctx, bucket, path.Dir(object)) {
-            return ObjectInfo{}, toObjectErr(errFileAccessDenied, bucket, object)
+            return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object)
         }
         if err = mkdirAll(pathJoin(fs.fsPath, bucket, object), 0777); err != nil {
             return ObjectInfo{}, toObjectErr(err, bucket, object)
```
```diff
@@ -350,7 +350,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object

     // Check if an object is present as one of the parent dir.
     if fs.parentDirIsObject(ctx, bucket, path.Dir(object)) {
-        return ObjectInfo{}, toObjectErr(errFileAccessDenied, bucket, object)
+        return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object)
     }

     // Validate input data size and it can never be less than zero.
```
```diff
@@ -487,7 +487,7 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,

     // Check if an object is present as one of the parent dir.
     if fs.parentDirIsObject(ctx, bucket, pathutil.Dir(object)) {
-        return oi, toObjectErr(errFileAccessDenied, bucket, object)
+        return oi, toObjectErr(errFileParentIsFile, bucket, object)
     }

     if _, err := fs.statBucketDir(ctx, bucket); err != nil {
```
```diff
@@ -839,8 +839,7 @@ func (fs *FSObjects) putObject(ctx context.Context, bucket string, object string
     if isObjectDir(object, data.Size()) {
         // Check if an object is present as one of the parent dir.
         if fs.parentDirIsObject(ctx, bucket, path.Dir(object)) {
-            logger.LogIf(ctx, errFileAccessDenied)
-            return ObjectInfo{}, toObjectErr(errFileAccessDenied, bucket, object)
+            return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object)
         }
         if err = mkdirAll(pathJoin(fs.fsPath, bucket, object), 0777); err != nil {
             logger.LogIf(ctx, err)
```
```diff
@@ -859,8 +858,7 @@ func (fs *FSObjects) putObject(ctx context.Context, bucket string, object string

     // Check if an object is present as one of the parent dir.
     if fs.parentDirIsObject(ctx, bucket, path.Dir(object)) {
-        logger.LogIf(ctx, errFileAccessDenied)
-        return ObjectInfo{}, toObjectErr(errFileAccessDenied, bucket, object)
+        return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object)
     }

     // Validate input data size and it can never be less than zero.
```
```diff
@@ -230,8 +230,8 @@ func TestFSPutObject(t *testing.T) {
     if err == nil {
         t.Fatal("Unexpected should fail here, backend corruption occurred")
     }
-    if nerr, ok := err.(PrefixAccessDenied); !ok {
-        t.Fatalf("Expected PrefixAccessDenied, got %#v", err)
+    if nerr, ok := err.(ParentIsObject); !ok {
+        t.Fatalf("Expected ParentIsObject, got %#v", err)
     } else {
         if nerr.Bucket != "bucket" {
             t.Fatalf("Expected 'bucket', got %s", nerr.Bucket)
```
```diff
@@ -245,8 +245,8 @@ func TestFSPutObject(t *testing.T) {
     if err == nil {
         t.Fatal("Unexpected should fail here, backned corruption occurred")
     }
-    if nerr, ok := err.(PrefixAccessDenied); !ok {
-        t.Fatalf("Expected PrefixAccessDenied, got %#v", err)
+    if nerr, ok := err.(ParentIsObject); !ok {
+        t.Fatalf("Expected ParentIsObject, got %#v", err)
     } else {
         if nerr.Bucket != "bucket" {
             t.Fatalf("Expected 'bucket', got %s", nerr.Bucket)
```
```diff
@@ -19,6 +19,7 @@ package cmd
 import (
     "fmt"
     "io"
+    "path"
 )

 // Converts underlying storage error. Convenience function written to
```
```diff
@@ -47,6 +48,13 @@ func toObjectErr(err error, params ...string) error {
                 Object: params[1],
             }
         }
+    case errFileParentIsFile:
+        if len(params) >= 2 {
+            err = ParentIsObject{
+                Bucket: params[0],
+                Object: params[1],
+            }
+        }
     case errIsNotRegular:
         if len(params) >= 2 {
             err = ObjectExistsAsDirectory{
```
```diff
@@ -182,6 +190,13 @@ func (e PrefixAccessDenied) Error() string {
     return "Prefix access is denied: " + e.Bucket + "/" + e.Object
 }

+// ParentIsObject object access is denied.
+type ParentIsObject GenericError
+
+func (e ParentIsObject) Error() string {
+    return "Parent is object " + e.Bucket + "/" + path.Dir(e.Object)
+}
+
 // BucketExists bucket exists.
 type BucketExists GenericError

```
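A small standalone illustration of the new `Error()` text (the type is re-declared here only so the snippet compiles on its own): `path.Dir` trims the last path element, so the message names the conflicting parent prefix rather than the key being written.

```go
// Standalone illustration of the new Error() message; the type here is a
// local stand-in, not the cmd package's ParentIsObject/GenericError.
package main

import (
	"fmt"
	"path"
)

type ParentIsObject struct{ Bucket, Object string }

func (e ParentIsObject) Error() string {
	return "Parent is object " + e.Bucket + "/" + path.Dir(e.Object)
}

func main() {
	err := ParentIsObject{Bucket: "bucket", Object: "object-prefix/object"}
	fmt.Println(err.Error()) // Parent is object bucket/object-prefix
}
```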
```diff
@@ -142,6 +142,10 @@ func renameAll(srcFilePath, dstFilePath string) (err error) {
         return fmt.Errorf("%s (%s)->(%s)", errCrossDeviceLink, srcFilePath, dstFilePath)
     case os.IsNotExist(err):
         return errFileNotFound
+    case os.IsExist(err):
+        // This is returned only when destination is a directory and we
+        // are attempting a rename from file to directory.
+        return errIsNotRegular
     default:
         return err
     }
```
```diff
@@ -66,9 +66,12 @@ var errVolumeNotEmpty = errors.New("volume is not empty")
 // errVolumeAccessDenied - cannot access volume, insufficient permissions.
 var errVolumeAccessDenied = errors.New("volume access denied")

-// errVolumeAccessDenied - cannot access file, insufficient permissions.
+// errFileAccessDenied - cannot access file, insufficient permissions.
 var errFileAccessDenied = errors.New("file access denied")

+// errFileParentIsFile - cannot have overlapping objects, parent is already a file.
+var errFileParentIsFile = errors.New("parent is a file")
+
 // errBitrotHashAlgoInvalid - the algo for bit-rot hash
 // verification is empty or invalid.
 var errBitrotHashAlgoInvalid = errors.New("bit-rot hash algorithm is invalid")
```
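The new sentinel follows the same pattern as the surrounding storage-layer errors: a package-level `errors.New` value matched by identity in `toObjectErr`'s switch. A minimal sketch, with the names re-declared locally, of why only the shared variable matches:

```go
// Minimal sketch (not MinIO code): package-level sentinels created with
// errors.New are compared by identity, which the toObjectErr switch relies on.
package main

import (
	"errors"
	"fmt"
)

var errFileParentIsFile = errors.New("parent is a file")

func classify(err error) string {
	switch err {
	case errFileParentIsFile:
		return "ParentIsObject"
	default:
		return "unclassified"
	}
}

func main() {
	fmt.Println(classify(errFileParentIsFile))            // ParentIsObject
	fmt.Println(classify(errors.New("parent is a file"))) // unclassified: same text, different value
}
```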
```diff
@@ -624,7 +624,7 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
     // Check if an object is present as one of the parent dir.
     // -- FIXME. (needs a new kind of lock).
     if xl.parentDirIsObject(ctx, bucket, path.Dir(object)) {
-        return oi, toObjectErr(errFileAccessDenied, bucket, object)
+        return oi, toObjectErr(errFileParentIsFile, bucket, object)
     }

     // Calculate s3 compatible md5sum for complete multipart.
```
```diff
@@ -577,7 +577,7 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string,
     // -- FIXME. (needs a new kind of lock).
     // -- FIXME (this also causes performance issue when disks are down).
     if xl.parentDirIsObject(ctx, bucket, path.Dir(object)) {
-        return ObjectInfo{}, toObjectErr(errFileAccessDenied, bucket, object)
+        return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object)
     }

     if err = xl.putObjectDir(ctx, minioMetaTmpBucket, tempObj, writeQuorum); err != nil {
```
```diff
@@ -608,7 +608,7 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string,
     // -- FIXME. (needs a new kind of lock).
     // -- FIXME (this also causes performance issue when disks are down).
     if xl.parentDirIsObject(ctx, bucket, path.Dir(object)) {
-        return ObjectInfo{}, toObjectErr(errFileAccessDenied, bucket, object)
+        return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object)
     }

     // Limit the reader to its provided size if specified.
```