Mirror of https://github.com/minio/minio.git (synced 2024-12-23 21:55:53 -05:00)
Remove unnecessary error log messages (#6186)
This commit is contained in:
parent f5df3b4795
commit c7946ab9ab
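The pattern removed throughout this diff is a helper calling logger.LogIf on an error immediately before returning that same error, so the failure gets reported twice once the caller logs or converts it as well. A minimal, self-contained sketch of the idea (illustrative Go only, not MinIO code; the names are invented for the example):

package main

import (
	"errors"
	"log"
)

var errQuorum = errors.New("write quorum not met")

// doWrite returns errors without logging them. The redundant pattern this
// commit removes would also log the error here, right before the return.
func doWrite(ok bool) error {
	if !ok {
		return errQuorum
	}
	return nil
}

// handleRequest owns the request and is the single place the failure is logged.
func handleRequest(ok bool) {
	if err := doWrite(ok); err != nil {
		log.Printf("write failed: %v", err)
	}
}

func main() {
	handleRequest(false)
	handleRequest(true)
}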
@@ -345,7 +345,9 @@ func fsCreateFile(ctx context.Context, filePath string, reader io.Reader, buf []
 	if buf != nil {
 		bytesWritten, err = io.CopyBuffer(writer, reader, buf)
 		if err != nil {
-			logger.LogIf(ctx, err)
+			if err != io.ErrUnexpectedEOF {
+				logger.LogIf(ctx, err)
+			}
 			return 0, err
 		}
 	} else {
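The one place that keeps its log call, fsCreateFile above, now skips io.ErrUnexpectedEOF: a short read usually means the client stopped sending data, which is worth returning but not worth an error-level server log. A standalone sketch of the same guard (illustrative only, not the MinIO call path; io.ReadFull is used here because it reports io.ErrUnexpectedEOF when the source ends early):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// copyExactly copies exactly n bytes from src to dst. Unexpected failures are
// logged; a short source (io.ErrUnexpectedEOF) is only returned to the caller.
func copyExactly(dst io.Writer, src io.Reader, n int64) error {
	buf := make([]byte, n)
	if _, err := io.ReadFull(src, buf); err != nil {
		if err != io.ErrUnexpectedEOF {
			fmt.Println("server log:", err)
		}
		return err
	}
	_, err := dst.Write(buf)
	return err
}

func main() {
	var dst bytes.Buffer
	// The caller expects 10 bytes but only 5 arrive: the error is returned, nothing is logged.
	fmt.Println(copyExactly(&dst, strings.NewReader("hello"), 10))
}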
@@ -193,7 +193,6 @@ func s3MetaToAzureProperties(ctx context.Context, s3Metadata map[string]string)
 	storage.BlobProperties, error) {
 	for k := range s3Metadata {
 		if strings.Contains(k, "--") {
-			logger.LogIf(ctx, minio.UnsupportedMetadata{})
 			return storage.BlobMetadata{}, storage.BlobProperties{}, minio.UnsupportedMetadata{}
 		}
 	}
@@ -377,18 +376,12 @@ func getAzureUploadID() (string, error) {
 // checkAzureUploadID - returns error in case of given string is upload ID.
 func checkAzureUploadID(ctx context.Context, uploadID string) (err error) {
 	if len(uploadID) != 16 {
-		logger.LogIf(ctx, minio.MalformedUploadID{
-			UploadID: uploadID,
-		})
 		return minio.MalformedUploadID{
 			UploadID: uploadID,
 		}
 	}
 
 	if _, err = hex.DecodeString(uploadID); err != nil {
-		logger.LogIf(ctx, minio.MalformedUploadID{
-			UploadID: uploadID,
-		})
 		return minio.MalformedUploadID{
 			UploadID: uploadID,
 		}
@@ -449,7 +442,6 @@ func (a *azureObjects) MakeBucketWithLocation(ctx context.Context, bucket, locat
 	// in azure documentation, so we will simply use the same function here.
 	// Ref - https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata
 	if !minio.IsValidBucketName(bucket) {
-		logger.LogIf(ctx, minio.BucketNameInvalid{Bucket: bucket})
 		return minio.BucketNameInvalid{Bucket: bucket}
 	}
 
@@ -457,7 +449,6 @@ func (a *azureObjects) MakeBucketWithLocation(ctx context.Context, bucket, locat
 	err := container.Create(&storage.CreateContainerOptions{
 		Access: storage.ContainerAccessTypePrivate,
 	})
-	logger.LogIf(ctx, err)
 	return azureToObjectError(err, bucket)
 }
 
@@ -469,7 +460,6 @@ func (a *azureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi min
 		Prefix: bucket,
 	})
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return bi, azureToObjectError(err, bucket)
 	}
 	for _, container := range resp.Containers {
@@ -490,7 +480,6 @@ func (a *azureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi min
 func (a *azureObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) {
 	resp, err := a.client.ListContainers(storage.ListContainersParameters{})
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return nil, azureToObjectError(err)
 	}
 	for _, container := range resp.Containers {
@@ -511,7 +500,6 @@ func (a *azureObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketI
 func (a *azureObjects) DeleteBucket(ctx context.Context, bucket string) error {
 	container := a.client.GetContainerReference(bucket)
 	err := container.Delete(nil)
-	logger.LogIf(ctx, err)
 	return azureToObjectError(err, bucket)
 }
 
@@ -549,7 +537,6 @@ func (a *azureObjects) ListObjects(ctx context.Context, bucket, prefix, marker,
 		MaxResults: uint(maxKeys),
 	})
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return result, azureToObjectError(err, bucket, prefix)
 	}
 
@@ -637,7 +624,6 @@ func (a *azureObjects) ListObjectsV2(ctx context.Context, bucket, prefix, contin
 func (a *azureObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
 	// startOffset cannot be negative.
 	if startOffset < 0 {
-		logger.LogIf(ctx, minio.InvalidRange{})
 		return azureToObjectError(minio.InvalidRange{}, bucket, object)
 	}
 
@@ -657,7 +643,6 @@ func (a *azureObjects) GetObject(ctx context.Context, bucket, object string, sta
 		})
 	}
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return azureToObjectError(err, bucket, object)
 	}
 	_, err = io.Copy(writer, rc)
@@ -672,7 +657,6 @@ func (a *azureObjects) GetObjectInfo(ctx context.Context, bucket, object string)
 	blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
 	err = blob.GetProperties(nil)
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return objInfo, azureToObjectError(err, bucket, object)
 	}
 
@@ -698,7 +682,6 @@ func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, dat
 	}
 	err = blob.CreateBlockBlobFromReader(data, nil)
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return objInfo, azureToObjectError(err, bucket, object)
 	}
 	return a.GetObjectInfo(ctx, bucket, object)
@@ -716,7 +699,6 @@ func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, des
 	destBlob.Metadata = azureMeta
 	err = destBlob.Copy(srcBlobURL, nil)
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return objInfo, azureToObjectError(err, srcBucket, srcObject)
 	}
 	// Azure will copy metadata from the source object when an empty metadata map is provided.
@@ -726,14 +708,12 @@ func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, des
 		destBlob.Metadata = azureMeta
 		err = destBlob.SetMetadata(nil)
 		if err != nil {
-			logger.LogIf(ctx, err)
 			return objInfo, azureToObjectError(err, srcBucket, srcObject)
 		}
 	}
 	destBlob.Properties = props
 	err = destBlob.SetProperties(nil)
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return objInfo, azureToObjectError(err, srcBucket, srcObject)
 	}
 	return a.GetObjectInfo(ctx, destBucket, destObject)
@@ -769,14 +749,12 @@ func (a *azureObjects) checkUploadIDExists(ctx context.Context, bucketName, obje
 	blob := a.client.GetContainerReference(bucketName).GetBlobReference(
 		getAzureMetadataObjectName(objectName, uploadID))
 	err = blob.GetMetadata(nil)
-	logger.LogIf(ctx, err)
 	err = azureToObjectError(err, bucketName, objectName)
 	oerr := minio.ObjectNotFound{
 		Bucket: bucketName,
 		Object: objectName,
 	}
 	if err == oerr {
-		logger.LogIf(ctx, minio.InvalidUploadID{UploadID: uploadID})
 		err = minio.InvalidUploadID{
 			UploadID: uploadID,
 		}
@@ -802,7 +780,6 @@ func (a *azureObjects) NewMultipartUpload(ctx context.Context, bucket, object st
 	blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject)
 	err = blob.CreateBlockBlobFromReader(bytes.NewBuffer(jsonData), nil)
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return "", azureToObjectError(err, bucket, metadataObject)
 	}
 
@@ -839,7 +816,6 @@ func (a *azureObjects) PutObjectPart(ctx context.Context, bucket, object, upload
 		blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
 		err = blob.PutBlockWithLength(id, uint64(subPartSize), io.LimitReader(data, subPartSize), nil)
 		if err != nil {
-			logger.LogIf(ctx, err)
 			return info, azureToObjectError(err, bucket, object)
 		}
 		subPartNumber++
@@ -871,7 +847,6 @@ func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uplo
 		return result, nil
 	}
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return result, azureToObjectError(err, bucket, object)
 	}
 	// Build a sorted list of parts and return the requested entries.
@@ -881,7 +856,6 @@ func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uplo
 		var parsedUploadID string
 		var md5Hex string
 		if partNumber, _, parsedUploadID, md5Hex, err = azureParseBlockID(block.Name); err != nil {
-			logger.LogIf(ctx, fmt.Errorf("Unexpected error"))
 			return result, azureToObjectError(fmt.Errorf("Unexpected error"), bucket, object)
 		}
 		if parsedUploadID != uploadID {
@@ -899,7 +873,6 @@ func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uplo
 			if part.ETag != md5Hex {
 				// If two parts of same partNumber were uploaded with different contents
 				// return error as we won't be able to decide which the latest part is.
-				logger.LogIf(ctx, fmt.Errorf("Unexpected error"))
 				return result, azureToObjectError(fmt.Errorf("Unexpected error"), bucket, object)
 			}
 			part.Size += block.Size
@@ -966,7 +939,6 @@ func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
 	var metadataReader io.Reader
 	blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject)
 	if metadataReader, err = blob.Get(nil); err != nil {
-		logger.LogIf(ctx, err)
 		return objInfo, azureToObjectError(err, bucket, metadataObject)
 	}
 
@@ -990,7 +962,6 @@ func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
 	objBlob := a.client.GetContainerReference(bucket).GetBlobReference(object)
 	resp, err := objBlob.GetBlockList(storage.BlockListTypeUncommitted, nil)
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return objInfo, azureToObjectError(err, bucket, object)
 	}
 
@@ -1038,11 +1009,6 @@ func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
 	// Error out if parts except last part sizing < 5MiB.
 	for i, size := range partSizes[:len(partSizes)-1] {
 		if size < azureS3MinPartSize {
-			logger.LogIf(ctx, minio.PartTooSmall{
-				PartNumber: uploadedParts[i].PartNumber,
-				PartSize: size,
-				PartETag: uploadedParts[i].ETag,
-			})
 			return objInfo, minio.PartTooSmall{
 				PartNumber: uploadedParts[i].PartNumber,
 				PartSize: size,
@@ -1053,23 +1019,19 @@ func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
 
 	err = objBlob.PutBlockList(allBlocks, nil)
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return objInfo, azureToObjectError(err, bucket, object)
 	}
 	if len(metadata.Metadata) > 0 {
 		objBlob.Metadata, objBlob.Properties, err = s3MetaToAzureProperties(ctx, metadata.Metadata)
 		if err != nil {
-			logger.LogIf(ctx, err)
 			return objInfo, azureToObjectError(err, bucket, object)
 		}
 		err = objBlob.SetProperties(nil)
 		if err != nil {
-			logger.LogIf(ctx, err)
 			return objInfo, azureToObjectError(err, bucket, object)
 		}
 		err = objBlob.SetMetadata(nil)
 		if err != nil {
-			logger.LogIf(ctx, err)
 			return objInfo, azureToObjectError(err, bucket, object)
 		}
 	}
@@ -1099,15 +1061,12 @@ func (a *azureObjects) SetBucketPolicy(ctx context.Context, bucket string, bucke
 	}
 	prefix := bucket + "/*" // For all objects inside the bucket.
 	if len(policies) != 1 {
-		logger.LogIf(ctx, minio.NotImplemented{})
 		return minio.NotImplemented{}
 	}
 	if policies[0].Prefix != prefix {
-		logger.LogIf(ctx, minio.NotImplemented{})
 		return minio.NotImplemented{}
 	}
 	if policies[0].Policy != miniogopolicy.BucketPolicyReadOnly {
-		logger.LogIf(ctx, minio.NotImplemented{})
 		return minio.NotImplemented{}
 	}
 	perm := storage.ContainerPermissions{
@@ -1116,7 +1075,6 @@ func (a *azureObjects) SetBucketPolicy(ctx context.Context, bucket string, bucke
 	}
 	container := a.client.GetContainerReference(bucket)
 	err = container.SetPermissions(perm, nil)
-	logger.LogIf(ctx, err)
 	return azureToObjectError(err, bucket)
 }
 
@@ -1125,14 +1083,12 @@ func (a *azureObjects) GetBucketPolicy(ctx context.Context, bucket string) (*pol
 	container := a.client.GetContainerReference(bucket)
 	perm, err := container.GetPermissions(nil)
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return nil, azureToObjectError(err, bucket)
 	}
 
 	if perm.AccessType == storage.ContainerAccessTypePrivate {
 		return nil, minio.BucketPolicyNotFound{Bucket: bucket}
 	} else if perm.AccessType != storage.ContainerAccessTypeContainer {
-		logger.LogIf(ctx, minio.NotImplemented{})
 		return nil, azureToObjectError(minio.NotImplemented{})
 	}
 
@@ -1165,6 +1121,5 @@ func (a *azureObjects) DeleteBucketPolicy(ctx context.Context, bucket string) er
 	}
 	container := a.client.GetContainerReference(bucket)
 	err := container.SetPermissions(perm, nil)
-	logger.LogIf(ctx, err)
 	return azureToObjectError(err)
 }
@@ -223,13 +223,11 @@ func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket, location
 	// access to these buckets.
 	// Ref - http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
 	if s3utils.CheckValidBucketName(bucket) != nil {
-		logger.LogIf(ctx, minio.BucketNameInvalid{Bucket: bucket})
 		return minio.BucketNameInvalid{Bucket: bucket}
 	}
 
 	err := l.Client.MakeBucket(bucket, location)
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return minio.ErrorRespToObjectError(err, bucket)
 	}
 	return err
@@ -239,7 +237,6 @@ func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket, location
 func (l *s3Objects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, e error) {
 	buckets, err := l.Client.ListBuckets()
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return bi, minio.ErrorRespToObjectError(err, bucket)
 	}
 
@@ -261,7 +258,6 @@ func (l *s3Objects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.
 func (l *s3Objects) ListBuckets(ctx context.Context) ([]minio.BucketInfo, error) {
 	buckets, err := l.Client.ListBuckets()
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return nil, minio.ErrorRespToObjectError(err)
 	}
 
@@ -280,7 +276,6 @@ func (l *s3Objects) ListBuckets(ctx context.Context) ([]minio.BucketInfo, error)
 func (l *s3Objects) DeleteBucket(ctx context.Context, bucket string) error {
 	err := l.Client.RemoveBucket(bucket)
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return minio.ErrorRespToObjectError(err, bucket)
 	}
 	return nil
@@ -290,7 +285,6 @@ func (l *s3Objects) DeleteBucket(ctx context.Context, bucket string) error {
 func (l *s3Objects) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, e error) {
 	result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return loi, minio.ErrorRespToObjectError(err, bucket)
 	}
 
@@ -301,7 +295,6 @@ func (l *s3Objects) ListObjects(ctx context.Context, bucket string, prefix strin
 func (l *s3Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) {
 	result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys, startAfter)
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return loi, minio.ErrorRespToObjectError(err, bucket)
 	}
 
@@ -316,7 +309,6 @@ func (l *s3Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuat
 // length indicates the total length of the object.
 func (l *s3Objects) GetObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string) error {
 	if length < 0 && length != -1 {
-		logger.LogIf(ctx, minio.InvalidRange{})
 		return minio.ErrorRespToObjectError(minio.InvalidRange{}, bucket, key)
 	}
 
@@ -329,7 +321,6 @@ func (l *s3Objects) GetObject(ctx context.Context, bucket string, key string, st
 	}
 	object, _, err := l.Client.GetObject(bucket, key, opts)
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return minio.ErrorRespToObjectError(err, bucket, key)
 	}
 	defer object.Close()
@@ -345,7 +336,6 @@ func (l *s3Objects) GetObject(ctx context.Context, bucket string, key string, st
 func (l *s3Objects) GetObjectInfo(ctx context.Context, bucket string, object string) (objInfo minio.ObjectInfo, err error) {
 	oi, err := l.Client.StatObject(bucket, object, miniogo.StatObjectOptions{})
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return minio.ObjectInfo{}, minio.ErrorRespToObjectError(err, bucket, object)
 	}
 
@@ -356,7 +346,6 @@ func (l *s3Objects) GetObjectInfo(ctx context.Context, bucket string, object str
 func (l *s3Objects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
 	oi, err := l.Client.PutObject(bucket, object, data, data.Size(), data.MD5Base64String(), data.SHA256HexString(), minio.ToMinioClientMetadata(metadata))
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return objInfo, minio.ErrorRespToObjectError(err, bucket, object)
 	}
 
@@ -372,7 +361,6 @@ func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject
 	srcInfo.UserDefined["x-amz-metadata-directive"] = "REPLACE"
 	srcInfo.UserDefined["x-amz-copy-source-if-match"] = srcInfo.ETag
 	if _, err = l.Client.CopyObject(srcBucket, srcObject, dstBucket, dstObject, srcInfo.UserDefined); err != nil {
-		logger.LogIf(ctx, err)
 		return objInfo, minio.ErrorRespToObjectError(err, srcBucket, srcObject)
 	}
 	return l.GetObjectInfo(ctx, dstBucket, dstObject)
@@ -382,7 +370,6 @@ func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject
 func (l *s3Objects) DeleteObject(ctx context.Context, bucket string, object string) error {
 	err := l.Client.RemoveObject(bucket, object)
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return minio.ErrorRespToObjectError(err, bucket, object)
 	}
 
@@ -405,7 +392,6 @@ func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket string, objec
 	opts := miniogo.PutObjectOptions{UserMetadata: metadata}
 	uploadID, err = l.Client.NewMultipartUpload(bucket, object, opts)
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return uploadID, minio.ErrorRespToObjectError(err, bucket, object)
 	}
 	return uploadID, nil
@@ -415,7 +401,6 @@ func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket string, objec
 func (l *s3Objects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi minio.PartInfo, e error) {
 	info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), data.MD5Base64String(), data.SHA256HexString())
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return pi, minio.ErrorRespToObjectError(err, bucket, object)
 	}
 
@@ -433,7 +418,6 @@ func (l *s3Objects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, de
 	completePart, err := l.Client.CopyObjectPart(srcBucket, srcObject, destBucket, destObject,
 		uploadID, partID, startOffset, length, srcInfo.UserDefined)
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return p, minio.ErrorRespToObjectError(err, srcBucket, srcObject)
 	}
 	p.PartNumber = completePart.PartNumber
@@ -454,7 +438,6 @@ func (l *s3Objects) ListObjectParts(ctx context.Context, bucket string, object s
 // AbortMultipartUpload aborts a ongoing multipart upload
 func (l *s3Objects) AbortMultipartUpload(ctx context.Context, bucket string, object string, uploadID string) error {
 	err := l.Client.AbortMultipartUpload(bucket, object, uploadID)
-	logger.LogIf(ctx, err)
 	return minio.ErrorRespToObjectError(err, bucket, object)
 }
 
@@ -462,7 +445,6 @@ func (l *s3Objects) AbortMultipartUpload(ctx context.Context, bucket string, obj
 func (l *s3Objects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []minio.CompletePart) (oi minio.ObjectInfo, e error) {
 	err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, minio.ToMinioClientCompleteParts(uploadedParts))
 	if err != nil {
-		logger.LogIf(ctx, err)
 		return oi, minio.ErrorRespToObjectError(err, bucket, object)
 	}
 
@@ -479,7 +461,6 @@ func (l *s3Objects) SetBucketPolicy(ctx context.Context, bucket string, bucketPo
 	}
 
 	if err := l.Client.SetBucketPolicy(bucket, string(data)); err != nil {
-		logger.LogIf(ctx, err)
 		return minio.ErrorRespToObjectError(err, bucket)
 	}
 
@@ -500,7 +481,6 @@ func (l *s3Objects) GetBucketPolicy(ctx context.Context, bucket string) (*policy
 // DeleteBucketPolicy deletes all policies on bucket
 func (l *s3Objects) DeleteBucketPolicy(ctx context.Context, bucket string) error {
 	if err := l.Client.SetBucketPolicy(bucket, ""); err != nil {
-		logger.LogIf(ctx, err)
 		return minio.ErrorRespToObjectError(err, bucket, "")
 	}
 	return nil
@@ -196,11 +196,6 @@ func checkPutObjectArgs(ctx context.Context, bucket, object string, obj ObjectLa
 		hasPrefix(object, slashSeparator) ||
 		(hasSuffix(object, slashSeparator) && size != 0) ||
 		!IsValidObjectPrefix(object) {
-		logger.LogIf(ctx, ObjectNameInvalid{
-			Bucket: bucket,
-			Object: object,
-		})
-
 		return ObjectNameInvalid{
 			Bucket: bucket,
 			Object: object,
@@ -69,11 +69,6 @@ func (xl xlObjects) isObject(bucket, prefix string) (ok bool) {
 		if IsErrIgnored(err, xlTreeWalkIgnoredErrs...) {
 			continue
 		}
-		reqInfo := &logger.ReqInfo{BucketName: bucket}
-		reqInfo.AppendTags("prefix", prefix)
-		reqInfo.AppendTags("xlMetaJSONFile", xlMetaJSONFile)
-		ctx := logger.SetReqInfo(context.Background(), reqInfo)
-		logger.LogIf(ctx, err)
 	} // Exhausted all disks - return false.
 	return false
 }
@@ -778,7 +778,6 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
 	// Deny if WORM is enabled
 	if globalWORMEnabled {
 		if xl.isObject(bucket, object) {
-			logger.LogIf(ctx, ObjectAlreadyExists{Bucket: bucket, Object: object})
 			return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
 		}
 	}
@@ -462,7 +462,6 @@ func rename(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBuc
 			defer wg.Done()
 			if err := disk.RenameFile(srcBucket, srcEntry, dstBucket, dstEntry); err != nil {
 				if !IsErrIgnored(err, ignoredErr...) {
-					logger.LogIf(ctx, err)
 					errs[index] = err
 				}
 			}
@@ -743,7 +742,6 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string,
 	// Deny if WORM is enabled
 	if globalWORMEnabled {
 		if xl.isObject(bucket, object) {
-			logger.LogIf(ctx, ObjectAlreadyExists{Bucket: bucket, Object: object})
 			return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
 		}
 	}
@@ -66,12 +66,8 @@ func reduceErrs(errs []error, ignoredErrs []error) (maxCount int, maxErr error)
 func reduceQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, quorum int, quorumErr error) error {
 	maxCount, maxErr := reduceErrs(errs, ignoredErrs)
 	if maxCount >= quorum {
-		if maxErr != errFileNotFound && maxErr != errVolumeNotFound {
-			logger.LogIf(ctx, maxErr)
-		}
 		return maxErr
 	}
-	logger.LogIf(ctx, quorumErr)
 	return quorumErr
 }
 