run gofumpt cleanup across code-base (#14015)
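gofumpt is a stricter superset of gofmt, so the hunks below are mechanical formatting rewrites rather than behavior changes. They fall into a few recurring patterns: octal permission literals gain the 0o prefix (os.FileMode(0755) becomes os.FileMode(0o755)), in-function var x = expr declarations become short declarations (x := expr), long test-table entries are split across lines, and redundant blank lines right after an opening brace or right before a closing brace are dropped. The sketch below is a hypothetical example, not code from the MinIO tree, showing the same rewrites in isolation; the tool is typically applied with gofumpt -l -w . from the repository root.

// Hypothetical example (writeConfig and its arguments are invented)
// illustrating the rewrite patterns visible in the diff below.
package example

import (
	"encoding/json"
	"os"
)

func writeConfig(path string, v interface{}) error {
	// Was: var buf, err = json.Marshal(v)
	// Rewritten to a short declaration, as in the gwMetaUnmarshalJSON hunk.
	buf, err := json.Marshal(v)
	if err != nil {
		return err
	}

	// Was: os.FileMode(0644); octal literals now use the 0o prefix.
	return os.WriteFile(path, buf, os.FileMode(0o644))
	// A blank line here, immediately before the closing brace, would be removed.
}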
@@ -349,7 +349,6 @@ func azureTierToS3StorageClass(tierType string) string {
 	default:
 		return "STANDARD"
 	}
-
 }

 // azurePropertiesToS3Meta converts Azure metadata/properties to S3
@@ -578,7 +577,6 @@ func (a *azureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi min
 	resp, err := a.client.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{
 		Prefix: bucket,
 	})
-
 	if err != nil {
 		return bi, azureToObjectError(err, bucket)
 	}
@@ -604,7 +602,6 @@ func (a *azureObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketI

 	for marker.NotDone() {
 		resp, err := a.client.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{})
-
 		if err != nil {
 			return nil, azureToObjectError(err)
 		}
@@ -192,34 +192,41 @@ func TestAzureCodesToObjectError(t *testing.T) {
 	}{
 		{
 			nil, "ContainerAlreadyExists", 0,
-			minio.BucketExists{Bucket: "bucket"}, "bucket", "",
+			minio.BucketExists{Bucket: "bucket"},
+			"bucket", "",
 		},
 		{
 			nil, "InvalidResourceName", 0,
-			minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
+			minio.BucketNameInvalid{Bucket: "bucket."},
+			"bucket.", "",
 		},
 		{
 			nil, "RequestBodyTooLarge", 0,
-			minio.PartTooBig{}, "", "",
+			minio.PartTooBig{},
+			"", "",
 		},
 		{
 			nil, "InvalidMetadata", 0,
-			minio.UnsupportedMetadata{}, "", "",
+			minio.UnsupportedMetadata{},
+			"", "",
 		},
 		{
 			nil, "", http.StatusNotFound,
 			minio.ObjectNotFound{
 				Bucket: "bucket",
 				Object: "object",
-			}, "bucket", "object",
+			},
+			"bucket", "object",
 		},
 		{
 			nil, "", http.StatusNotFound,
-			minio.BucketNotFound{Bucket: "bucket"}, "bucket", "",
+			minio.BucketNotFound{Bucket: "bucket"},
+			"bucket", "",
 		},
 		{
 			nil, "", http.StatusBadRequest,
-			minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
+			minio.BucketNameInvalid{Bucket: "bucket."},
+			"bucket.", "",
 		},
 		{
 			fmt.Errorf("unhandled azure error"), "", http.StatusForbidden,
@@ -1121,7 +1121,6 @@ func (l *gcsGateway) PutObjectPart(ctx context.Context, bucket string, key strin
 		LastModified: minio.UTCNow(),
 		Size:         data.Size(),
 	}, nil
-
 }

 // gcsGetPartInfo returns PartInfo of a given object part
@@ -72,7 +72,6 @@ func TestToGCSPageToken(t *testing.T) {
 			t.Errorf("Test %d: Expected %s, got %s", i+1, toGCSPageToken(testCase.Name), testCase.Token)
 		}
 	}
-
 }

 // TestIsValidGCSProjectIDFormat tests isValidGCSProjectIDFormat
@@ -166,7 +165,6 @@ func TestGCSMultipartDataName(t *testing.T) {
 }

 func TestFromMinioClientListBucketResultToV2Info(t *testing.T) {
-
 	listBucketResult := miniogo.ListBucketResult{
 		IsTruncated: false,
 		Marker:      "testMarker",
@@ -133,7 +133,6 @@ func getKerberosClient() (*krb.Client, error) {
 	realm := env.Get("KRB5REALM", "")
 	if username == "" || realm == "" {
 		return nil, errors.New("empty KRB5USERNAME or KRB5REALM")
-
 	}

 	return krb.NewWithKeytab(username, realm, kt, cfg), nil
@@ -216,7 +215,7 @@ func (g *HDFS) NewGatewayLayer(creds madmin.Credentials) (minio.ObjectLayer, err
 		return nil, fmt.Errorf("unable to initialize hdfsClient: %v", err)
 	}

-	if err = clnt.MkdirAll(minio.PathJoin(commonPath, hdfsSeparator, minioMetaTmpBucket), os.FileMode(0755)); err != nil {
+	if err = clnt.MkdirAll(minio.PathJoin(commonPath, hdfsSeparator, minioMetaTmpBucket), os.FileMode(0o755)); err != nil {
 		return nil, err
 	}

@@ -324,7 +323,7 @@ func (n *hdfsObjects) MakeBucketWithLocation(ctx context.Context, bucket string,
 	if !hdfsIsValidBucketName(bucket) {
 		return minio.BucketNameInvalid{Bucket: bucket}
 	}
-	return hdfsToObjectErr(ctx, n.clnt.Mkdir(n.hdfsPathJoin(bucket), os.FileMode(0755)), bucket)
+	return hdfsToObjectErr(ctx, n.clnt.Mkdir(n.hdfsPathJoin(bucket), os.FileMode(0o755)), bucket)
 }

 func (n *hdfsObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, err error) {
@@ -480,7 +479,6 @@ func fileInfoToObjectInfo(bucket string, entry string, fi os.FileInfo) minio.Obj
 // a path entry to an `os.FileInfo`. It also saves the listed path's `os.FileInfo` in the cache.
 func (n *hdfsObjects) populateDirectoryListing(filePath string, fileInfos map[string]os.FileInfo) (os.FileInfo, error) {
 	dirReader, err := n.clnt.Open(filePath)
-
 	if err != nil {
 		return nil, err
 	}
@@ -494,7 +492,6 @@ func (n *hdfsObjects) populateDirectoryListing(filePath string, fileInfos map[st

 	fileInfos[key] = dirStat
 	infos, err := dirReader.Readdir(0)
-
 	if err != nil {
 		return nil, err
 	}
@@ -602,7 +599,6 @@ func (n *hdfsObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
 	// exit in case of partial read
 	pipeCloser := func() { pr.Close() }
 	return minio.NewGetObjectReaderFromReader(pr, objInfo, opts, pipeCloser)
-
 }

 func (n *hdfsObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.ObjectInfo, error) {
@@ -689,7 +685,7 @@ func (n *hdfsObjects) PutObject(ctx context.Context, bucket string, object strin

 	// If its a directory create a prefix {
 	if strings.HasSuffix(object, hdfsSeparator) && r.Size() == 0 {
-		if err = n.clnt.MkdirAll(name, os.FileMode(0755)); err != nil {
+		if err = n.clnt.MkdirAll(name, os.FileMode(0o755)); err != nil {
 			n.deleteObject(n.hdfsPathJoin(bucket), name)
 			return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
 		}
@@ -707,7 +703,7 @@ func (n *hdfsObjects) PutObject(ctx context.Context, bucket string, object strin
 	}
 	dir := path.Dir(name)
 	if dir != "" {
-		if err = n.clnt.MkdirAll(dir, os.FileMode(0755)); err != nil {
+		if err = n.clnt.MkdirAll(dir, os.FileMode(0o755)); err != nil {
 			w.Close()
 			n.deleteObject(n.hdfsPathJoin(bucket), dir)
 			return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
@@ -839,7 +835,7 @@ func (n *hdfsObjects) CompleteMultipartUpload(ctx context.Context, bucket, objec
 	name := n.hdfsPathJoin(bucket, object)
 	dir := path.Dir(name)
 	if dir != "" {
-		if err = n.clnt.MkdirAll(dir, os.FileMode(0755)); err != nil {
+		if err = n.clnt.MkdirAll(dir, os.FileMode(0o755)); err != nil {
 			return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
 		}
 	}
@@ -89,7 +89,6 @@ func (c *Chain) Retrieve() (credentials.Value, error) {
 	}

 	return credentials.Value{}, fmt.Errorf("no credentials found in %s cannot proceed", providers)
-
 }

 // IsExpired will returned the expired state of the currently cached provider
@@ -139,7 +139,7 @@ func (m gwMetaV1) ObjectToPartOffset(ctx context.Context, offset int64) (partInd

 // Constructs GWMetaV1 using `jsoniter` lib to retrieve each field.
 func gwMetaUnmarshalJSON(ctx context.Context, gwMetaBuf []byte) (gwMeta gwMetaV1, err error) {
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	err = json.Unmarshal(gwMetaBuf, &gwMeta)
 	return gwMeta, err
 }
@@ -75,12 +75,10 @@ func (l *s3EncObjects) ListObjects(ctx context.Context, bucket string, prefix st
 	loi.Objects = res.Objects
 	loi.Prefixes = res.Prefixes
 	return loi, nil
-
 }

 // ListObjectsV2 lists all blobs in S3 bucket filtered by prefix
 func (l *s3EncObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) {
-
 	var objects []minio.ObjectInfo
 	var prefixes []string
 	var isTruncated bool
@@ -423,7 +421,6 @@ func (l *s3EncObjects) DeleteObjects(ctx context.Context, bucket string, objects

 // ListMultipartUploads lists all multipart uploads.
 func (l *s3EncObjects) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, e error) {
-
 	lmi, e = l.s3Objects.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
 	if e != nil {
 		return
@@ -505,7 +502,6 @@ func (l *s3EncObjects) PutObject(ctx context.Context, bucket string, object stri

 // PutObjectPart puts a part of object in bucket
 func (l *s3EncObjects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *minio.PutObjReader, opts minio.ObjectOptions) (pi minio.PartInfo, e error) {
-
 	if opts.ServerSideEncryption == nil {
 		return l.s3Objects.PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
 	}
@@ -630,7 +626,6 @@ func (l *s3EncObjects) AbortMultipartUpload(ctx context.Context, bucket string,

 // CompleteMultipartUpload completes ongoing multipart upload and finalizes object
 func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (oi minio.ObjectInfo, e error) {
-
 	tmpMeta, err := l.getGWMetadata(ctx, bucket, getTmpDareMetaPath(object, uploadID))
 	if err != nil {
 		oi, e = l.s3Objects.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)