cleanup ignored static analysis (#16767)

ferhat elmas 2023-03-06 17:56:10 +01:00 committed by GitHub
parent 3423028713
commit 714283fae2
GPG Key ID: 4AEE18F83AFDEB23
48 changed files with 182 additions and 261 deletions


@ -1,12 +1,12 @@
linters-settings:
gofumpt:
lang-version: '1.19'
lang-version: '1.20'
misspell:
locale: US
staticcheck:
go: '1.19'
go: '1.20'
checks: ['all', '-ST1005', '-ST1000', '-SA4000', '-SA9004', '-SA1019', '-SA1008', '-U1000', '-ST1016']
linters:
@ -33,9 +33,3 @@ issues:
exclude:
- should have a package comment
- error strings should not be capitalized or end with punctuation or a newline
# todo fix these when we get enough time.
- 'singleCaseSwitch: should rewrite switch statement to if statement'
- 'unlambda: replace'
- 'captLocal:'
- 'ifElseChain:'
- 'elseif:'
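(Note: the exclusions dropped above are gocritic checks (singleCaseSwitch, unlambda, captLocal, ifElseChain, elseif), and they drive the mechanical rewrites in the files below. A minimal Go sketch of the shapes those checks expect; all names here are hypothetical and none come from the MinIO tree.)

// Hypothetical sketch of the gocritic rewrites enforced after this cleanup.
package example

import "errors"

var errNotFound = errors.New("not found")

type timeoutError struct{}

func (timeoutError) Error() string { return "timeout" }

// singleCaseSwitch: a one-case value switch is written as a plain if.
func describe(err error) string {
	if err == errNotFound {
		return "object is missing"
	}
	return "unexpected error"
}

// A one-case type switch becomes a type assertion.
func isTimeout(err error) bool {
	_, ok := err.(timeoutError)
	return ok
}

// ifElseChain: a chain of three or more if/else-if branches becomes a switch.
func deploymentMode(dist, erasure bool) string {
	switch {
	case dist:
		return "distributed"
	case erasure:
		return "erasure"
	default:
		return "fs"
	}
}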


@ -686,8 +686,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
continue
}
bucket, fileName := slc[0], slc[1]
switch fileName {
case objectLockConfig:
if fileName == objectLockConfig {
reader, err := file.Open()
if err != nil {
rpt.SetStatus(bucket, fileName, err)
@ -756,8 +755,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
continue
}
bucket, fileName := slc[0], slc[1]
switch fileName {
case bucketVersioningConfig:
if fileName == bucketVersioningConfig {
reader, err := file.Open()
if err != nil {
rpt.SetStatus(bucket, fileName, err)


@ -424,8 +424,7 @@ func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError
apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
}
if globalSite.Region != "" {
switch errCode {
case ErrAuthorizationHeaderMalformed:
if errCode == ErrAuthorizationHeaderMalformed {
apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
return apiErr
}
@ -2067,15 +2066,13 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
case objectlock.ErrMalformedXML:
apiErr = ErrMalformedXML
default:
switch {
case errors.Is(err, errNoSuchPolicy):
if errors.Is(err, errNoSuchPolicy) {
apiErr = ErrAdminNoSuchPolicy
}
}
// Compression errors
switch err {
case errInvalidDecompressedSize:
if err == errInvalidDecompressedSize {
apiErr = ErrInvalidDecompressedSize
}
@ -2281,8 +2278,7 @@ func toAPIError(ctx context.Context, err error) APIError {
}
if apiErr.Code == "NotImplemented" {
switch e := err.(type) {
case NotImplemented:
if e, ok := err.(NotImplemented); ok {
desc := e.Error()
if desc == "" {
desc = apiErr.Description


@ -94,14 +94,8 @@ func s3URLEncode(s string) string {
}
// s3EncodeName encodes string in response when encodingType is specified in AWS S3 requests.
func s3EncodeName(name string, encodingType string) (result string) {
// Quick path to exit
if encodingType == "" {
return name
}
encodingType = strings.ToLower(encodingType)
switch encodingType {
case "url":
func s3EncodeName(name, encodingType string) string {
if strings.ToLower(encodingType) == "url" {
return s3URLEncode(name)
}
return name


@ -323,8 +323,7 @@ func (b *BucketMetadata) getAllLegacyConfigs(ctx context.Context, objectAPI Obje
configData, err := readConfig(ctx, objectAPI, configFile)
if err != nil {
switch err.(type) {
case ObjectExistsAsDirectory:
if _, ok := err.(ObjectExistsAsDirectory); ok {
// in FS mode it is possible that we have actual
// files in this folder with `.minio.sys/buckets/bucket/configFile`
continue


@ -90,8 +90,8 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucke
opts.VersionID = object.VersionID
if gerr != nil { // error from GetObjectInfo
switch gerr.(type) {
case MethodNotAllowed: // This happens usually for a delete marker
if _, ok := gerr.(MethodNotAllowed); ok {
// This happens usually for a delete marker
if oi.DeleteMarker || !oi.VersionPurgeStatus.Empty() {
// Delete marker should be present and valid.
return ErrNone
@ -192,8 +192,7 @@ func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, oi Objec
days, objRetention.RetainUntilDate.Time,
objRetention.Mode, byPassSet, r, cred,
owner)
switch apiErr {
case ErrAccessDenied:
if apiErr == ErrAccessDenied {
return errAuthentication
}
return nil
@ -210,8 +209,7 @@ func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, oi Objec
return ObjectLocked{Bucket: oi.Bucket, Object: oi.Name, VersionID: oi.VersionID}
}
}
switch govPerm {
case ErrAccessDenied:
if govPerm == ErrAccessDenied {
return errAuthentication
}
return nil
@ -224,8 +222,7 @@ func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, oi Objec
apiErr := isPutRetentionAllowed(oi.Bucket, oi.Name,
days, objRetention.RetainUntilDate.Time, objRetention.Mode,
false, r, cred, owner)
switch apiErr {
case ErrAccessDenied:
if apiErr == ErrAccessDenied {
return errAuthentication
}
return nil
@ -236,8 +233,7 @@ func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, oi Objec
apiErr := isPutRetentionAllowed(oi.Bucket, oi.Name,
days, objRetention.RetainUntilDate.Time,
objRetention.Mode, byPassSet, r, cred, owner)
switch apiErr {
case ErrAccessDenied:
if apiErr == ErrAccessDenied {
return errAuthentication
}
return nil


@ -496,8 +496,7 @@ func getHealReplicateObjectInfo(objInfo ObjectInfo, rcfg replicationConfig) Repl
oi.VersionPurgeStatusInternal = fmt.Sprintf("%s=%s;", rcfg.Config.RoleArn, oi.VersionPurgeStatus)
}
for k, v := range oi.UserDefined {
switch {
case strings.EqualFold(k, ReservedMetadataPrefixLower+ReplicationReset):
if strings.EqualFold(k, ReservedMetadataPrefixLower+ReplicationReset) {
delete(oi.UserDefined, k)
oi.UserDefined[targetResetHeader(rcfg.Config.RoleArn)] = v
}


@ -783,8 +783,7 @@ func handleCommonEnvVars() {
// Initialize KMS global variable after validating and loading the configuration.
// It depends on KMS env variables and global cli flags.
func handleKMSConfig() {
switch {
case env.IsSet(kms.EnvKMSSecretKey) && env.IsSet(kms.EnvKESEndpoint):
if env.IsSet(kms.EnvKMSSecretKey) && env.IsSet(kms.EnvKESEndpoint) {
logger.Fatal(errors.New("ambigious KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKMSSecretKey, kms.EnvKESEndpoint))
}


@ -33,8 +33,7 @@ const crossDomainXMLEntity = "/crossdomain.xml"
func setCrossDomainPolicy(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Look for 'crossdomain.xml' in the incoming request.
switch r.URL.Path {
case crossDomainXMLEntity:
if r.URL.Path == crossDomainXMLEntity {
// Write the standard cross domain policy xml.
w.Write([]byte(crossDomainXML))
// Request completed, no need to serve to other handlers.


@ -1273,8 +1273,7 @@ func (i *scannerItem) healReplication(ctx context.Context, o ObjectLayer, oi Obj
sizeS.replTargetStats[arn] = tgtSizeS
}
switch oi.ReplicationStatus {
case replication.Replica:
if oi.ReplicationStatus == replication.Replica {
sizeS.replicaSize += oi.Size
}
}


@ -110,7 +110,7 @@ func kmsKeyIDFromMetadata(metadata map[string]string) string {
//
// DecryptETags uses a KMS bulk decryption API, if available, which
// is more efficient than decrypting ETags sequentially.
func DecryptETags(ctx context.Context, KMS kms.KMS, objects []ObjectInfo) error {
func DecryptETags(ctx context.Context, k kms.KMS, objects []ObjectInfo) error {
const BatchSize = 250 // We process the objects in batches - 250 is a reasonable default.
var (
metadata = make([]map[string]string, 0, BatchSize)
@ -170,7 +170,7 @@ func DecryptETags(ctx context.Context, KMS kms.KMS, objects []ObjectInfo) error
// For all SSE-S3 single-part objects we have to
// fetch their decryption keys. We do this using
// a Bulk-Decryption API call, if available.
keys, err := crypto.S3.UnsealObjectKeys(ctx, KMS, metadata, buckets, names)
keys, err := crypto.S3.UnsealObjectKeys(ctx, k, metadata, buckets, names)
if err != nil {
return err
}


@ -62,13 +62,14 @@ func TestNewEndpoint(t *testing.T) {
err = endpoint.UpdateIsLocal()
}
if test.expectedErr == nil {
switch {
case test.expectedErr == nil:
if err != nil {
t.Errorf("error: expected = <nil>, got = %v", err)
}
} else if err == nil {
case err == nil:
t.Errorf("error: expected = %v, got = <nil>", test.expectedErr)
} else if test.expectedErr.Error() != err.Error() {
case test.expectedErr.Error() != err.Error():
t.Errorf("error: expected = %v, got = %v", test.expectedErr, err)
}
@ -115,13 +116,14 @@ func TestNewEndpoints(t *testing.T) {
for _, testCase := range testCases {
_, err := NewEndpoints(testCase.args...)
if testCase.expectedErr == nil {
switch {
case testCase.expectedErr == nil:
if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
}
} else if err == nil {
case err == nil:
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
} else if testCase.expectedErr.Error() != err.Error() {
case testCase.expectedErr.Error() != err.Error():
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
}
}


@ -910,13 +910,14 @@ func isObjectDirDangling(errs []error) (ok bool) {
var foundNotEmpty int
var otherFound int
for _, readErr := range errs {
if readErr == nil {
switch {
case readErr == nil:
found++
} else if readErr == errFileNotFound || readErr == errVolumeNotFound {
case readErr == errFileNotFound || readErr == errVolumeNotFound:
notFound++
} else if readErr == errVolumeNotEmpty {
case readErr == errVolumeNotEmpty:
foundNotEmpty++
} else {
default:
otherFound++
}
}
@ -938,11 +939,12 @@ func isObjectDangling(metaArr []FileInfo, errs []error, dataErrs []error) (valid
diskNotFoundCount int
)
for _, readErr := range cerrs {
if errors.Is(readErr, errFileNotFound) || errors.Is(readErr, errFileVersionNotFound) {
switch {
case errors.Is(readErr, errFileNotFound) || errors.Is(readErr, errFileVersionNotFound):
notFoundCount++
} else if errors.Is(readErr, errFileCorrupt) {
case errors.Is(readErr, errFileCorrupt):
corruptedCount++
} else if errors.Is(readErr, errDiskNotFound) {
case errors.Is(readErr, errDiskNotFound):
diskNotFoundCount++
}
}


@ -1566,8 +1566,7 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string
objInfo = ObjectInfo{VersionID: opts.VersionID} // version id needed in Delete API response.
goi, _, gerr := er.getObjectInfoAndQuorum(ctx, bucket, object, opts)
if gerr != nil && goi.Name == "" {
switch gerr.(type) {
case InsufficientReadQuorum:
if _, ok := gerr.(InsufficientReadQuorum); ok {
return objInfo, InsufficientWriteQuorum{}
}
// For delete marker replication, versionID being replicated will not exist on disk


@ -987,8 +987,7 @@ func (z *erasureServerPools) DeleteObject(ctx context.Context, bucket string, ob
gopts.NoLock = true
pinfo, err := z.getPoolInfoExistingWithOpts(ctx, bucket, object, gopts)
if err != nil {
switch err.(type) {
case InsufficientReadQuorum:
if _, ok := err.(InsufficientReadQuorum); ok {
return objInfo, InsufficientWriteQuorum{}
}
return objInfo, err
@ -1454,8 +1453,7 @@ func (z *erasureServerPools) PutObjectPart(ctx context.Context, bucket, object,
if err == nil {
return pi, nil
}
switch err.(type) {
case InvalidUploadID:
if _, ok := err.(InvalidUploadID); ok {
// Look for information on the next pool
continue
}
@ -1486,8 +1484,7 @@ func (z *erasureServerPools) GetMultipartInfo(ctx context.Context, bucket, objec
if err == nil {
return mi, nil
}
switch err.(type) {
case InvalidUploadID:
if _, ok := err.(InvalidUploadID); ok {
// upload id not found, continue to the next pool.
continue
}
@ -1518,8 +1515,7 @@ func (z *erasureServerPools) ListObjectParts(ctx context.Context, bucket, object
if err == nil {
return result, nil
}
switch err.(type) {
case InvalidUploadID:
if _, ok := err.(InvalidUploadID); ok {
continue
}
return ListPartsInfo{}, err
@ -1549,8 +1545,7 @@ func (z *erasureServerPools) AbortMultipartUpload(ctx context.Context, bucket, o
if err == nil {
return nil
}
switch err.(type) {
case InvalidUploadID:
if _, ok := err.(InvalidUploadID); ok {
// upload id not found move to next pool
continue
}
@ -1581,8 +1576,7 @@ func (z *erasureServerPools) CompleteMultipartUpload(ctx context.Context, bucket
if err == nil {
return objInfo, nil
}
switch err.(type) {
case InvalidUploadID:
if _, ok := err.(InvalidUploadID); ok {
// upload id not found move to next pool
continue
}
@ -1639,8 +1633,7 @@ func (z *erasureServerPools) DeleteBucket(ctx context.Context, bucket string, op
err := z.s3Peer.DeleteBucket(ctx, bucket, opts)
if err == nil || errors.Is(err, errVolumeNotFound) {
// If site replication is configured, hold on to deleted bucket state until sites sync
switch opts.SRDeleteOp {
case MarkDelete:
if opts.SRDeleteOp == MarkDelete {
z.s3Peer.MakeBucket(context.Background(), pathJoin(minioMetaBucket, bucketMetaPrefix, deletedBucketsPrefix, bucket), MakeBucketOptions{})
}
}
@ -1744,8 +1737,7 @@ func (z *erasureServerPools) HealBucket(ctx context.Context, bucket string, opts
for _, pool := range z.serverPools {
result, err := pool.HealBucket(ctx, bucket, opts)
if err != nil {
switch err.(type) {
case BucketNotFound:
if _, ok := err.(BucketNotFound); ok {
continue
}
return result, err


@ -816,15 +816,16 @@ func (m *metaCacheEntriesSorted) merge(other metaCacheEntriesSorted, limit int)
a := m.entries()
b := other.entries()
for len(a) > 0 && len(b) > 0 {
if a[0].name == b[0].name && bytes.Equal(a[0].metadata, b[0].metadata) {
switch {
case a[0].name == b[0].name && bytes.Equal(a[0].metadata, b[0].metadata):
// Same, discard one.
merged = append(merged, a[0])
a = a[1:]
b = b[1:]
} else if a[0].name < b[0].name {
case a[0].name < b[0].name:
merged = append(merged, a[0])
a = a[1:]
} else {
default:
merged = append(merged, b[0])
b = b[1:]
}


@ -870,8 +870,7 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
var fdMu sync.Mutex
fds := opts.fallbackDisks
fallback := func(err error) StorageAPI {
switch err.(type) {
case StorageErr:
if _, ok := err.(StorageErr); ok {
// Attempt to grab a fallback disk
fdMu.Lock()
defer fdMu.Unlock()


@ -562,8 +562,7 @@ func (r *metacacheReader) readAll(ctx context.Context, dst chan<- metaCacheEntry
}
for {
if more, err := r.mr.ReadBool(); !more {
switch err {
case io.EOF:
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
r.err = err


@ -2322,8 +2322,7 @@ func (c *minioClusterCollector) Collect(out chan<- prometheus.Metric) {
continue
}
metricType := prometheus.GaugeValue
switch metric.Description.Type {
case counterMetric:
if metric.Description.Type == counterMetric {
metricType = prometheus.CounterValue
}
toPost := prometheus.MustNewConstMetric(
@ -2431,8 +2430,7 @@ func (c *minioNodeCollector) Collect(ch chan<- prometheus.Metric) {
}
metricType := prometheus.GaugeValue
switch metric.Description.Type {
case counterMetric:
if metric.Description.Type == counterMetric {
metricType = prometheus.CounterValue
}
ch <- prometheus.MustNewConstMetric(


@ -171,8 +171,7 @@ func (di *distLockInstance) GetLock(ctx context.Context, timeout *dynamicTimeout
}) {
timeout.LogFailure()
cancel()
switch err := newCtx.Err(); err {
case context.Canceled:
if err := newCtx.Err(); err == context.Canceled {
return LockContext{ctx: ctx, cancel: func() {}}, err
}
return LockContext{ctx: ctx, cancel: func() {}}, OperationTimedOut{}
@ -201,8 +200,7 @@ func (di *distLockInstance) GetRLock(ctx context.Context, timeout *dynamicTimeou
}) {
timeout.LogFailure()
cancel()
switch err := newCtx.Err(); err {
case context.Canceled:
if err := newCtx.Err(); err == context.Canceled {
return LockContext{ctx: ctx, cancel: func() {}}, err
}
return LockContext{ctx: ctx, cancel: func() {}}, OperationTimedOut{}
@ -257,8 +255,7 @@ func (li *localLockInstance) GetLock(ctx context.Context, timeout *dynamicTimeou
li.ns.unlock(li.volume, li.paths[si], readLock)
}
}
switch err := ctx.Err(); err {
case context.Canceled:
if err := ctx.Err(); err == context.Canceled {
return LockContext{}, err
}
return LockContext{}, OperationTimedOut{}
@ -294,8 +291,7 @@ func (li *localLockInstance) GetRLock(ctx context.Context, timeout *dynamicTimeo
li.ns.unlock(li.volume, li.paths[si], readLock)
}
}
switch err := ctx.Err(); err {
case context.Canceled:
if err := ctx.Err(); err == context.Canceled {
return LockContext{}, err
}
return LockContext{}, OperationTimedOut{}


@ -138,13 +138,14 @@ func TestGetHostIP(t *testing.T) {
for _, testCase := range testCases {
ipList, err := getHostIP(testCase.host)
if testCase.expectedErr == nil {
switch {
case testCase.expectedErr == nil:
if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
}
} else if err == nil {
case err == nil:
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
} else if testCase.expectedErr.Error() != err.Error() {
case testCase.expectedErr.Error() != err.Error():
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
}
@ -221,13 +222,14 @@ func TestCheckPortAvailability(t *testing.T) {
}
err := checkPortAvailability(testCase.host, testCase.port)
if testCase.expectedErr == nil {
switch {
case testCase.expectedErr == nil:
if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
}
} else if err == nil {
case err == nil:
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
} else if testCase.expectedErr.Error() != err.Error() {
case testCase.expectedErr.Error() != err.Error():
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
}
}
@ -252,13 +254,14 @@ func TestCheckLocalServerAddr(t *testing.T) {
testCase := testCase
t.Run("", func(t *testing.T) {
err := CheckLocalServerAddr(testCase.serverAddr)
if testCase.expectedErr == nil {
switch {
case testCase.expectedErr == nil:
if err != nil {
t.Errorf("error: expected = <nil>, got = %v", err)
}
} else if err == nil {
case err == nil:
t.Errorf("error: expected = %v, got = <nil>", testCase.expectedErr)
} else if testCase.expectedErr.Error() != err.Error() {
case testCase.expectedErr.Error() != err.Error():
t.Errorf("error: expected = %v, got = %v", testCase.expectedErr, err)
}
})


@ -2368,8 +2368,7 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html
objInfo, err := deleteObject(ctx, bucket, object, opts)
if err != nil {
switch err.(type) {
case BucketNotFound:
if _, ok := err.(BucketNotFound); ok {
// When bucket doesn't exist specially handle it.
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return


@ -1197,17 +1197,18 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
rec := httptest.NewRecorder()
// construct HTTP request for Put Object end point.
var req *http.Request
if testCase.fault == chunkDateMismatch {
switch {
case testCase.fault == chunkDateMismatch:
req, err = newTestStreamingSignedBadChunkDateRequest(http.MethodPut,
getPutObjectURL("", testCase.bucketName, testCase.objectName),
int64(testCase.dataLen), testCase.chunkSize, bytes.NewReader(testCase.data),
testCase.accessKey, testCase.secretKey)
} else if testCase.contentEncoding == "" {
case testCase.contentEncoding == "":
req, err = newTestStreamingSignedRequest(http.MethodPut,
getPutObjectURL("", testCase.bucketName, testCase.objectName),
int64(testCase.dataLen), testCase.chunkSize, bytes.NewReader(testCase.data),
testCase.accessKey, testCase.secretKey)
} else if testCase.contentEncoding != "" {
case testCase.contentEncoding != "":
req, err = newTestStreamingSignedCustomEncodingRequest(http.MethodPut,
getPutObjectURL("", testCase.bucketName, testCase.objectName),
int64(testCase.dataLen), testCase.chunkSize, bytes.NewReader(testCase.data),
@ -2045,12 +2046,13 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
var req *http.Request
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
if !testCase.invalidPartNumber || !testCase.maximumPartNumber {
switch {
case !testCase.invalidPartNumber || !testCase.maximumPartNumber:
// construct HTTP request for copy object.
req, err = newTestSignedRequestV4(http.MethodPut, getCopyObjectPartURL("", testCase.bucketName, testObject, testCase.uploadID, "1"), 0, nil, testCase.accessKey, testCase.secretKey, nil)
} else if testCase.invalidPartNumber {
case testCase.invalidPartNumber:
req, err = newTestSignedRequestV4(http.MethodPut, getCopyObjectPartURL("", testCase.bucketName, testObject, testCase.uploadID, "abc"), 0, nil, testCase.accessKey, testCase.secretKey, nil)
} else if testCase.maximumPartNumber {
case testCase.maximumPartNumber:
req, err = newTestSignedRequestV4(http.MethodPut, getCopyObjectPartURL("", testCase.bucketName, testObject, testCase.uploadID, "99999"), 0, nil, testCase.accessKey, testCase.secretKey, nil)
}
if err != nil {
@ -3545,11 +3547,9 @@ func testAPIPutObjectPartHandlerStreaming(obj ObjectLayer, instanceType, bucketN
t.Errorf("Test %d %s expected to fail with error %s, but received %s", i+1, instanceType,
test.expectedErr.Code, errXML.Code)
}
} else {
if rec.Code != http.StatusOK {
t.Errorf("Test %d %s expected to succeed, but failed with HTTP status code %d",
i+1, instanceType, rec.Code)
}
} else if rec.Code != http.StatusOK {
t.Errorf("Test %d %s expected to succeed, but failed with HTTP status code %d",
i+1, instanceType, rec.Code)
}
}
}
@ -4119,10 +4119,8 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str
reqType, test.expectedErr.Code, errXML.Code)
}
// in case error is not expected response status should be 200OK.
} else {
if rec.Code != http.StatusOK {
t.Errorf("%s, Expected to succeed with response HTTP status 200OK, but failed with HTTP status code %d.", reqType, rec.Code)
}
} else if rec.Code != http.StatusOK {
t.Errorf("%s, Expected to succeed with response HTTP status 200OK, but failed with HTTP status code %d.", reqType, rec.Code)
}
}
})


@ -109,8 +109,7 @@ func parseCredentialHeader(credElement string, region string, stype serviceType)
return ch, ErrAuthorizationHeaderMalformed
}
if credElements[2] != string(stype) {
switch stype {
case serviceSTS:
if stype == serviceSTS {
return ch, ErrInvalidServiceSTS
}
return ch, ErrInvalidServiceS3


@ -630,9 +630,8 @@ func TestDoesV4PresignParamsExist(t *testing.T) {
// TestParsePreSignV4 - Validates the parsing logic of Presigned v4 request from its url query values.
func TestParsePreSignV4(t *testing.T) {
// converts the duration in seconds into string format.
getDurationStr := func(expires int) string {
return strconv.Itoa(expires)
}
getDurationStr := strconv.Itoa
// used in expected preSignValues, preSignValues.Date is of type time.Time .
queryTime := UTCNow()
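
(The change above is gocritic's unlambda fix: a closure that only forwards its arguments is replaced by the function value itself. A standalone sketch with hypothetical names, assuming nothing beyond the standard library:)

// Hypothetical unlambda example.
package example

import "strconv"

// Before: a wrapper closure that adds nothing.
var durationStrOld = func(expires int) string { return strconv.Itoa(expires) }

// After: the function value is used directly; the type is still func(int) string.
var durationStr = strconv.Itoa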


@ -150,10 +150,8 @@ func TestSkipContentSha256Cksum(t *testing.T) {
q.Add(testCase.inputHeaderKey, testCase.inputHeaderValue)
}
inputReq.URL.RawQuery = q.Encode()
} else {
if testCase.inputHeaderKey != "" {
inputReq.Header.Set(testCase.inputHeaderKey, testCase.inputHeaderValue)
}
} else if testCase.inputHeaderKey != "" {
inputReq.Header.Set(testCase.inputHeaderKey, testCase.inputHeaderValue)
}
inputReq.ParseForm()


@ -1327,8 +1327,7 @@ func (c *SiteReplicationSys) PeerSTSAccHandler(ctx context.Context, stsCred *mad
// Extract the username and lookup DN and groups in LDAP.
ldapUser, isLDAPSTS := claims.Lookup(ldapUserN)
switch {
case isLDAPSTS:
if isLDAPSTS {
// Need to lookup the groups from LDAP.
_, ldapGroups, err := globalIAMSys.LDAPConfig.LookupUserDN(ldapUser)
if err != nil {


@ -1477,9 +1477,9 @@ func getListenNotificationURL(endPoint, bucketName string, prefixes, suffixes, e
}
// getRandomDisks - Creates a slice of N random disks, each of the form - minio-XXX
func getRandomDisks(N int) ([]string, error) {
func getRandomDisks(n int) ([]string, error) {
var erasureDisks []string
for i := 0; i < N; i++ {
for i := 0; i < n; i++ {
path, err := os.MkdirTemp(globalTestTmpDir, "minio-")
if err != nil {
// Remove directories created so far.


@ -300,13 +300,14 @@ func TestDownloadReleaseData(t *testing.T) {
}
result, err := downloadReleaseURL(u, 1*time.Second, "")
if testCase.expectedErr == nil {
switch {
case testCase.expectedErr == nil:
if err != nil {
t.Fatalf("error: expected: %v, got: %v", testCase.expectedErr, err)
}
} else if err == nil {
case err == nil:
t.Fatalf("error: expected: %v, got: %v", testCase.expectedErr, err)
} else if testCase.expectedErr.Error() != err.Error() {
case testCase.expectedErr.Error() != err.Error():
t.Fatalf("error: expected: %v, got: %v", testCase.expectedErr, err)
}


@ -159,8 +159,7 @@ func ErrorRespToObjectError(err error, params ...string) error {
err = PartTooSmall{}
}
switch minioErr.StatusCode {
case http.StatusMethodNotAllowed:
if minioErr.StatusCode == http.StatusMethodNotAllowed {
err = toObjectErr(errMethodNotAllowed, bucket, object)
}
return err
@ -908,15 +907,16 @@ func lcp(strs []string, pre bool) string {
// Returns the mode in which MinIO is running
func getMinioMode() string {
mode := globalMinioModeFS
if globalIsDistErasure {
mode = globalMinioModeDistErasure
} else if globalIsErasure {
mode = globalMinioModeErasure
} else if globalIsErasureSD {
mode = globalMinioModeErasureSD
switch {
case globalIsDistErasure:
return globalMinioModeDistErasure
case globalIsErasure:
return globalMinioModeErasure
case globalIsErasureSD:
return globalMinioModeErasureSD
default:
return globalMinioModeFS
}
return mode
}
func iamPolicyClaimNameOpenID() string {


@ -51,8 +51,7 @@ func checkWarmBackend(ctx context.Context, w WarmBackend) error {
var empty bytes.Reader
rv, err := w.Put(ctx, probeObject, &empty, 0)
if err != nil {
switch err.(type) {
case BackendDown:
if _, ok := err.(BackendDown); ok {
return err
}
return tierPermErr{
@ -64,8 +63,7 @@ func checkWarmBackend(ctx context.Context, w WarmBackend) error {
r, err := w.Get(ctx, probeObject, rv, WarmBackendGetOpts{})
xhttp.DrainBody(r)
if err != nil {
switch err.(type) {
case BackendDown:
if _, ok := err.(BackendDown); ok {
return err
}
switch {
@ -81,8 +79,7 @@ func checkWarmBackend(ctx context.Context, w WarmBackend) error {
}
}
if err = w.Remove(ctx, probeObject, rv); err != nil {
switch err.(type) {
case BackendDown:
if _, ok := err.(BackendDown); ok {
return err
}
return tierPermErr{


@ -1887,11 +1887,12 @@ func mergeXLV2Versions(quorum int, strict bool, requestedVersions int, versions
continue
}
if i == 0 || ver.header.sortsBefore(latest.header) {
if i == 0 || latestCount == 0 {
switch {
case i == 0 || latestCount == 0:
latestCount = 1
} else if !strict && ver.header.matchesNotStrict(latest.header) {
case !strict && ver.header.matchesNotStrict(latest.header):
latestCount++
} else {
default:
latestCount = 1
}
latest = ver


@ -66,8 +66,7 @@ func (j xlMetaV2DeleteMarker) FreeVersion() bool {
// FreeVersion returns true if j represents a free-version, false otherwise.
func (j xlMetaV2Version) FreeVersion() bool {
switch j.Type {
case DeleteType:
if j.Type == DeleteType {
return j.DeleteMarker.FreeVersion()
}
return false


@ -1462,25 +1462,26 @@ func (s *xlStorage) readAllData(ctx context.Context, volumeDir string, filePath
f, err = OpenFile(filePath, readMode, 0o666)
}
if err != nil {
if osIsNotExist(err) {
switch {
case osIsNotExist(err):
// Check if the object doesn't exist because its bucket
// is missing in order to return the correct error.
if err = Access(volumeDir); err != nil && osIsNotExist(err) {
return nil, dmTime, errVolumeNotFound
}
return nil, dmTime, errFileNotFound
} else if osIsPermission(err) {
case osIsPermission(err):
return nil, dmTime, errFileAccessDenied
} else if isSysErrNotDir(err) || isSysErrIsDir(err) {
case isSysErrNotDir(err) || isSysErrIsDir(err):
return nil, dmTime, errFileNotFound
} else if isSysErrHandleInvalid(err) {
case isSysErrHandleInvalid(err):
// This case is special and needs to be handled for windows.
return nil, dmTime, errFileNotFound
} else if isSysErrIO(err) {
case isSysErrIO(err):
return nil, dmTime, errFaultyDisk
} else if isSysErrTooManyFiles(err) {
case isSysErrTooManyFiles(err):
return nil, dmTime, errTooManyOpenFiles
} else if isSysErrInvalidArg(err) {
case isSysErrInvalidArg(err):
st, _ := Lstat(filePath)
if st != nil && st.IsDir() {
// Linux returns InvalidArg for directory O_DIRECT
@ -1577,14 +1578,7 @@ func (s *xlStorage) ReadFile(ctx context.Context, volume string, path string, of
// Stat a volume entry.
if err = Access(volumeDir); err != nil {
if osIsNotExist(err) {
return 0, errVolumeNotFound
} else if isSysErrIO(err) {
return 0, errFaultyDisk
} else if osIsPermission(err) {
return 0, errFileAccessDenied
}
return 0, err
return 0, convertAccessError(err, errFileAccessDenied)
}
// Validate effective path length before reading.
@ -1898,14 +1892,15 @@ func (s *xlStorage) writeAllDirect(ctx context.Context, filePath string, fileSiz
defer w.Close()
var bufp *[]byte
if fileSize > 0 && fileSize >= largestFileThreshold {
switch {
case fileSize > 0 && fileSize >= largestFileThreshold:
// use a larger 4MiB buffer for really large streams.
bufp = xioutil.ODirectPoolXLarge.Get().(*[]byte)
defer xioutil.ODirectPoolXLarge.Put(bufp)
} else if fileSize <= smallFileThreshold {
case fileSize <= smallFileThreshold:
bufp = xioutil.ODirectPoolSmall.Get().(*[]byte)
defer xioutil.ODirectPoolSmall.Put(bufp)
} else {
default:
bufp = xioutil.ODirectPoolLarge.Get().(*[]byte)
defer xioutil.ODirectPoolLarge.Put(bufp)
}
@ -1989,14 +1984,7 @@ func (s *xlStorage) AppendFile(ctx context.Context, volume string, path string,
// Stat a volume entry.
if err = Access(volumeDir); err != nil {
if osIsNotExist(err) {
return errVolumeNotFound
} else if osIsPermission(err) {
return errVolumeAccessDenied
} else if isSysErrIO(err) {
return errFaultyDisk
}
return err
return convertAccessError(err, errVolumeAccessDenied)
}
filePath := pathJoin(volumeDir, path)
@ -2127,14 +2115,7 @@ func (s *xlStorage) Delete(ctx context.Context, volume string, path string, dele
// Stat a volume entry.
if err = Access(volumeDir); err != nil {
if osIsNotExist(err) {
return errVolumeNotFound
} else if osIsPermission(err) {
return errVolumeAccessDenied
} else if isSysErrIO(err) {
return errFaultyDisk
}
return err
return convertAccessError(err, errVolumeAccessDenied)
}
// Following code is needed so that we retain SlashSeparator suffix if any in
@ -2586,14 +2567,7 @@ func (s *xlStorage) VerifyFile(ctx context.Context, volume, path string, fi File
// Stat a volume entry.
if err = Access(volumeDir); err != nil {
if osIsNotExist(err) {
return errVolumeNotFound
} else if isSysErrIO(err) {
return errFaultyDisk
} else if osIsPermission(err) {
return errVolumeAccessDenied
}
return err
return convertAccessError(err, errVolumeAccessDenied)
}
erasure := fi.Erasure
@ -2691,14 +2665,7 @@ func (s *xlStorage) StatInfoFile(ctx context.Context, volume, path string, glob
// Stat a volume entry.
if err = Access(volumeDir); err != nil {
if osIsNotExist(err) {
return stat, errVolumeNotFound
} else if isSysErrIO(err) {
return stat, errFaultyDisk
} else if osIsPermission(err) {
return stat, errVolumeAccessDenied
}
return stat, err
return stat, convertAccessError(err, errVolumeAccessDenied)
}
files := []string{pathJoin(volumeDir, path)}
if glob {
@ -2827,3 +2794,16 @@ func (s *xlStorage) CleanAbandonedData(ctx context.Context, volume string, path
}
return nil
}
func convertAccessError(err, permErr error) error {
switch {
case osIsNotExist(err):
return errVolumeNotFound
case isSysErrIO(err):
return errFaultyDisk
case osIsPermission(err):
return permErr
default:
return err
}
}
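
(convertAccessError above consolidates the four identical error-mapping blocks removed earlier in this file. A self-contained sketch of how such a helper behaves; the sentinel errors and the os.IsNotExist/os.IsPermission predicates are stand-ins for MinIO's internal helpers, and the isSysErrIO branch is omitted.)

package example

import (
	"errors"
	"os"
	"testing"
)

var (
	errVolumeNotFound     = errors.New("volume not found")
	errVolumeAccessDenied = errors.New("volume access denied")
)

// Simplified stand-in for the helper above, using standard-library predicates.
func convertAccessError(err, permErr error) error {
	switch {
	case os.IsNotExist(err):
		return errVolumeNotFound
	case os.IsPermission(err):
		return permErr
	default:
		return err
	}
}

func TestConvertAccessError(t *testing.T) {
	if got := convertAccessError(os.ErrNotExist, errVolumeAccessDenied); got != errVolumeNotFound {
		t.Fatalf("missing volume: want errVolumeNotFound, got %v", got)
	}
	if got := convertAccessError(os.ErrPermission, errVolumeAccessDenied); got != errVolumeAccessDenied {
		t.Fatalf("permission failure: want the caller-supplied permErr, got %v", got)
	}
}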


@ -82,8 +82,7 @@ func (f *Filter) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error
return err
}
switch se := t.(type) {
case xml.StartElement:
if se, ok := t.(xml.StartElement); ok {
switch se.Name.Local {
case "Prefix":
var p Prefix


@ -104,8 +104,7 @@ func (lc *Lifecycle) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err e
return err
}
switch se := t.(type) {
case xml.StartElement:
if se, ok := t.(xml.StartElement); ok {
switch se.Name.Local {
case "Rule":
var r Rule


@ -51,8 +51,7 @@ func (tag *Tag) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error)
return err
}
switch se := t.(type) {
case xml.StartElement:
if se, ok := t.(xml.StartElement); ok {
var s string
if err = d.DecodeElement(&s, &se); err != nil {
return err


@ -514,8 +514,7 @@ func (l *ObjectLegalHold) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (
return err
}
switch se := t.(type) {
case xml.StartElement:
if se, ok := t.(xml.StartElement); ok {
switch se.Name.Local {
case "Status":
var st LegalHoldStatus


@ -38,8 +38,8 @@ import (
//
// The same context must be provided when decrypting the
// ciphertext.
func EncryptBytes(KMS kms.KMS, plaintext []byte, context kms.Context) ([]byte, error) {
ciphertext, err := Encrypt(KMS, bytes.NewReader(plaintext), context)
func EncryptBytes(k kms.KMS, plaintext []byte, context kms.Context) ([]byte, error) {
ciphertext, err := Encrypt(k, bytes.NewReader(plaintext), context)
if err != nil {
return nil, err
}
@ -49,8 +49,8 @@ func EncryptBytes(KMS kms.KMS, plaintext []byte, context kms.Context) ([]byte, e
// DecryptBytes decrypts the ciphertext using a key managed by the KMS.
// The same context that has been used during encryption must be
// provided.
func DecryptBytes(KMS kms.KMS, ciphertext []byte, context kms.Context) ([]byte, error) {
plaintext, err := Decrypt(KMS, bytes.NewReader(ciphertext), context)
func DecryptBytes(k kms.KMS, ciphertext []byte, context kms.Context) ([]byte, error) {
plaintext, err := Decrypt(k, bytes.NewReader(ciphertext), context)
if err != nil {
return nil, err
}
@ -62,13 +62,13 @@ func DecryptBytes(KMS kms.KMS, ciphertext []byte, context kms.Context) ([]byte,
//
// The same context must be provided when decrypting the
// ciphertext.
func Encrypt(KMS kms.KMS, plaintext io.Reader, ctx kms.Context) (io.Reader, error) {
func Encrypt(k kms.KMS, plaintext io.Reader, ctx kms.Context) (io.Reader, error) {
algorithm := sio.AES_256_GCM
if !fips.Enabled && !sioutil.NativeAES() {
algorithm = sio.ChaCha20Poly1305
}
key, err := KMS.GenerateKey(context.Background(), "", ctx)
key, err := k.GenerateKey(context.Background(), "", ctx)
if err != nil {
return nil, err
}
@ -116,7 +116,7 @@ func Encrypt(KMS kms.KMS, plaintext io.Reader, ctx kms.Context) (io.Reader, erro
// Decrypt decrypts the ciphertext using a key managed by the KMS.
// The same context that has been used during encryption must be
// provided.
func Decrypt(KMS kms.KMS, ciphertext io.Reader, context kms.Context) (io.Reader, error) {
func Decrypt(k kms.KMS, ciphertext io.Reader, context kms.Context) (io.Reader, error) {
const (
MaxMetadataSize = 1 << 20 // max. size of the metadata
Version = 1
@ -149,7 +149,7 @@ func Decrypt(KMS kms.KMS, ciphertext io.Reader, context kms.Context) (io.Reader,
return nil, fmt.Errorf("config: unsupported encryption algorithm: %q is not supported in FIPS mode", metadata.Algorithm)
}
key, err := KMS.DecryptKey(metadata.KeyID, metadata.KMSKey, context)
key, err := k.DecryptKey(metadata.KeyID, metadata.KMSKey, context)
if err != nil {
return nil, err
}


@ -104,8 +104,7 @@ func (c *OperatorDNS) Put(bucket string) error {
var errorStringBuilder strings.Builder
io.Copy(&errorStringBuilder, io.LimitReader(resp.Body, resp.ContentLength))
errorString := errorStringBuilder.String()
switch resp.StatusCode {
case http.StatusConflict:
if resp.StatusCode == http.StatusConflict {
return ErrBucketConflict(Error{bucket, errors.New(errorString)})
}
return newError(bucket, fmt.Errorf("service create for bucket %s, failed with status %s, error %s", bucket, resp.Status, errorString))


@ -107,9 +107,8 @@ func ErrorToErr(err error) Err {
if errors.Is(err, syscall.EADDRINUSE) {
return ErrPortAlreadyInUse(err).Msg("Specified port is already in use")
} else if errors.Is(err, syscall.EACCES) || errors.Is(err, syscall.EPERM) {
switch err.(type) {
case *net.OpError:
return ErrPortAccess(err).Msg("Insufficient permissions to use specified port")
if netErr, ok := err.(*net.OpError); ok {
return ErrPortAccess(netErr).Msg("Insufficient permissions to use specified port")
}
}


@ -172,9 +172,8 @@ func (l *Config) DoesGroupDNExist(groupDN string) (bool, error) {
// some base DNs are subtrees of other base DNs - we should validate
// and error out in such cases.
return false, fmt.Errorf("found multiple DNs for the given group DN")
} else {
return false, nil
}
return false, nil
}
// Bind - binds to ldap, searches LDAP and returns the distinguished name of the


@ -204,11 +204,12 @@ func (h *metrics) accumRequestRTT(reqStartTime time.Time, rttMs float64, isSucce
h.updateLastFullMinute(reqTimeMinute)
}
var entry *serviceRTTMinuteStats
if reqTimeMinute.Equal(h.currentMinute.statsTime) {
switch {
case reqTimeMinute.Equal(h.currentMinute.statsTime):
entry = &h.currentMinute
} else if reqTimeMinute.Equal(h.lastFullMinute.statsTime) {
case reqTimeMinute.Equal(h.lastFullMinute.statsTime):
entry = &h.lastFullMinute
} else {
default:
// This request is too old, it should never happen, ignore it as we
// cannot return an error.
return


@ -106,8 +106,8 @@ func (ssekms) IsEncrypted(metadata map[string]string) bool {
// UnsealObjectKey extracts and decrypts the sealed object key
// from the metadata using KMS and returns the decrypted object
// key.
func (s3 ssekms) UnsealObjectKey(KMS kms.KMS, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
if KMS == nil {
func (s3 ssekms) UnsealObjectKey(k kms.KMS, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
if k == nil {
return key, Errorf("KMS not configured")
}
@ -120,7 +120,7 @@ func (s3 ssekms) UnsealObjectKey(KMS kms.KMS, metadata map[string]string, bucket
} else if _, ok := ctx[bucket]; !ok {
ctx[bucket] = path.Join(bucket, object)
}
unsealKey, err := KMS.DecryptKey(keyID, kmsKey, ctx)
unsealKey, err := k.DecryptKey(keyID, kmsKey, ctx)
if err != nil {
return key, err
}


@ -71,15 +71,15 @@ func (sses3) IsEncrypted(metadata map[string]string) bool {
// UnsealObjectKey extracts and decrypts the sealed object key
// from the metadata using KMS and returns the decrypted object
// key.
func (s3 sses3) UnsealObjectKey(KMS kms.KMS, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
if KMS == nil {
func (s3 sses3) UnsealObjectKey(k kms.KMS, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
if k == nil {
return key, Errorf("KMS not configured")
}
keyID, kmsKey, sealedKey, err := s3.ParseMetadata(metadata)
if err != nil {
return key, err
}
unsealKey, err := KMS.DecryptKey(keyID, kmsKey, kms.Context{bucket: path.Join(bucket, object)})
unsealKey, err := k.DecryptKey(keyID, kmsKey, kms.Context{bucket: path.Join(bucket, object)})
if err != nil {
return key, err
}
@ -92,8 +92,8 @@ func (s3 sses3) UnsealObjectKey(KMS kms.KMS, metadata map[string]string, bucket,
// keys.
//
// The metadata, buckets and objects slices must have the same length.
func (s3 sses3) UnsealObjectKeys(ctx context.Context, KMS kms.KMS, metadata []map[string]string, buckets, objects []string) ([]ObjectKey, error) {
if KMS == nil {
func (s3 sses3) UnsealObjectKeys(ctx context.Context, k kms.KMS, metadata []map[string]string, buckets, objects []string) ([]ObjectKey, error) {
if k == nil {
return nil, Errorf("KMS not configured")
}
@ -124,7 +124,7 @@ func (s3 sses3) UnsealObjectKeys(ctx context.Context, KMS kms.KMS, metadata []ma
for i := range buckets {
contexts = append(contexts, kms.Context{buckets[i]: path.Join(buckets[i], objects[i])})
}
unsealKeys, err := KMS.DecryptAll(ctx, keyIDs[0], kmsKeys, contexts)
unsealKeys, err := k.DecryptAll(ctx, keyIDs[0], kmsKeys, contexts)
if err != nil {
return nil, err
}
@ -139,7 +139,7 @@ func (s3 sses3) UnsealObjectKeys(ctx context.Context, KMS kms.KMS, metadata []ma
keys := make([]ObjectKey, 0, len(keyIDs))
for i := range keyIDs {
key, err := s3.UnsealObjectKey(KMS, metadata[i], buckets[i], objects[i])
key, err := s3.UnsealObjectKey(k, metadata[i], buckets[i], objects[i])
if err != nil {
return nil, err
}


@ -149,8 +149,7 @@ func (h *Target) Init() error {
xhttp.DrainBody(resp.Body)
if !acceptedResponseStatusCode(resp.StatusCode) {
switch resp.StatusCode {
case http.StatusForbidden:
if resp.StatusCode == http.StatusForbidden {
return fmt.Errorf("%s returned '%s', please check if your auth token is correctly set",
h.config.Endpoint, resp.Status)
}


@ -76,8 +76,7 @@ func (args *ReaderArgs) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (er
return err
}
switch se := t.(type) {
case xml.StartElement:
if se, ok := t.(xml.StartElement); ok {
tagName := se.Name.Local
switch tagName {
case "AllowQuotedRecordDelimiter":
@ -158,8 +157,7 @@ func (args *WriterArgs) UnmarshalXML(d *xml.Decoder, start xml.StartElement) err
return err
}
switch se := t.(type) {
case xml.StartElement:
if se, ok := t.(xml.StartElement); ok {
var s string
if err = d.DecodeElement(&s, &se); err != nil {
return err


@ -28,9 +28,7 @@ import (
// valueBuilders contains one constructor for each value type.
// Values should match if type is the same.
var valueBuilders = []func() *Value{
func() *Value {
return FromNull()
},
FromNull,
func() *Value {
return FromBool(true)
},
@ -58,9 +56,7 @@ var valueBuilders = []func() *Value{
// altValueBuilders contains one constructor for each value type.
// Values are zero values and should NOT match the values in valueBuilders, except Null type.
var altValueBuilders = []func() *Value{
func() *Value {
return FromNull()
},
FromNull,
func() *Value {
return FromBool(false)
},