enable full linter across the codebase (#9620)

enable linter using golangci-lint across the
codebase to run a suite of linters together;
we shall enable new linters as we fix more
issues in the codebase.

This PR completes the first stage of this
cleanup.
Authored by Harshavardhana on 2020-05-18 09:59:45 -07:00; committed by GitHub.
parent 96009975d6
commit 1bc32215b9
39 changed files with 128 additions and 239 deletions

View File

@@ -1185,7 +1185,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
defer close(obdInfoCh)
if cpu, ok := vars["syscpu"]; ok && cpu == "true" {
-cpuInfo := getLocalCPUOBDInfo(deadlinedCtx)
+cpuInfo := getLocalCPUOBDInfo(deadlinedCtx, r)
obdInfo.Sys.CPUInfo = append(obdInfo.Sys.CPUInfo, cpuInfo)
obdInfo.Sys.CPUInfo = append(obdInfo.Sys.CPUInfo, globalNotificationSys.CPUOBDInfo(deadlinedCtx)...)
@@ -1193,7 +1193,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
}
if diskHw, ok := vars["sysdiskhw"]; ok && diskHw == "true" {
-diskHwInfo := getLocalDiskHwOBD(deadlinedCtx)
+diskHwInfo := getLocalDiskHwOBD(deadlinedCtx, r)
obdInfo.Sys.DiskHwInfo = append(obdInfo.Sys.DiskHwInfo, diskHwInfo)
obdInfo.Sys.DiskHwInfo = append(obdInfo.Sys.DiskHwInfo, globalNotificationSys.DiskHwOBDInfo(deadlinedCtx)...)
@@ -1201,7 +1201,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
}
if osInfo, ok := vars["sysosinfo"]; ok && osInfo == "true" {
-osInfo := getLocalOsInfoOBD(deadlinedCtx)
+osInfo := getLocalOsInfoOBD(deadlinedCtx, r)
obdInfo.Sys.OsInfo = append(obdInfo.Sys.OsInfo, osInfo)
obdInfo.Sys.OsInfo = append(obdInfo.Sys.OsInfo, globalNotificationSys.OsOBDInfo(deadlinedCtx)...)
@@ -1209,7 +1209,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
}
if mem, ok := vars["sysmem"]; ok && mem == "true" {
-memInfo := getLocalMemOBD(deadlinedCtx)
+memInfo := getLocalMemOBD(deadlinedCtx, r)
obdInfo.Sys.MemInfo = append(obdInfo.Sys.MemInfo, memInfo)
obdInfo.Sys.MemInfo = append(obdInfo.Sys.MemInfo, globalNotificationSys.MemOBDInfo(deadlinedCtx)...)
@@ -1217,7 +1217,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
}
if proc, ok := vars["sysprocess"]; ok && proc == "true" {
-procInfo := getLocalProcOBD(deadlinedCtx)
+procInfo := getLocalProcOBD(deadlinedCtx, r)
obdInfo.Sys.ProcInfo = append(obdInfo.Sys.ProcInfo, procInfo)
obdInfo.Sys.ProcInfo = append(obdInfo.Sys.ProcInfo, globalNotificationSys.ProcOBDInfo(deadlinedCtx)...)
@@ -1321,7 +1321,6 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
usage = madmin.Usage{Size: dataUsageInfo.ObjectsTotalSize}
}
-infoMsg := madmin.InfoMessage{}
vault := fetchVaultStatus(cfg)
ldap := madmin.LDAP{}
@@ -1426,7 +1425,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
Notifications: notifyTarget,
}
-infoMsg = madmin.InfoMessage{
+infoMsg := madmin.InfoMessage{
Mode: mode,
Domain: domain,
Region: globalServerRegion,
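These two hunks remove an ineffectual assignment: `infoMsg` was initialized to an empty struct only to be unconditionally overwritten later, so the declaration now happens at the point of first real use. A minimal, self-contained illustration of the lint class (names hypothetical):

```go
package main

import "fmt"

type infoMessage struct{ Mode string }

func main() {
	// Before (flagged by ineffassign): the zero value is never read.
	//   msg := infoMessage{}
	//   ... unrelated work ...
	//   msg = infoMessage{Mode: "online"}
	// After: declare the variable where it first receives a real value.
	msg := infoMessage{Mode: "online"}
	fmt.Println(msg.Mode)
}
```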

View File

@@ -211,7 +211,7 @@ func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) error {
if dataUsageInfo.BucketsSizes[bucket] > cfg.Quota {
toFree = dataUsageInfo.BucketsSizes[bucket] - cfg.Quota
}
-if toFree <= 0 {
+if toFree == 0 {
continue
}
// Allocate new results channel to receive ObjectInfo.
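`toFree` is derived from unsigned byte counts, so the `< 0` half of `toFree <= 0` can never be true; staticcheck-style checks flag such comparisons, and `== 0` states the intent exactly. A tiny demonstration under that assumption:

```go
package main

import "fmt"

func main() {
	var toFree uint64 // unsigned: can never drop below zero
	// For unsigned types, `toFree <= 0` is just `toFree == 0` with a
	// misleading extra condition, which is what the linter objects to.
	if toFree == 0 {
		fmt.Println("bucket within quota, nothing to free")
	}
}
```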

View File

@@ -23,7 +23,6 @@ import (
const (
defaultTTL = 30
defaultPrefixPath = "/skydns"
-defaultContextTimeout = 5 * time.Minute
)

View File

@@ -18,7 +18,9 @@ package openid
import (
"crypto"
"github.com/dgrijalva/jwt-go"
+// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
_ "golang.org/x/crypto/sha3"
)
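The added comment documents why the blank import must stay: the sha3 package's `init` registers the SHA-3 hash functions with the standard `crypto` package. A small sketch of the effect:

```go
package main

import (
	"crypto"
	"fmt"

	// Remove this blank import and crypto.SHA3_256.New() panics with
	// "crypto: requested hash function ... is unavailable".
	_ "golang.org/x/crypto/sha3"
)

func main() {
	h := crypto.SHA3_256.New() // available only because sha3's init ran
	h.Write([]byte("data"))
	fmt.Printf("%x\n", h.Sum(nil))
}
```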

View File

@@ -30,7 +30,7 @@ import (
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/iam/policy"
iampolicy "github.com/minio/minio/pkg/iam/policy"
xnet "github.com/minio/minio/pkg/net"
)
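The package at `pkg/iam/policy` declares itself `iampolicy`, which differs from the last element of its import path; naming it explicitly at the import site makes the identifier the file actually uses visible. The same mechanism shown with a standard-library package:

```go
package main

import (
	"fmt"

	// The name on the left is what this file uses; without it, the
	// identifier comes from the imported package's own `package` clause,
	// which can differ from the import path (as with iampolicy above).
	mrand "math/rand"
)

func main() { fmt.Println(mrand.Intn(10)) }
```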

View File

@@ -18,7 +18,9 @@ package openid
import (
"crypto"
"github.com/dgrijalva/jwt-go"
+// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
_ "golang.org/x/crypto/sha3"
)

View File

@@ -36,7 +36,6 @@ import (
const (
formatNamespace = "namespace"
formatAccess = "access"
)
// ErrTargetsOffline - Indicates single/multiple target failures.

View File

@@ -358,7 +358,7 @@ func (f *fileScorer) adjustSaveBytes(n int64) bool {
} else {
f.saveBytes += uint64(n)
}
-if f.saveBytes <= 0 {
+if f.saveBytes == 0 {
f.queue.Init()
f.saveBytes = 0
return false

View File

@@ -204,7 +204,7 @@ func formatXLMigrate(export string) error {
return err
}
// Migrate successful v2 => v3, v3 is latest
-version = formatXLVersionV3
+// version = formatXLVersionV3
fallthrough
case formatXLVersionV3:
// v3 is the latest version, return.

View File

@@ -899,16 +899,20 @@ func (l *gcsGateway) GetObjectInfo(ctx context.Context, bucket string, object st
func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, r *minio.PutObjReader, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
data := r.Reader
+nctx, cancel := context.WithCancel(ctx)
+defer cancel()
// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
// otherwise gcs will just return object not exist in case of non-existing bucket
-if _, err := l.client.Bucket(bucket).Attrs(ctx); err != nil {
+if _, err := l.client.Bucket(bucket).Attrs(nctx); err != nil {
logger.LogIf(ctx, err, logger.Application)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket)
}
object := l.client.Bucket(bucket).Object(key)
-w := object.NewWriter(ctx)
+w := object.NewWriter(nctx)
// Disable "chunked" uploading in GCS client if the size of the data to be uploaded is below
// the current chunk-size of the writer. This avoids an unnecessary memory allocation.
@@ -919,7 +923,6 @@ func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, r
if _, err := io.Copy(w, data); err != nil {
// Close the object writer upon error.
w.CloseWithError(err)
-logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
}
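The hunk derives a cancelable child context (`nctx`) for both the bucket existence check and the object writer, with `defer cancel()` guaranteeing its resources are released when `PutObject` returns. The general shape of the pattern:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func putObject(ctx context.Context) {
	// Derive a child context and always release it on return; any work
	// started with nctx observes cancellation as soon as we exit.
	nctx, cancel := context.WithCancel(ctx)
	defer cancel()

	select {
	case <-time.After(10 * time.Millisecond): // simulated upload step
		fmt.Println("upload step finished")
	case <-nctx.Done():
		fmt.Println("canceled:", nctx.Err())
	}
}

func main() { putObject(context.Background()) }
```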

View File

@@ -642,7 +642,6 @@ func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
for {
loi, lerr := l.s3Objects.ListObjectsV2(ctx, bucket, uploadPrefix, continuationToken, delimiter, 1000, false, startAfter)
if lerr != nil {
-done = true
break
}
for _, obj := range loi.Objects {

View File

@@ -48,6 +48,8 @@ func registerMetricsRouter(router *mux.Router) {
switch prometheusAuthType(authType) {
case prometheusPublic:
metricsRouter.Handle(prometheusMetricsPath, metricsHandler())
+case prometheusJWT:
+fallthrough
default:
metricsRouter.Handle(prometheusMetricsPath, AuthMiddleware(metricsHandler()))
}
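Spelling out the JWT case with a `fallthrough` documents that JWT-authenticated scrapes take the same middleware path as the default case, rather than relying on an implicit catch-all. A compact reminder of `fallthrough` semantics:

```go
package main

import "fmt"

func metricsAuth(authType string) string {
	switch authType {
	case "public":
		return "no auth required"
	case "jwt":
		// fallthrough unconditionally continues into the next case
		// body (here, default), making the shared handling explicit.
		fallthrough
	default:
		return "auth middleware"
	}
}

func main() {
	fmt.Println(metricsAuth("public"))
	fmt.Println(metricsAuth("jwt"))
}
```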

View File

@@ -32,12 +32,10 @@ import (
"github.com/shirou/gopsutil/process"
)
-func getLocalCPUOBDInfo(ctx context.Context) madmin.ServerCPUOBDInfo {
-addr := ""
+func getLocalCPUOBDInfo(ctx context.Context, r *http.Request) madmin.ServerCPUOBDInfo {
+addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(globalEndpoints)
-} else {
-addr = "minio"
}
info, err := cpuhw.InfoWithContext(ctx)
@@ -60,16 +58,15 @@ func getLocalCPUOBDInfo(ctx context.Context) madmin.ServerCPUOBDInfo {
Addr: addr,
CPUStat: info,
TimeStat: time,
Error: "",
}
}
func getLocalDrivesOBD(ctx context.Context, parallel bool, endpointZones EndpointZones, r *http.Request) madmin.ServerDrivesOBDInfo {
var drivesOBDInfo []madmin.DriveOBDInfo
-wg := sync.WaitGroup{}
+var wg sync.WaitGroup
for _, ep := range endpointZones {
-for i, endpoint := range ep.Endpoints {
+for _, endpoint := range ep.Endpoints {
// Only proceed for local endpoints
if endpoint.IsLocal {
if _, err := os.Stat(endpoint.Path); err != nil {
@@ -81,34 +78,34 @@ func getLocalDrivesOBD(ctx context.Context, parallel bool, endpointZones Endpoin
continue
}
measurePath := pathJoin(minioMetaTmpBucket, mustGetUUID())
-measure := func(index int, path string) {
-var driveOBDInfo madmin.DriveOBDInfo
+measure := func(path string) {
+defer wg.Done()
+driveOBDInfo := madmin.DriveOBDInfo{
+Path: path,
+}
latency, throughput, err := disk.GetOBDInfo(ctx, path, pathJoin(path, measurePath))
if err != nil {
driveOBDInfo.Error = err.Error()
+} else {
+driveOBDInfo.Latency = latency
+driveOBDInfo.Throughput = throughput
}
-driveOBDInfo.Path = path
-driveOBDInfo.Latency = latency
-driveOBDInfo.Throughput = throughput
drivesOBDInfo = append(drivesOBDInfo, driveOBDInfo)
-wg.Done()
}
wg.Add(1)
if parallel {
-go measure(i, endpoint.Path)
+go measure(endpoint.Path)
} else {
-measure(i, endpoint.Path)
+measure(endpoint.Path)
}
}
}
}
wg.Wait()
addr := ""
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
-} else {
-addr = "minio"
}
if parallel {
return madmin.ServerDrivesOBDInfo{
@@ -122,12 +119,10 @@ func getLocalDrivesOBD(ctx context.Context, parallel bool, endpointZones Endpoin
}
}
-func getLocalMemOBD(ctx context.Context) madmin.ServerMemOBDInfo {
-addr := ""
+func getLocalMemOBD(ctx context.Context, r *http.Request) madmin.ServerMemOBDInfo {
+addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(globalEndpoints)
-} else {
-addr = "minio"
}
swap, err := memhw.SwapMemoryWithContext(ctx)
@@ -150,16 +145,13 @@ func getLocalMemOBD(ctx context.Context) madmin.ServerMemOBDInfo {
Addr: addr,
SwapMem: swap,
VirtualMem: vm,
Error: "",
}
}
-func getLocalProcOBD(ctx context.Context) madmin.ServerProcOBDInfo {
-addr := ""
+func getLocalProcOBD(ctx context.Context, r *http.Request) madmin.ServerProcOBDInfo {
+addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(globalEndpoints)
-} else {
-addr = "minio"
}
errProcInfo := func(err error) madmin.ServerProcOBDInfo {
@@ -374,16 +366,13 @@ func getLocalProcOBD(ctx context.Context) madmin.ServerProcOBDInfo {
return madmin.ServerProcOBDInfo{
Addr: addr,
Processes: sysProcs,
Error: "",
}
}
-func getLocalOsInfoOBD(ctx context.Context) madmin.ServerOsOBDInfo {
-addr := ""
+func getLocalOsInfoOBD(ctx context.Context, r *http.Request) madmin.ServerOsOBDInfo {
+addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(globalEndpoints)
-} else {
-addr = "minio"
}
info, err := host.InfoWithContext(ctx)
@@ -410,6 +399,5 @@ func getLocalOsInfoOBD(ctx context.Context) madmin.ServerOsOBDInfo {
Info: info,
Sensors: sensors,
Users: users,
Error: "",
}
}
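Several mechanical fixes land together in this file: the zero value `var wg sync.WaitGroup` replaces the composite literal, `wg.Done` moves into a `defer` so it fires on every exit path, the unused loop index is dropped, and the drive info struct is built with its `Path` set up front. A condensed sketch of the fixed shape (simplified; results go over a channel here instead of a shared slice):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	paths := []string{"/a", "/b", "/c"}

	var wg sync.WaitGroup // zero value is ready to use; no literal needed
	results := make(chan string, len(paths))

	measure := func(path string) {
		defer wg.Done() // runs even if the measurement bails out early
		results <- "measured " + path
	}

	for _, path := range paths { // index dropped: it was never used
		wg.Add(1)
		go measure(path) // path is passed as an argument, not captured
	}
	wg.Wait()
	close(results)

	for r := range results {
		fmt.Println(r)
	}
}
```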

View File

@@ -19,16 +19,15 @@ package cmd
import (
"context"
"net/http"
"github.com/minio/minio/pkg/madmin"
)
-func getLocalDiskHwOBD(ctx context.Context) madmin.ServerDiskHwOBDInfo {
-addr := ""
+func getLocalDiskHwOBD(ctx context.Context, r *http.Request) madmin.ServerDiskHwOBDInfo {
+addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(globalEndpoints)
-} else {
-addr = "minio"
}
return madmin.ServerDiskHwOBDInfo{

View File

@@ -21,18 +21,17 @@ package cmd
import (
"context"
"net/http"
"strings"
"github.com/minio/minio/pkg/madmin"
diskhw "github.com/shirou/gopsutil/disk"
)
-func getLocalDiskHwOBD(ctx context.Context) madmin.ServerDiskHwOBDInfo {
-addr := ""
+func getLocalDiskHwOBD(ctx context.Context, r *http.Request) madmin.ServerDiskHwOBDInfo {
+addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(globalEndpoints)
-} else {
-addr = "minio"
}
partitions, err := diskhw.PartitionsWithContext(ctx, true)

View File

@@ -955,7 +955,6 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// - the object is encrypted using SSE-S3 and the SSE-S3 header is present
// - the object storage class is not changing
// then execute a key rotation.
-var keyRotation bool
if cpSrcDstSame && (sseCopyC && sseC) && !chStorageClass {
oldKey, err = ParseSSECopyCustomerRequest(r.Header, srcInfo.UserDefined)
if err != nil {
@@ -977,13 +976,12 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// Since we are rotating the keys, make sure to update the metadata.
srcInfo.metadataOnly = true
-keyRotation = true
} else {
if isSourceEncrypted || isTargetEncrypted {
// We are not only copying just metadata instead
// we are creating a new object at this point, even
// if source and destination are same objects.
-if !keyRotation {
+if !srcInfo.metadataOnly {
srcInfo.metadataOnly = false
}
}
@@ -1512,15 +1510,6 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
}
-// get gateway encryption options
-var opts ObjectOptions
-opts, err = putOpts(ctx, r, bucket, object, nil)
-if err != nil {
-writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
-return
-}
// Validate storage class metadata if present
if sc := r.Header.Get(xhttp.AmzStorageClass); sc != "" {
if !storageclass.IsValid(sc) {
@@ -1585,7 +1574,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
}
-opts, err = putOpts(ctx, r, bucket, object, metadata)
+opts, err := putOpts(ctx, r, bucket, object, metadata)
if err != nil {
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return

View File

@@ -534,7 +534,7 @@ func (client *peerRESTClient) cycleServerBloomFilter(ctx context.Context, req bl
if err != nil {
return nil, err
}
-respBody, err := client.call(peerRESTMethodCycleBloom, nil, &reader, -1)
+respBody, err := client.callWithContext(ctx, peerRESTMethodCycleBloom, nil, &reader, -1)
if err != nil {
return nil, err
}
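Switching from `call` to `callWithContext` threads the caller's context into the peer REST call, so canceling the bloom-filter cycle also aborts the in-flight request. A generic sketch of the idea using net/http (the helper here is illustrative, not MinIO's actual client):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// A callWithContext-style helper: the request carries ctx, so canceling
// ctx aborts the HTTP round trip instead of letting it run to completion.
func callWithContext(ctx context.Context, url string) error {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	fmt.Println(callWithContext(ctx, "http://127.0.0.1:1/unreachable"))
}
```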

View File

@@ -493,9 +493,10 @@ func (s *peerRESTServer) CPUOBDInfoHandler(w http.ResponseWriter, r *http.Reques
return
}
ctx, cancel := context.WithCancel(newContext(r, w, "CpuOBDInfo"))
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
-info := getLocalCPUOBDInfo(ctx)
+info := getLocalCPUOBDInfo(ctx, r)
defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
@@ -508,9 +509,10 @@ func (s *peerRESTServer) DiskHwOBDInfoHandler(w http.ResponseWriter, r *http.Req
return
}
ctx, cancel := context.WithCancel(newContext(r, w, "DiskHwOBDInfo"))
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
-info := getLocalDiskHwOBD(ctx)
+info := getLocalDiskHwOBD(ctx, r)
defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
@@ -523,9 +525,10 @@ func (s *peerRESTServer) OsOBDInfoHandler(w http.ResponseWriter, r *http.Request
return
}
ctx, cancel := context.WithCancel(newContext(r, w, "OsOBDInfo"))
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
-info := getLocalOsInfoOBD(ctx)
+info := getLocalOsInfoOBD(ctx, r)
defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
@@ -538,9 +541,10 @@ func (s *peerRESTServer) ProcOBDInfoHandler(w http.ResponseWriter, r *http.Reque
return
}
ctx, cancel := context.WithCancel(newContext(r, w, "ProcOBDInfo"))
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
-info := getLocalProcOBD(ctx)
+info := getLocalProcOBD(ctx, r)
defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
@@ -553,9 +557,10 @@ func (s *peerRESTServer) MemOBDInfoHandler(w http.ResponseWriter, r *http.Reques
return
}
ctx, cancel := context.WithCancel(newContext(r, w, "MemOBDInfo"))
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
-info := getLocalMemOBD(ctx)
+info := getLocalMemOBD(ctx, r)
defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
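Replacing `newContext(...)` with `r.Context()` ties each OBD collection to the lifetime of the incoming request: if the calling peer disconnects, the derived context is canceled and the work can stop early. A minimal handler sketch of the pattern:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
)

func obdHandler(w http.ResponseWriter, r *http.Request) {
	// Child of the request context: canceled when the client goes away
	// or when this handler returns.
	ctx, cancel := context.WithCancel(r.Context())
	defer cancel()

	select {
	case <-ctx.Done():
		http.Error(w, ctx.Err().Error(), http.StatusRequestTimeout)
	default:
		fmt.Fprintln(w, "obd info collected")
	}
}

func main() {
	rec := httptest.NewRecorder()
	obdHandler(rec, httptest.NewRequest(http.MethodGet, "/obd", nil))
	fmt.Print(rec.Body.String())
}
```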

View File

@@ -630,18 +630,17 @@ func TestPosixStatVol(t *testing.T) {
}
for i, testCase := range testCases {
-var volInfo VolInfo
if _, ok := posixStorage.(*posixDiskIDCheck); !ok {
t.Errorf("Expected the StorageAPI to be of type *posix")
}
-volInfo, err = posixStorage.StatVol(testCase.volName)
+volInfo, err := posixStorage.StatVol(testCase.volName)
if err != testCase.expectedErr {
t.Fatalf("TestPosix case : %d, Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
}
if err == nil {
-if volInfo.Name != volInfo.Name {
-t.Errorf("TestPosix case %d: Expected the volume name to be \"%s\", instead found \"%s\"", i+1, volInfo.Name, volInfo.Name)
+if volInfo.Name != testCase.volName {
+t.Errorf("TestPosix case %d: Expected the volume name to be \"%s\", instead found \"%s\"", i+1, volInfo.Name, testCase.volName)
}
}
}
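The old assertion compared `volInfo.Name` against itself, which can never fail, so the test could never catch a wrong volume name; staticcheck reports such identical-operand comparisons. The bug class in miniature:

```go
package main

import "fmt"

func main() {
	got, want := "success-vol", "success-vol"
	// Before: `if got != got` — always false, so the check is inert
	// (flagged as a suspicious self-comparison).
	// After: compare against the expected value.
	if got != want {
		fmt.Printf("expected %q, found %q\n", want, got)
	} else {
		fmt.Println("volume name matches")
	}
}
```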

View File

@@ -494,8 +494,8 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t
expectedLocation := redirectURL.String()
// Check the new location url
if rec.HeaderMap.Get("Location") != expectedLocation {
t.Errorf("Unexpected location, expected = %s, found = `%s`", rec.HeaderMap.Get("Location"), expectedLocation)
if rec.Header().Get("Location") != expectedLocation {
t.Errorf("Unexpected location, expected = %s, found = `%s`", rec.Header().Get("Location"), expectedLocation)
}
}
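`ResponseRecorder.HeaderMap` is deprecated in net/http/httptest in favor of the `Header()` accessor, which staticcheck (SA1019) flags. A runnable sketch of the replacement:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	rec := httptest.NewRecorder()
	var h http.Handler = http.RedirectHandler("/new", http.StatusSeeOther)
	h.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/old", nil))

	// rec.HeaderMap is deprecated; Header() is the supported accessor.
	fmt.Println(rec.Header().Get("Location")) // "/new"
}
```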

View File

@@ -45,12 +45,12 @@ func writeSTSErrorResponse(ctx context.Context, w http.ResponseWriter, errCode S
if errCtxt != nil {
stsErrorResponse.Error.Message = fmt.Sprintf("%v", errCtxt)
}
-logKind := logger.All
+var logKind logger.Kind
switch errCode {
case ErrSTSInternalError, ErrSTSNotInitialized:
logKind = logger.Minio
default:
-logKind = logger.Application
+logKind = logger.All
}
logger.LogIf(ctx, errCtxt, logKind)
encodedErrorResponse := encodeResponse(stsErrorResponse)

View File

@@ -2425,11 +2425,16 @@ func uploadTestObject(t *testing.T, apiRouter http.Handler, creds auth.Credentia
rec = httptest.NewRecorder()
apiRouter.ServeHTTP(rec, req)
checkRespErr(rec, http.StatusOK)
etag := rec.Header()["ETag"][0]
if etag == "" {
t.Fatalf("Unexpected empty etag")
+header := rec.Header()
+if v, ok := header["ETag"]; ok {
+etag := v[0]
+if etag == "" {
+t.Fatalf("Unexpected empty etag")
+}
+cp = append(cp, CompletePart{partID, etag[1 : len(etag)-1]})
+} else {
+t.Fatalf("Missing etag header")
}
-cp = append(cp, CompletePart{partID, etag[1 : len(etag)-1]})
}
// Call CompleteMultipart API
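Indexing `rec.Header()["ETag"][0]` panics with an index-out-of-range when the header is absent, because a missing map key yields a nil slice; the rewrite checks for presence first and fails the test with a clear message instead. The guard in isolation:

```go
package main

import "fmt"

func main() {
	header := map[string][]string{} // e.g. an http.Header with no ETag

	// Before: header["ETag"][0] — panics when the key is missing.
	// After: test presence (and length) before indexing.
	if v, ok := header["ETag"]; ok && len(v) > 0 {
		fmt.Println("etag:", v[0])
	} else {
		fmt.Println("missing ETag header")
	}
}
```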

View File

@@ -262,7 +262,6 @@ func (xl xlObjects) crawlAndGetDataUsage(ctx context.Context, buckets []BucketIn
}
close(bucketCh)
-buckets = nil
bucketResults := make(chan dataUsageEntryInfo, len(disks))
// Start async collector/saver.