upgrade golang-lint to the latest (#15600)

This commit is contained in:
Harshavardhana 2022-08-26 12:52:29 -07:00 committed by GitHub
parent d7cd857c7c
commit 433b6fa8fe
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
54 changed files with 357 additions and 305 deletions

View File

@ -20,7 +20,7 @@ jobs:
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
go-version: [1.18.x] go-version: [1.18.x, 1.19.x]
os: [ubuntu-latest] os: [ubuntu-latest]
steps: steps:
- uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2 - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2

View File

@ -20,7 +20,7 @@ jobs:
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
go-version: [1.18.x] go-version: [1.18.x, 1.19.x]
os: [ubuntu-latest] os: [ubuntu-latest]
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v2

View File

@ -20,7 +20,7 @@ jobs:
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
go-version: [1.18.x] go-version: [1.18.x, 1.19.x]
os: [ubuntu-latest, windows-latest] os: [ubuntu-latest, windows-latest]
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v2

View File

@ -20,7 +20,7 @@ jobs:
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
go-version: [1.18.x] go-version: [1.18.x, 1.19.x]
os: [ubuntu-latest] os: [ubuntu-latest]
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v2

View File

@ -21,7 +21,7 @@ jobs:
strategy: strategy:
matrix: matrix:
go-version: [1.18.x] go-version: [1.18.x, 1.19.x]
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v2

View File

@ -20,7 +20,7 @@ jobs:
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
go-version: [1.18.x] go-version: [1.18.x, 1.19.x]
os: [ubuntu-latest] os: [ubuntu-latest]
steps: steps:

View File

@ -1,8 +1,9 @@
linters-settings: linters-settings:
golint: golint:
min-confidence: 0 min-confidence: 0
gofumpt: gofumpt:
lang-version: "1.17" lang-version: "1.18"
# Choose whether or not to use the extra rules that are disabled # Choose whether or not to use the extra rules that are disabled
# by default # by default
@ -20,11 +21,10 @@ linters:
- govet - govet
- revive - revive
- ineffassign - ineffassign
- deadcode
- gomodguard - gomodguard
- gofmt - gofmt
- unconvert - unconvert
- varcheck - unused
- gocritic - gocritic
- gofumpt - gofumpt
- tenv - tenv

View File

@ -19,7 +19,7 @@ help: ## print this help
getdeps: ## fetch necessary dependencies getdeps: ## fetch necessary dependencies
@mkdir -p ${GOPATH}/bin @mkdir -p ${GOPATH}/bin
@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.45.2 @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin
@echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.7-0.20211026165309-e818a1881b0e @echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.7-0.20211026165309-e818a1881b0e
@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest @echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest

View File

@ -35,8 +35,8 @@ func shouldEscape(c byte) bool {
// s3URLEncode is based on Golang's url.QueryEscape() code, // s3URLEncode is based on Golang's url.QueryEscape() code,
// while considering some S3 exceptions: // while considering some S3 exceptions:
// - Avoid encoding '/' and '*' // - Avoid encoding '/' and '*'
// - Force encoding of '~' // - Force encoding of '~'
func s3URLEncode(s string) string { func s3URLEncode(s string) string {
spaceCount, hexCount := 0, 0 spaceCount, hexCount := 0, 0
for i := 0; i < len(s); i++ { for i := 0; i < len(s); i++ {

View File

@ -288,9 +288,10 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
} }
// Check request auth type verifies the incoming http request // Check request auth type verifies the incoming http request
// - validates the request signature // - validates the request signature
// - validates the policy action if anonymous tests bucket policies if any, // - validates the policy action if anonymous tests bucket policies if any,
// for authenticated requests validates IAM policies. // for authenticated requests validates IAM policies.
//
// returns APIErrorCode if any to be replied to the client. // returns APIErrorCode if any to be replied to the client.
func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (s3Err APIErrorCode) { func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (s3Err APIErrorCode) {
_, _, s3Err = checkRequestAuthTypeCredential(ctx, r, action, bucketName, objectName) _, _, s3Err = checkRequestAuthTypeCredential(ctx, r, action, bucketName, objectName)
@ -298,9 +299,10 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
} }
// Check request auth type verifies the incoming http request // Check request auth type verifies the incoming http request
// - validates the request signature // - validates the request signature
// - validates the policy action if anonymous tests bucket policies if any, // - validates the policy action if anonymous tests bucket policies if any,
// for authenticated requests validates IAM policies. // for authenticated requests validates IAM policies.
//
// returns APIErrorCode if any to be replied to the client. // returns APIErrorCode if any to be replied to the client.
// Additionally returns the accessKey used in the request, and if this request is by an admin. // Additionally returns the accessKey used in the request, and if this request is by an admin.
func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (cred auth.Credentials, owner bool, s3Err APIErrorCode) { func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (cred auth.Credentials, owner bool, s3Err APIErrorCode) {

View File

@ -26,9 +26,10 @@ import (
) )
// healTask represents what to heal along with options // healTask represents what to heal along with options
// path: '/' => Heal disk formats along with metadata //
// path: 'bucket/' or '/bucket/' => Heal bucket // path: '/' => Heal disk formats along with metadata
// path: 'bucket/object' => Heal object // path: 'bucket/' or '/bucket/' => Heal bucket
// path: 'bucket/object' => Heal object
type healTask struct { type healTask struct {
bucket string bucket string
object string object string

View File

@ -67,11 +67,16 @@ const (
// - Check if a bucket has an entry in etcd backend // - Check if a bucket has an entry in etcd backend
// -- If no, make an entry // -- If no, make an entry
// -- If yes, check if the entry matches local IP check if we // -- If yes, check if the entry matches local IP check if we
// need to update the entry then proceed to update //
// need to update the entry then proceed to update
//
// -- If yes, check if the IP of entry matches local IP. // -- If yes, check if the IP of entry matches local IP.
// This means entry is for this instance. //
// This means entry is for this instance.
//
// -- If IP of the entry doesn't match, this means entry is // -- If IP of the entry doesn't match, this means entry is
// for another instance. Log an error to console. //
// for another instance. Log an error to console.
func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) { func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
if len(buckets) == 0 { if len(buckets) == 0 {
return return
@ -227,7 +232,6 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
// using the Initiate Multipart Upload request, but has not yet been // using the Initiate Multipart Upload request, but has not yet been
// completed or aborted. This operation returns at most 1,000 multipart // completed or aborted. This operation returns at most 1,000 multipart
// uploads in the response. // uploads in the response.
//
func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) { func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListMultipartUploads") ctx := newContext(r, w, "ListMultipartUploads")

View File

@ -32,9 +32,9 @@ import (
// Validate all the ListObjects query arguments, returns an APIErrorCode // Validate all the ListObjects query arguments, returns an APIErrorCode
// if one of the args does not meet the required conditions. // if one of the args does not meet the required conditions.
// Special conditions required by MinIO server are as below // Special conditions required by MinIO server are as below
// - delimiter if set should be equal to '/', otherwise the request is rejected. // - delimiter if set should be equal to '/', otherwise the request is rejected.
// - marker if set should have a common prefix with 'prefix' param, otherwise // - marker if set should have a common prefix with 'prefix' param, otherwise
// the request is rejected. // the request is rejected.
func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int) APIErrorCode { func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int) APIErrorCode {
// Max keys cannot be negative. // Max keys cannot be negative.
if maxKeys < 0 { if maxKeys < 0 {
@ -298,7 +298,6 @@ func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http
// This implementation of the GET operation returns some or all (up to 1000) // This implementation of the GET operation returns some or all (up to 1000)
// of the objects in a bucket. You can use the request parameters as selection // of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket. // criteria to return a subset of the objects in a bucket.
//
func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) { func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListObjectsV1") ctx := newContext(r, w, "ListObjectsV1")

View File

@ -1377,7 +1377,6 @@ type ReplicationPool struct {
mrfWorkerWg sync.WaitGroup mrfWorkerWg sync.WaitGroup
once sync.Once once sync.Once
mu sync.Mutex mu sync.Mutex
mrfMutex sync.Mutex
} }
// NewReplicationPool creates a pool of replication workers of specified size // NewReplicationPool creates a pool of replication workers of specified size

View File

@ -56,7 +56,8 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan
} }
// loadPrefixUsageFromBackend returns prefix usages found in passed buckets // loadPrefixUsageFromBackend returns prefix usages found in passed buckets
// e.g.: /testbucket/prefix => 355601334 //
// e.g.: /testbucket/prefix => 355601334
func loadPrefixUsageFromBackend(ctx context.Context, objAPI ObjectLayer, bucket string) (map[string]uint64, error) { func loadPrefixUsageFromBackend(ctx context.Context, objAPI ObjectLayer, bucket string) (map[string]uint64, error) {
z, ok := objAPI.(*erasureServerPools) z, ok := objAPI.(*erasureServerPools)
if !ok { if !ok {

View File

@ -45,10 +45,10 @@ type DummyDataGen struct {
// //
// Given the function: // Given the function:
// //
// f := func(r io.Reader) string { // f := func(r io.Reader) string {
// b, _ := ioutil.ReadAll(r) // b, _ := ioutil.ReadAll(r)
// return string(b) // return string(b)
// } // }
// //
// for example, the following is true: // for example, the following is true:
// //

View File

@ -231,8 +231,8 @@ func getLatestFileInfo(ctx context.Context, partsMetadata []FileInfo, defaultPar
// //
// - disks which have all parts specified in the latest xl.meta. // - disks which have all parts specified in the latest xl.meta.
// //
// - slice of errors about the state of data files on disk - can have // - slice of errors about the state of data files on disk - can have
// a not-found error or a hash-mismatch error. // a not-found error or a hash-mismatch error.
func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetadata []FileInfo, func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetadata []FileInfo,
errs []error, latestMeta FileInfo, bucket, object string, errs []error, latestMeta FileInfo, bucket, object string,
scanMode madmin.HealScanMode) ([]StorageAPI, []error, time.Time, scanMode madmin.HealScanMode) ([]StorageAPI, []error, time.Time,

View File

@ -652,6 +652,7 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri
} }
// versionsSorter sorts FileInfo slices by version. // versionsSorter sorts FileInfo slices by version.
//
//msgp:ignore versionsSorter //msgp:ignore versionsSorter
type versionsSorter []FileInfo type versionsSorter []FileInfo

View File

@ -371,9 +371,10 @@ func migrateCacheData(ctx context.Context, c *diskCache, bucket, object, oldfile
// migrate cache contents from old cacheFS format to new backend format // migrate cache contents from old cacheFS format to new backend format
// new format is flat // new format is flat
// sha(bucket,object)/ <== dir name //
// - part.1 <== data // sha(bucket,object)/ <== dir name
// - cache.json <== metadata // - part.1 <== data
// - cache.json <== metadata
func migrateOldCache(ctx context.Context, c *diskCache) error { func migrateOldCache(ctx context.Context, c *diskCache) error {
oldCacheBucketsPath := path.Join(c.dir, minioMetaBucket, "buckets") oldCacheBucketsPath := path.Join(c.dir, minioMetaBucket, "buckets")
cacheFormatPath := pathJoin(c.dir, minioMetaBucket, formatConfigFile) cacheFormatPath := pathJoin(c.dir, minioMetaBucket, formatConfigFile)

View File

@ -253,13 +253,13 @@ func genFormatCacheInvalidDisksOrder() []*formatCacheV2 {
} }
// Wrapper for calling FormatCache tests - validates // Wrapper for calling FormatCache tests - validates
// - valid format // - valid format
// - unrecognized version number // - unrecognized version number
// - unrecognized format tag // - unrecognized format tag
// - unrecognized cache version // - unrecognized cache version
// - wrong number of Disks entries // - wrong number of Disks entries
// - invalid This uuid // - invalid This uuid
// - invalid Disks order // - invalid Disks order
func TestFormatCache(t *testing.T) { func TestFormatCache(t *testing.T) {
formatInputCases := [][]*formatCacheV1{ formatInputCases := [][]*formatCacheV1{
genFormatCacheValid(), genFormatCacheValid(),

View File

@ -100,12 +100,12 @@ func (t *TreeWalkPool) Release(params listParams) (resultCh chan TreeWalkResult,
// Set - adds a treeWalk to the treeWalkPool. // Set - adds a treeWalk to the treeWalkPool.
// Also starts a timer go-routine that ends when: // Also starts a timer go-routine that ends when:
// 1) time.After() expires after t.timeOut seconds. // 1. time.After() expires after t.timeOut seconds.
// The expiration is needed so that the treeWalk go-routine resources are freed after a timeout // The expiration is needed so that the treeWalk go-routine resources are freed after a timeout
// if the S3 client does only partial listing of objects. // if the S3 client does only partial listing of objects.
// 2) Release() signals the timer go-routine to end on endTimerCh. // 2. Release() signals the timer go-routine to end on endTimerCh.
// During listing the timer should not timeout and end the treeWalk go-routine, hence the // During listing the timer should not timeout and end the treeWalk go-routine, hence the
// timer go-routine should be ended. // timer go-routine should be ended.
func (t *TreeWalkPool) Set(params listParams, resultCh chan TreeWalkResult, endWalkCh chan struct{}) { func (t *TreeWalkPool) Set(params listParams, resultCh chan TreeWalkResult, endWalkCh chan struct{}) {
t.mu.Lock() t.mu.Lock()
defer t.mu.Unlock() defer t.mu.Unlock()

View File

@ -139,10 +139,10 @@ func (fsi *fsIOPool) Open(path string) (*lock.RLockedFile, error) {
} }
// Write - Attempt to lock the file if it exists, // Write - Attempt to lock the file if it exists,
// - if the file exists. Then we try to get a write lock this // - if the file exists. Then we try to get a write lock this
// will block if we can't get a lock perhaps another write // will block if we can't get a lock perhaps another write
// or read is in progress. Concurrent calls are protected // or read is in progress. Concurrent calls are protected
// by the global namespace lock within the same process. // by the global namespace lock within the same process.
func (fsi *fsIOPool) Write(path string) (wlk *lock.LockedFile, err error) { func (fsi *fsIOPool) Write(path string) (wlk *lock.LockedFile, err error) {
if err = checkPathLength(path); err != nil { if err = checkPathLength(path); err != nil {
return nil, err return nil, err

View File

@ -37,18 +37,17 @@ import (
// will cache that Provider for all calls to IsExpired(), until Retrieve is // will cache that Provider for all calls to IsExpired(), until Retrieve is
// called again after IsExpired() is true. // called again after IsExpired() is true.
// //
// creds := credentials.NewChainCredentials( // creds := credentials.NewChainCredentials(
// []credentials.Provider{ // []credentials.Provider{
// &credentials.EnvAWSS3{}, // &credentials.EnvAWSS3{},
// &credentials.EnvMinio{}, // &credentials.EnvMinio{},
// }) // })
//
// // Usage of ChainCredentials.
// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
// if err != nil {
// log.Fatalln(err)
// }
// //
// // Usage of ChainCredentials.
// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
// if err != nil {
// log.Fatalln(err)
// }
type Chain struct { type Chain struct {
Providers []credentials.Provider Providers []credentials.Provider
curr credentials.Provider curr credentials.Provider

View File

@ -140,21 +140,21 @@ func randString(n int, src rand.Source, prefix string) string {
} }
// Chains all credential types, in the following order: // Chains all credential types, in the following order:
// - AWS env vars (i.e. AWS_ACCESS_KEY_ID) // - AWS env vars (i.e. AWS_ACCESS_KEY_ID)
// - AWS creds file (i.e. AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials) // - AWS creds file (i.e. AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials)
// - Static credentials provided by user (i.e. MINIO_ROOT_USER/MINIO_ACCESS_KEY) // - Static credentials provided by user (i.e. MINIO_ROOT_USER/MINIO_ACCESS_KEY)
var defaultProviders = []credentials.Provider{ var defaultProviders = []credentials.Provider{
&credentials.EnvAWS{}, &credentials.EnvAWS{},
&credentials.FileAWSCredentials{}, &credentials.FileAWSCredentials{},
} }
// Chains all credential types, in the following order: // Chains all credential types, in the following order:
// - AWS env vars (i.e. AWS_ACCESS_KEY_ID) // - AWS env vars (i.e. AWS_ACCESS_KEY_ID)
// - AWS creds file (i.e. AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials) // - AWS creds file (i.e. AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials)
// - IAM profile based credentials. (performs an HTTP // - IAM profile based credentials. (performs an HTTP
// call to a pre-defined endpoint, only valid inside // call to a pre-defined endpoint, only valid inside
// configured ec2 instances) // configured ec2 instances)
// - Static credentials provided by user (i.e. MINIO_ROOT_USER/MINIO_ACCESS_KEY) // - Static credentials provided by user (i.e. MINIO_ROOT_USER/MINIO_ACCESS_KEY)
var defaultAWSCredProviders = []credentials.Provider{ var defaultAWSCredProviders = []credentials.Provider{
&credentials.EnvAWS{}, &credentials.EnvAWS{},
&credentials.FileAWSCredentials{}, &credentials.FileAWSCredentials{},

View File

@ -48,10 +48,11 @@ func etcdKvsToSet(prefix string, kvs []*mvccpb.KeyValue) set.StringSet {
// Extract path string by stripping off the `prefix` value and the suffix, // Extract path string by stripping off the `prefix` value and the suffix,
// value, usually in the following form. // value, usually in the following form.
// s := "config/iam/users/foo/config.json" //
// prefix := "config/iam/users/" // s := "config/iam/users/foo/config.json"
// suffix := "config.json" // prefix := "config/iam/users/"
// result is foo // suffix := "config.json"
// result is foo
func extractPathPrefixAndSuffix(s string, prefix string, suffix string) string { func extractPathPrefixAndSuffix(s string, prefix string, suffix string) string {
return pathClean(strings.TrimSuffix(strings.TrimPrefix(s, prefix), suffix)) return pathClean(strings.TrimSuffix(strings.TrimPrefix(s, prefix), suffix))
} }

View File

@ -322,7 +322,8 @@ func isLocalHost(host string, port string, localPort string) (bool, error) {
// sameLocalAddrs - returns true if two addresses, even with different // sameLocalAddrs - returns true if two addresses, even with different
// formats, point to the same machine, e.g: // formats, point to the same machine, e.g:
// ':9000' and 'http://localhost:9000/' will return true //
// ':9000' and 'http://localhost:9000/' will return true
func sameLocalAddrs(addr1, addr2 string) (bool, error) { func sameLocalAddrs(addr1, addr2 string) (bool, error) {
// Extract host & port from given parameters // Extract host & port from given parameters
host1, port1, err := extractHostPort(addr1) host1, port1, err := extractHostPort(addr1)

View File

@ -1125,9 +1125,9 @@ func (sys *NotificationSys) GetClusterMetrics(ctx context.Context) <-chan Metric
// 'freeze' is 'false' would resume all S3 API calls again. // 'freeze' is 'false' would resume all S3 API calls again.
// NOTE: once a tenant is frozen either two things needs to // NOTE: once a tenant is frozen either two things needs to
// happen before resuming normal operations. // happen before resuming normal operations.
// - Server needs to be restarted 'mc admin service restart' // - Server needs to be restarted 'mc admin service restart'
// - 'freeze' should be set to 'false' for this call // - 'freeze' should be set to 'false' for this call
// to resume normal operations. // to resume normal operations.
func (sys *NotificationSys) ServiceFreeze(ctx context.Context, freeze bool) []NotificationPeerErr { func (sys *NotificationSys) ServiceFreeze(ctx context.Context, freeze bool) []NotificationPeerErr {
serviceSig := serviceUnFreeze serviceSig := serviceUnFreeze
if freeze { if freeze {

View File

@ -33,20 +33,22 @@ var etagRegex = regexp.MustCompile("\"*?([^\"]*?)\"*?$")
// Validates the preconditions for CopyObjectPart, returns true if CopyObjectPart // Validates the preconditions for CopyObjectPart, returns true if CopyObjectPart
// operation should not proceed. Preconditions supported are: // operation should not proceed. Preconditions supported are:
// x-amz-copy-source-if-modified-since //
// x-amz-copy-source-if-unmodified-since // x-amz-copy-source-if-modified-since
// x-amz-copy-source-if-match // x-amz-copy-source-if-unmodified-since
// x-amz-copy-source-if-none-match // x-amz-copy-source-if-match
// x-amz-copy-source-if-none-match
func checkCopyObjectPartPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo) bool { func checkCopyObjectPartPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo) bool {
return checkCopyObjectPreconditions(ctx, w, r, objInfo) return checkCopyObjectPreconditions(ctx, w, r, objInfo)
} }
// Validates the preconditions for CopyObject, returns true if CopyObject operation should not proceed. // Validates the preconditions for CopyObject, returns true if CopyObject operation should not proceed.
// Preconditions supported are: // Preconditions supported are:
// x-amz-copy-source-if-modified-since //
// x-amz-copy-source-if-unmodified-since // x-amz-copy-source-if-modified-since
// x-amz-copy-source-if-match // x-amz-copy-source-if-unmodified-since
// x-amz-copy-source-if-none-match // x-amz-copy-source-if-match
// x-amz-copy-source-if-none-match
func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo) bool { func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo) bool {
// Return false for methods other than GET and HEAD. // Return false for methods other than GET and HEAD.
if r.Method != http.MethodPut { if r.Method != http.MethodPut {
@ -128,10 +130,11 @@ func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r
// Validates the preconditions. Returns true if GET/HEAD operation should not proceed. // Validates the preconditions. Returns true if GET/HEAD operation should not proceed.
// Preconditions supported are: // Preconditions supported are:
// If-Modified-Since //
// If-Unmodified-Since // If-Modified-Since
// If-Match // If-Unmodified-Since
// If-None-Match // If-Match
// If-None-Match
func checkPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo, opts ObjectOptions) bool { func checkPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo, opts ObjectOptions) bool {
// Return false for methods other than GET and HEAD. // Return false for methods other than GET and HEAD.
if r.Method != http.MethodGet && r.Method != http.MethodHead { if r.Method != http.MethodGet && r.Method != http.MethodHead {

View File

@ -3429,7 +3429,8 @@ func testAPIPutObjectPartHandlerStreaming(obj ObjectLayer, instanceType, bucketN
} }
// TestAPIPutObjectPartHandler - Tests validate the response of PutObjectPart HTTP handler // TestAPIPutObjectPartHandler - Tests validate the response of PutObjectPart HTTP handler
// for variety of inputs. //
// for variety of inputs.
func TestAPIPutObjectPartHandler(t *testing.T) { func TestAPIPutObjectPartHandler(t *testing.T) {
defer DetectTestLeak(t)() defer DetectTestLeak(t)()
ExecExtendedObjectLayerAPITest(t, testAPIPutObjectPartHandler, []string{"PutObjectPart"}) ExecExtendedObjectLayerAPITest(t, testAPIPutObjectPartHandler, []string{"PutObjectPart"})
@ -3743,7 +3744,8 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
} }
// TestAPIListObjectPartsHandlerPreSign - Tests validate the response of ListObjectParts HTTP handler // TestAPIListObjectPartsHandlerPreSign - Tests validate the response of ListObjectParts HTTP handler
// when signature type of the HTTP request is `Presigned`. //
// when signature type of the HTTP request is `Presigned`.
func TestAPIListObjectPartsHandlerPreSign(t *testing.T) { func TestAPIListObjectPartsHandlerPreSign(t *testing.T) {
defer DetectTestLeak(t)() defer DetectTestLeak(t)()
ExecObjectLayerAPITest(t, testAPIListObjectPartsHandlerPreSign, ExecObjectLayerAPITest(t, testAPIListObjectPartsHandlerPreSign,
@ -3832,7 +3834,8 @@ func testAPIListObjectPartsHandlerPreSign(obj ObjectLayer, instanceType, bucketN
} }
// TestAPIListObjectPartsHandler - Tests validate the response of ListObjectParts HTTP handler // TestAPIListObjectPartsHandler - Tests validate the response of ListObjectParts HTTP handler
// for variety of success/failure cases. //
// for variety of success/failure cases.
func TestAPIListObjectPartsHandler(t *testing.T) { func TestAPIListObjectPartsHandler(t *testing.T) {
defer DetectTestLeak(t)() defer DetectTestLeak(t)()
ExecExtendedObjectLayerAPITest(t, testAPIListObjectPartsHandler, []string{"ListObjectParts"}) ExecExtendedObjectLayerAPITest(t, testAPIListObjectPartsHandler, []string{"ListObjectParts"})

View File

@ -50,7 +50,8 @@ const (
) )
// splitZipExtensionPath splits the S3 path to the zip file and the path inside the zip: // splitZipExtensionPath splits the S3 path to the zip file and the path inside the zip:
// e.g /path/to/archive.zip/backup-2021/myimage.png => /path/to/archive.zip, backup/myimage.png //
// e.g /path/to/archive.zip/backup-2021/myimage.png => /path/to/archive.zip, backup/myimage.png
func splitZipExtensionPath(input string) (zipPath, object string, err error) { func splitZipExtensionPath(input string) (zipPath, object string, err error) {
idx := strings.Index(input, archivePattern) idx := strings.Index(input, archivePattern)
if idx < 0 { if idx < 0 {

View File

@ -107,7 +107,8 @@ func unescapeQueries(encodedQuery string) (unescapedQueries []string, err error)
} }
// doesPresignV2SignatureMatch - Verify query headers with presigned signature // doesPresignV2SignatureMatch - Verify query headers with presigned signature
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth // - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
//
// returns ErrNone if matches. S3 errors otherwise. // returns ErrNone if matches. S3 errors otherwise.
func doesPresignV2SignatureMatch(r *http.Request) APIErrorCode { func doesPresignV2SignatureMatch(r *http.Request) APIErrorCode {
// r.RequestURI will have raw encoded URI as sent by the client. // r.RequestURI will have raw encoded URI as sent by the client.

View File

@ -171,12 +171,12 @@ type preSignValues struct {
// Parses signature version '4' query string of the following form. // Parses signature version '4' query string of the following form.
// //
// querystring = X-Amz-Algorithm=algorithm // querystring = X-Amz-Algorithm=algorithm
// querystring += &X-Amz-Credential= urlencode(accessKey + '/' + credential_scope) // querystring += &X-Amz-Credential= urlencode(accessKey + '/' + credential_scope)
// querystring += &X-Amz-Date=date // querystring += &X-Amz-Date=date
// querystring += &X-Amz-Expires=timeout interval // querystring += &X-Amz-Expires=timeout interval
// querystring += &X-Amz-SignedHeaders=signed_headers // querystring += &X-Amz-SignedHeaders=signed_headers
// querystring += &X-Amz-Signature=signature // querystring += &X-Amz-Signature=signature
// //
// verifies if any of the necessary query params are missing in the presigned request. // verifies if any of the necessary query params are missing in the presigned request.
func doesV4PresignParamsExist(query url.Values) APIErrorCode { func doesV4PresignParamsExist(query url.Values) APIErrorCode {
@ -251,9 +251,8 @@ func parsePreSignV4(query url.Values, region string, stype serviceType) (psv pre
// Parses signature version '4' header of the following form. // Parses signature version '4' header of the following form.
// //
// Authorization: algorithm Credential=accessKeyID/credScope, \ // Authorization: algorithm Credential=accessKeyID/credScope, \
// SignedHeaders=signedHeaders, Signature=signature // SignedHeaders=signedHeaders, Signature=signature
//
func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues, aec APIErrorCode) { func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues, aec APIErrorCode) {
// credElement is fetched first to skip replacing the space in access key. // credElement is fetched first to skip replacing the space in access key.
credElement := strings.TrimPrefix(strings.Split(strings.TrimSpace(v4Auth), ",")[0], signV4Algorithm) credElement := strings.TrimPrefix(strings.Split(strings.TrimSpace(v4Auth), ",")[0], signV4Algorithm)

View File

@ -82,10 +82,11 @@ func validateCredentialfields(t *testing.T, testNum int, expectedCredentials cre
// A valid format of credential should be of the following format. // A valid format of credential should be of the following format.
// Credential = accessKey + SlashSeparator+ scope // Credential = accessKey + SlashSeparator+ scope
// where scope = string.Join([]string{ currTime.Format(yyyymmdd), // where scope = string.Join([]string{ currTime.Format(yyyymmdd),
// globalMinioDefaultRegion, //
// "s3", // globalMinioDefaultRegion,
// "aws4_request", // "s3",
// },SlashSeparator) // "aws4_request",
// },SlashSeparator)
func TestParseCredentialHeader(t *testing.T) { func TestParseCredentialHeader(t *testing.T) {
sampleTimeStr := UTCNow().Format(yyyymmdd) sampleTimeStr := UTCNow().Format(yyyymmdd)

View File

@ -95,13 +95,13 @@ func getSignedHeaders(signedHeaders http.Header) string {
// getCanonicalRequest generate a canonical request of style // getCanonicalRequest generate a canonical request of style
// //
// canonicalRequest = // canonicalRequest =
// <HTTPMethod>\n
// <CanonicalURI>\n
// <CanonicalQueryString>\n
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
// //
// <HTTPMethod>\n
// <CanonicalURI>\n
// <CanonicalQueryString>\n
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string { func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string {
rawQuery := strings.ReplaceAll(queryStr, "+", "%20") rawQuery := strings.ReplaceAll(queryStr, "+", "%20")
encodedPath := s3utils.EncodePath(urlPath) encodedPath := s3utils.EncodePath(urlPath)
@ -169,7 +169,8 @@ func compareSignatureV4(sig1, sig2 string) bool {
} }
// doesPolicySignatureMatch - Verify query headers with post policy // doesPolicySignatureMatch - Verify query headers with post policy
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
//
// returns ErrNone if the signature matches. // returns ErrNone if the signature matches.
func doesPolicySignatureV4Match(formValues http.Header) (auth.Credentials, APIErrorCode) { func doesPolicySignatureV4Match(formValues http.Header) (auth.Credentials, APIErrorCode) {
// Server region. // Server region.
@ -203,7 +204,8 @@ func doesPolicySignatureV4Match(formValues http.Header) (auth.Credentials, APIEr
} }
// doesPresignedSignatureMatch - Verify query headers with presigned signature // doesPresignedSignatureMatch - Verify query headers with presigned signature
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
//
// returns ErrNone if the signature matches. // returns ErrNone if the signature matches.
func doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) APIErrorCode { func doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) APIErrorCode {
// Copy request // Copy request
@ -329,7 +331,8 @@ func doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region s
} }
// doesSignatureMatch - Verify authorization header with calculated header in accordance with // doesSignatureMatch - Verify authorization header with calculated header in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html // - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
//
// returns ErrNone if signature matches. // returns ErrNone if signature matches.
func doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) APIErrorCode { func doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) APIErrorCode {
// Copy request. // Copy request.

View File

@ -22,6 +22,7 @@ import (
) )
// DeleteOptions represents the disk level delete options available for the APIs // DeleteOptions represents the disk level delete options available for the APIs
//
//msgp:ignore DeleteOptions //msgp:ignore DeleteOptions
type DeleteOptions struct { type DeleteOptions struct {
Recursive bool Recursive bool
@ -32,8 +33,11 @@ type DeleteOptions struct {
// DiskInfo is an extended type which returns current // DiskInfo is an extended type which returns current
// disk usage per path. // disk usage per path.
//msgp:tuple DiskInfo
// The above means that any added/deleted fields are incompatible. // The above means that any added/deleted fields are incompatible.
//
// The above means that any added/deleted fields are incompatible.
//
//msgp:tuple DiskInfo
type DiskInfo struct { type DiskInfo struct {
Total uint64 Total uint64
Free uint64 Free uint64
@ -65,8 +69,11 @@ type DiskMetrics struct {
type VolsInfo []VolInfo type VolsInfo []VolInfo
// VolInfo - represents volume stat information. // VolInfo - represents volume stat information.
//msgp:tuple VolInfo
// The above means that any added/deleted fields are incompatible. // The above means that any added/deleted fields are incompatible.
//
// The above means that any added/deleted fields are incompatible.
//
//msgp:tuple VolInfo
type VolInfo struct { type VolInfo struct {
// Name of the volume. // Name of the volume.
Name string Name string
@ -77,6 +84,8 @@ type VolInfo struct {
// FilesInfo represent a list of files, additionally // FilesInfo represent a list of files, additionally
// indicates if the list is last. // indicates if the list is last.
//
//msgp:tuple FileInfo
type FilesInfo struct { type FilesInfo struct {
Files []FileInfo Files []FileInfo
IsTruncated bool IsTruncated bool
@ -91,8 +100,11 @@ func (f FileInfoVersions) Size() (size int64) {
} }
// FileInfoVersions represent a list of versions for a given file. // FileInfoVersions represent a list of versions for a given file.
//msgp:tuple FileInfoVersions
// The above means that any added/deleted fields are incompatible. // The above means that any added/deleted fields are incompatible.
//
// The above means that any added/deleted fields are incompatible.
//
//msgp:tuple FileInfoVersions
type FileInfoVersions struct { type FileInfoVersions struct {
// Name of the volume. // Name of the volume.
Volume string `msg:"v,omitempty"` Volume string `msg:"v,omitempty"`
@ -136,7 +148,6 @@ type RawFileInfo struct {
} }
// FileInfo - represents file stat information. // FileInfo - represents file stat information.
//msgp:tuple FileInfo
// The above means that any added/deleted fields are incompatible. // The above means that any added/deleted fields are incompatible.
// Make sure to bump the internode version at storage-rest-common.go // Make sure to bump the internode version at storage-rest-common.go
type FileInfo struct { type FileInfo struct {
@ -235,10 +246,10 @@ func (fi FileInfo) Equals(ofi FileInfo) (ok bool) {
} }
// GetDataDir returns an expected dataDir given FileInfo // GetDataDir returns an expected dataDir given FileInfo
// - deleteMarker returns "delete-marker" // - deleteMarker returns "delete-marker"
// - returns "legacy" if FileInfo is XLV1 and DataDir is // - returns "legacy" if FileInfo is XLV1 and DataDir is
// empty, returns DataDir otherwise // empty, returns DataDir otherwise
// - returns "dataDir" // - returns "dataDir"
func (fi FileInfo) GetDataDir() string { func (fi FileInfo) GetDataDir() string {
if fi.Deleted { if fi.Deleted {
return "delete-marker" return "delete-marker"

View File

@ -63,7 +63,8 @@ func getChunkSignature(cred auth.Credentials, seedSignature string, region strin
} }
// calculateSeedSignature - Calculate seed signature in accordance with // calculateSeedSignature - Calculate seed signature in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
//
// returns signature, error otherwise if the signature mismatches or any other // returns signature, error otherwise if the signature mismatches or any other
// error while parsing and validating. // error while parsing and validating.
func calculateSeedSignature(r *http.Request) (cred auth.Credentials, signature string, region string, date time.Time, errCode APIErrorCode) { func calculateSeedSignature(r *http.Request) (cred auth.Credentials, signature string, region string, date time.Time, errCode APIErrorCode) {
@ -195,7 +196,8 @@ func (cr *s3ChunkedReader) Close() (err error) {
// Now, we read one chunk from the underlying reader. // Now, we read one chunk from the underlying reader.
// A chunk has the following format: // A chunk has the following format:
// <chunk-size-as-hex> + ";chunk-signature=" + <signature-as-hex> + "\r\n" + <payload> + "\r\n" //
// <chunk-size-as-hex> + ";chunk-signature=" + <signature-as-hex> + "\r\n" + <payload> + "\r\n"
// //
// First, we read the chunk size but fail if it is larger // First, we read the chunk size but fail if it is larger
// than 16 MiB. We must not accept arbitrary large chunks. // than 16 MiB. We must not accept arbitrary large chunks.
@ -414,7 +416,8 @@ const s3ChunkSignatureStr = ";chunk-signature="
// parses3ChunkExtension removes any s3 specific chunk-extension from buf. // parses3ChunkExtension removes any s3 specific chunk-extension from buf.
// For example, // For example,
// "10000;chunk-signature=..." => "10000", "chunk-signature=..." //
// "10000;chunk-signature=..." => "10000", "chunk-signature=..."
func parseS3ChunkExtension(buf []byte) ([]byte, []byte) { func parseS3ChunkExtension(buf []byte) ([]byte, []byte) {
buf = trimTrailingWhitespace(buf) buf = trimTrailingWhitespace(buf)
semi := bytes.Index(buf, []byte(s3ChunkSignatureStr)) semi := bytes.Index(buf, []byte(s3ChunkSignatureStr))

View File

@ -522,7 +522,8 @@ func (sts *stsAPIHandlers) AssumeRoleWithSSO(w http.ResponseWriter, r *http.Requ
// Connect-compatible identity provider. // Connect-compatible identity provider.
// //
// Eg:- // Eg:-
// $ curl https://minio:9000/?Action=AssumeRoleWithWebIdentity&WebIdentityToken=<jwt> //
// $ curl https://minio:9000/?Action=AssumeRoleWithWebIdentity&WebIdentityToken=<jwt>
func (sts *stsAPIHandlers) AssumeRoleWithWebIdentity(w http.ResponseWriter, r *http.Request) { func (sts *stsAPIHandlers) AssumeRoleWithWebIdentity(w http.ResponseWriter, r *http.Request) {
sts.AssumeRoleWithSSO(w, r) sts.AssumeRoleWithSSO(w, r)
} }
@ -531,7 +532,8 @@ func (sts *stsAPIHandlers) AssumeRoleWithWebIdentity(w http.ResponseWriter, r *h
// OAuth2.0 client credential grants. // OAuth2.0 client credential grants.
// //
// Eg:- // Eg:-
// $ curl https://minio:9000/?Action=AssumeRoleWithClientGrants&Token=<jwt> //
// $ curl https://minio:9000/?Action=AssumeRoleWithClientGrants&Token=<jwt>
func (sts *stsAPIHandlers) AssumeRoleWithClientGrants(w http.ResponseWriter, r *http.Request) { func (sts *stsAPIHandlers) AssumeRoleWithClientGrants(w http.ResponseWriter, r *http.Request) {
sts.AssumeRoleWithSSO(w, r) sts.AssumeRoleWithSSO(w, r)
} }

View File

@ -127,19 +127,17 @@ func TestMain(m *testing.M) {
// concurrency level for certain parallel tests. // concurrency level for certain parallel tests.
const testConcurrencyLevel = 10 const testConcurrencyLevel = 10
//
// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258 // Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258
// //
// User-Agent: // User-Agent:
// //
// This is ignored from signing because signing this causes problems with generating pre-signed URLs // This is ignored from signing because signing this causes problems with generating pre-signed URLs
// (that are executed by other agents) or when customers pass requests through proxies, which may // (that are executed by other agents) or when customers pass requests through proxies, which may
// modify the user-agent. // modify the user-agent.
// //
// Authorization: // Authorization:
//
// Is skipped for obvious reasons
// //
// Is skipped for obvious reasons
var ignoredHeaders = map[string]bool{ var ignoredHeaders = map[string]bool{
"Authorization": true, "Authorization": true,
"User-Agent": true, "User-Agent": true,
@ -302,8 +300,9 @@ func isSameType(obj1, obj2 interface{}) bool {
// TestServer encapsulates an instantiation of a MinIO instance with a temporary backend. // TestServer encapsulates an instantiation of a MinIO instance with a temporary backend.
// Example usage: // Example usage:
// s := StartTestServer(t,"Erasure") //
// defer s.Stop() // s := StartTestServer(t,"Erasure")
// defer s.Stop()
type TestServer struct { type TestServer struct {
Root string Root string
Disks EndpointServerPools Disks EndpointServerPools
@ -1563,11 +1562,14 @@ func prepareTestBackend(ctx context.Context, instanceType string) (ObjectLayer,
// response for anonymous/unsigned and unknown signature type HTTP request. // response for anonymous/unsigned and unknown signature type HTTP request.
// Here is the brief description of some of the arguments to the function below. // Here is the brief description of some of the arguments to the function below.
// apiRouter - http.Handler with the relevant API endPoint (API endPoint under test) registered. //
// anonReq - unsigned *http.Request to invoke the handler's response for anonymous requests. // apiRouter - http.Handler with the relevant API endPoint (API endPoint under test) registered.
// policyFunc - function to return bucketPolicy statement which would permit the anonymous request to be served. // anonReq - unsigned *http.Request to invoke the handler's response for anonymous requests.
// policyFunc - function to return bucketPolicy statement which would permit the anonymous request to be served.
//
// The test works in 2 steps, here is the description of the steps. // The test works in 2 steps, here is the description of the steps.
// STEP 1: Call the handler with the unsigned HTTP request (anonReq), assert for the `ErrAccessDenied` error response. //
// STEP 1: Call the handler with the unsigned HTTP request (anonReq), assert for the `ErrAccessDenied` error response.
func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketName, objectName, instanceType string, apiRouter http.Handler, func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketName, objectName, instanceType string, apiRouter http.Handler,
anonReq *http.Request, bucketPolicy *policy.Policy, anonReq *http.Request, bucketPolicy *policy.Policy,
) { ) {

View File

@ -27,15 +27,17 @@ import (
// // Perform a ObjectLayer.GetObjectInfo to fetch object version information // // Perform a ObjectLayer.GetObjectInfo to fetch object version information
// goiOpts := os.GetOpts() // goiOpts := os.GetOpts()
// gerr := objAPI.GetObjectInfo(ctx, bucket, object, goiOpts) // gerr := objAPI.GetObjectInfo(ctx, bucket, object, goiOpts)
// if gerr == nil { //
// os.SetTransitionState(goi) // if gerr == nil {
// } // os.SetTransitionState(goi)
// }
// //
// // After the overwriting object operation is complete. // // After the overwriting object operation is complete.
// if jentry, ok := os.ShouldRemoveRemoteObject(); ok { //
// err := globalTierJournal.AddEntry(jentry) // if jentry, ok := os.ShouldRemoveRemoteObject(); ok {
// logger.LogIf(ctx, err) // err := globalTierJournal.AddEntry(jentry)
// } // logger.LogIf(ctx, err)
// }
type objSweeper struct { type objSweeper struct {
Object string Object string
Bucket string Bucket string

View File

@ -51,8 +51,10 @@ func filterMatchingPrefix(entries []string, prefixEntry string) []string {
// we need to remove this trailing "/" for objects and retain "/" for prefixes before // we need to remove this trailing "/" for objects and retain "/" for prefixes before
// sorting because the trailing "/" can affect the sorting results for certain cases. // sorting because the trailing "/" can affect the sorting results for certain cases.
// Ex. lets say entries = ["a-b/", "a/"] and both are objects. // Ex. lets say entries = ["a-b/", "a/"] and both are objects.
// sorting with out trailing "/" = ["a", "a-b"] //
// sorting with trailing "/" = ["a-b/", "a/"] // sorting with out trailing "/" = ["a", "a-b"]
// sorting with trailing "/" = ["a-b/", "a/"]
//
// Hence if entries[] does not have a case like the above example then isLeaf() check // Hence if entries[] does not have a case like the above example then isLeaf() check
// can be delayed till the entry is pushed into the TreeWalkResult channel. // can be delayed till the entry is pushed into the TreeWalkResult channel.
// delayIsLeafCheck() returns true if isLeaf can be delayed or false if // delayIsLeafCheck() returns true if isLeaf can be delayed or false if
@ -86,10 +88,10 @@ type ListDirFunc func(bucket, prefixDir, prefixEntry string) (emptyDir bool, ent
// IsLeafFunc - A function isLeaf of type isLeafFunc is used to detect if an // IsLeafFunc - A function isLeaf of type isLeafFunc is used to detect if an
// entry is a leaf entry. There are 2 scenarios where isLeaf should behave // entry is a leaf entry. There are 2 scenarios where isLeaf should behave
// differently depending on the backend: // differently depending on the backend:
// 1. FS backend object listing - isLeaf is true if the entry // 1. FS backend object listing - isLeaf is true if the entry
// has no trailing "/" // has no trailing "/"
// 2. Erasure backend object listing - isLeaf is true if the entry // 2. Erasure backend object listing - isLeaf is true if the entry
// is a directory and contains xl.meta // is a directory and contains xl.meta
type IsLeafFunc func(string, string) bool type IsLeafFunc func(string, string) bool
// IsLeafDirFunc - A function isLeafDir of type isLeafDirFunc is used to detect // IsLeafDirFunc - A function isLeafDir of type isLeafDirFunc is used to detect

View File

@ -123,8 +123,7 @@ func GetCurrentReleaseTime() (releaseTime time.Time, err error) {
// //
// https://github.com/moby/moby/blob/master/daemon/initlayer/setup_unix.go#L25 // https://github.com/moby/moby/blob/master/daemon/initlayer/setup_unix.go#L25
// //
// "/.dockerenv": "file", // "/.dockerenv": "file",
//
func IsDocker() bool { func IsDocker() bool {
if !globalIsCICD { if !globalIsCICD {
_, err := os.Stat("/.dockerenv") _, err := os.Stat("/.dockerenv")
@ -220,7 +219,7 @@ func IsPCFTile() bool {
// DO NOT CHANGE USER AGENT STYLE. // DO NOT CHANGE USER AGENT STYLE.
// The style should be // The style should be
// //
// MinIO (<OS>; <ARCH>[; <MODE>][; dcos][; kubernetes][; docker][; source]) MinIO/<VERSION> MinIO/<RELEASE-TAG> MinIO/<COMMIT-ID> [MinIO/universe-<PACKAGE-NAME>] [MinIO/helm-<HELM-VERSION>] // MinIO (<OS>; <ARCH>[; <MODE>][; dcos][; kubernetes][; docker][; source]) MinIO/<VERSION> MinIO/<RELEASE-TAG> MinIO/<COMMIT-ID> [MinIO/universe-<PACKAGE-NAME>] [MinIO/helm-<HELM-VERSION>]
// //
// Any change here should be discussed by opening an issue at // Any change here should be discussed by opening an issue at
// https://github.com/minio/minio/issues. // https://github.com/minio/minio/issues.

View File

@ -390,8 +390,9 @@ func (lc Lifecycle) ComputeAction(obj ObjectOpts) Action {
// ExpectedExpiryTime calculates the expiry, transition or restore date/time based on a object modtime. // ExpectedExpiryTime calculates the expiry, transition or restore date/time based on a object modtime.
// The expected transition or restore time is always a midnight time following the the object // The expected transition or restore time is always a midnight time following the the object
// modification time plus the number of transition/restore days. // modification time plus the number of transition/restore days.
// e.g. If the object modtime is `Thu May 21 13:42:50 GMT 2020` and the object should //
// transition in 1 day, then the expected transition time is `Fri, 23 May 2020 00:00:00 GMT` // e.g. If the object modtime is `Thu May 21 13:42:50 GMT 2020` and the object should
// transition in 1 day, then the expected transition time is `Fri, 23 May 2020 00:00:00 GMT`
func ExpectedExpiryTime(modTime time.Time, days int) time.Time { func ExpectedExpiryTime(modTime time.Time, days int) time.Time {
if days == 0 { if days == 0 {
return modTime return modTime

View File

@ -60,7 +60,9 @@ type Config struct {
// BitrotScanCycle returns the configured cycle for the scanner healing // BitrotScanCycle returns the configured cycle for the scanner healing
// -1 for not enabled // -1 for not enabled
// 0 for contiunous bitrot scanning //
// 0 for contiunous bitrot scanning
//
// >0 interval duration between cycles // >0 interval duration between cycles
func (opts Config) BitrotScanCycle() (d time.Duration) { func (opts Config) BitrotScanCycle() (d time.Duration) {
configMutex.RLock() configMutex.RLock()

View File

@ -216,10 +216,13 @@ func validateParity(ssParity, rrsParity, setDriveCount int) (err error) {
// //
// -- if input storage class is empty then standard is assumed // -- if input storage class is empty then standard is assumed
// -- if input is RRS but RRS is not configured default '2' parity // -- if input is RRS but RRS is not configured default '2' parity
// for RRS is assumed //
// for RRS is assumed
//
// -- if input is STANDARD but STANDARD is not configured '0' parity // -- if input is STANDARD but STANDARD is not configured '0' parity
// is returned, the caller is expected to choose the right parity //
// at that point. // is returned, the caller is expected to choose the right parity
// at that point.
func (sCfg Config) GetParityForSC(sc string) (parity int) { func (sCfg Config) GetParityForSC(sc string) (parity int) {
ConfigLock.RLock() ConfigLock.RLock()
defer ConfigLock.RUnlock() defer ConfigLock.RUnlock()

View File

@ -25,32 +25,30 @@
// with an unique key-encryption-key. Given the correct key-encryption-key the // with an unique key-encryption-key. Given the correct key-encryption-key the
// sealed 'ObjectKey' can be unsealed and the object can be decrypted. // sealed 'ObjectKey' can be unsealed and the object can be decrypted.
// //
//
// ## SSE-C // ## SSE-C
// //
// SSE-C computes the key-encryption-key from the client-provided key, an // SSE-C computes the key-encryption-key from the client-provided key, an
// initialization vector (IV) and the bucket/object path. // initialization vector (IV) and the bucket/object path.
// //
// 1. Encrypt: // 1. Encrypt:
// Input: ClientKey, bucket, object, metadata, object_data // Input: ClientKey, bucket, object, metadata, object_data
// - IV := Random({0,1}²⁵⁶) // - IV := Random({0,1}²⁵⁶)
// - ObjectKey := SHA256(ClientKey || Random({0,1}²⁵⁶)) // - ObjectKey := SHA256(ClientKey || Random({0,1}²⁵⁶))
// - KeyEncKey := HMAC-SHA256(ClientKey, IV || 'SSE-C' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object) // - KeyEncKey := HMAC-SHA256(ClientKey, IV || 'SSE-C' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey) // - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey)
// - enc_object_data := DAREv2_Enc(ObjectKey, object_data) // - enc_object_data := DAREv2_Enc(ObjectKey, object_data)
// - metadata <- IV // - metadata <- IV
// - metadata <- SealedKey // - metadata <- SealedKey
// Output: enc_object_data, metadata // Output: enc_object_data, metadata
//
// 2. Decrypt:
// Input: ClientKey, bucket, object, metadata, enc_object_data
// - IV <- metadata
// - SealedKey <- metadata
// - KeyEncKey := HMAC-SHA256(ClientKey, IV || 'SSE-C' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
// - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
// //
// 2. Decrypt:
// Input: ClientKey, bucket, object, metadata, enc_object_data
// - IV <- metadata
// - SealedKey <- metadata
// - KeyEncKey := HMAC-SHA256(ClientKey, IV || 'SSE-C' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
// - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
// //
// ## SSE-S3 // ## SSE-S3
// //
@ -63,57 +61,57 @@
// SSE-S3 with a single master key works as SSE-C where the master key is // SSE-S3 with a single master key works as SSE-C where the master key is
// used as the client-provided key. // used as the client-provided key.
// //
// 1. Encrypt: // 1. Encrypt:
// Input: MasterKey, bucket, object, metadata, object_data // Input: MasterKey, bucket, object, metadata, object_data
// - IV := Random({0,1}²⁵⁶) // - IV := Random({0,1}²⁵⁶)
// - ObjectKey := SHA256(MasterKey || Random({0,1}²⁵⁶)) // - ObjectKey := SHA256(MasterKey || Random({0,1}²⁵⁶))
// - KeyEncKey := HMAC-SHA256(MasterKey, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object) // - KeyEncKey := HMAC-SHA256(MasterKey, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey) // - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey)
// - enc_object_data := DAREv2_Enc(ObjectKey, object_data) // - enc_object_data := DAREv2_Enc(ObjectKey, object_data)
// - metadata <- IV // - metadata <- IV
// - metadata <- SealedKey // - metadata <- SealedKey
// Output: enc_object_data, metadata // Output: enc_object_data, metadata
//
// 2. Decrypt:
// Input: MasterKey, bucket, object, metadata, enc_object_data
// - IV <- metadata
// - SealedKey <- metadata
// - KeyEncKey := HMAC-SHA256(MasterKey, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
// - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
// //
// 2. Decrypt:
// Input: MasterKey, bucket, object, metadata, enc_object_data
// - IV <- metadata
// - SealedKey <- metadata
// - KeyEncKey := HMAC-SHA256(MasterKey, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
// - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
// //
// ### SSE-S3 and KMS // ### SSE-S3 and KMS
// //
// SSE-S3 requires that the KMS provides two functions: // SSE-S3 requires that the KMS provides two functions:
// 1. Generate(KeyID) -> (Key, EncKey)
// 2. Unseal(KeyID, EncKey) -> Key
// //
// 1. Encrypt: // 1. Generate(KeyID) -> (Key, EncKey)
// Input: KeyID, bucket, object, metadata, object_data
// - Key, EncKey := Generate(KeyID)
// - IV := Random({0,1}²⁵⁶)
// - ObjectKey := SHA256(Key, Random({0,1}²⁵⁶))
// - KeyEncKey := HMAC-SHA256(Key, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey)
// - enc_object_data := DAREv2_Enc(ObjectKey, object_data)
// - metadata <- IV
// - metadata <- KeyID
// - metadata <- EncKey
// - metadata <- SealedKey
// Output: enc_object_data, metadata
// //
// 2. Decrypt: // 2. Unseal(KeyID, EncKey) -> Key
// Input: bucket, object, metadata, enc_object_data
// - KeyID <- metadata
// - EncKey <- metadata
// - IV <- metadata
// - SealedKey <- metadata
// - Key := Unseal(KeyID, EncKey)
// - KeyEncKey := HMAC-SHA256(Key, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
// - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
// //
// 1. Encrypt:
// Input: KeyID, bucket, object, metadata, object_data
// - Key, EncKey := Generate(KeyID)
// - IV := Random({0,1}²⁵⁶)
// - ObjectKey := SHA256(Key, Random({0,1}²⁵⁶))
// - KeyEncKey := HMAC-SHA256(Key, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - SealedKey := DAREv2_Enc(KeyEncKey, ObjectKey)
// - enc_object_data := DAREv2_Enc(ObjectKey, object_data)
// - metadata <- IV
// - metadata <- KeyID
// - metadata <- EncKey
// - metadata <- SealedKey
// Output: enc_object_data, metadata
//
// 2. Decrypt:
// Input: bucket, object, metadata, enc_object_data
// - KeyID <- metadata
// - EncKey <- metadata
// - IV <- metadata
// - SealedKey <- metadata
// - Key := Unseal(KeyID, EncKey)
// - KeyEncKey := HMAC-SHA256(Key, IV || 'SSE-S3' || 'DAREv2-HMAC-SHA256' || bucket || '/' || object)
// - ObjectKey := DAREv2_Dec(KeyEncKey, SealedKey)
// - object_data := DAREv2_Dec(ObjectKey, enc_object_data)
// Output: object_data
package crypto package crypto

View File

@ -43,9 +43,9 @@ const (
) )
// Type represents an AWS SSE type: // Type represents an AWS SSE type:
// SSE-C // - SSE-C
// SSE-S3 // - SSE-S3
// SSE-KMS // - SSE-KMS
type Type interface { type Type interface {
fmt.Stringer fmt.Stringer

View File

@ -206,7 +206,6 @@ func TestTwoSimultaneousLocksForDifferentResources(t *testing.T) {
} }
// Test refreshing lock - refresh should always return true // Test refreshing lock - refresh should always return true
//
func TestSuccessfulLockRefresh(t *testing.T) { func TestSuccessfulLockRefresh(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("skipping test in short mode.") t.Skip("skipping test in short mode.")

View File

@ -24,35 +24,35 @@
// In general, an S3 ETag is an MD5 checksum of the object // In general, an S3 ETag is an MD5 checksum of the object
// content. However, there are many exceptions to this rule. // content. However, there are many exceptions to this rule.
// //
// // # Single-part Upload
// Single-part Upload
// //
// In case of a basic single-part PUT operation - without server // In case of a basic single-part PUT operation - without server
// side encryption or object compression - the ETag of an object // side encryption or object compression - the ETag of an object
// is its content MD5. // is its content MD5.
// //
// // # Multi-part Upload
// Multi-part Upload
// //
// The ETag of an object does not correspond to its content MD5 // The ETag of an object does not correspond to its content MD5
// when the object is uploaded in multiple parts via the S3 // when the object is uploaded in multiple parts via the S3
// multipart API. Instead, S3 first computes a MD5 of each part: // multipart API. Instead, S3 first computes a MD5 of each part:
// e1 := MD5(part-1) //
// e2 := MD5(part-2) // e1 := MD5(part-1)
// ... // e2 := MD5(part-2)
// eN := MD5(part-N) // ...
// eN := MD5(part-N)
// //
// Then, the ETag of the object is computed as MD5 of all individual // Then, the ETag of the object is computed as MD5 of all individual
// part checksums. S3 also encodes the number of parts into the ETag // part checksums. S3 also encodes the number of parts into the ETag
// by appending a -<number-of-parts> at the end: // by appending a -<number-of-parts> at the end:
// ETag := MD5(e1 || e2 || e3 ... || eN) || -N
// //
// For example: ceb8853ddc5086cc4ab9e149f8f09c88-5 // ETag := MD5(e1 || e2 || e3 ... || eN) || -N
//
// For example: ceb8853ddc5086cc4ab9e149f8f09c88-5
// //
// However, this scheme is only used for multipart objects that are // However, this scheme is only used for multipart objects that are
// not encrypted. // not encrypted.
// //
// Server-side Encryption // # Server-side Encryption
// //
// S3 specifies three types of server-side-encryption - SSE-C, SSE-S3 // S3 specifies three types of server-side-encryption - SSE-C, SSE-S3
// and SSE-KMS - with different semantics w.r.t. ETags. // and SSE-KMS - with different semantics w.r.t. ETags.
@ -75,12 +75,12 @@
// in case of SSE-C or SSE-KMS except that the ETag is well-formed. // in case of SSE-C or SSE-KMS except that the ETag is well-formed.
// //
// To put all of this into a simple rule: // To put all of this into a simple rule:
// SSE-S3 : ETag == MD5
// SSE-C : ETag != MD5
// SSE-KMS: ETag != MD5
// //
// SSE-S3 : ETag == MD5
// SSE-C : ETag != MD5
// SSE-KMS: ETag != MD5
// //
// Encrypted ETags // # Encrypted ETags
// //
// An S3 implementation has to remember the content MD5 of objects // An S3 implementation has to remember the content MD5 of objects
// in case of SSE-S3. However, storing the ETag of an encrypted // in case of SSE-S3. However, storing the ETag of an encrypted
@ -94,8 +94,7 @@
// encryption schemes. Such an ETag must be decrypted before sent to an // encryption schemes. Such an ETag must be decrypted before sent to an
// S3 client. // S3 client.
// //
// // # S3 Clients
// S3 Clients
// //
// There are many different S3 client implementations. Most of them // There are many different S3 client implementations. Most of them
// access the ETag by looking for the HTTP response header key "Etag". // access the ETag by looking for the HTTP response header key "Etag".
@ -206,27 +205,28 @@ func (e ETag) Parts() int {
// ETag. // ETag.
// //
// In general, a caller has to distinguish the following cases: // In general, a caller has to distinguish the following cases:
// - The object is a multipart object. In this case, // - The object is a multipart object. In this case,
// Format returns the ETag unmodified. // Format returns the ETag unmodified.
// - The object is a SSE-KMS or SSE-C encrypted single- // - The object is a SSE-KMS or SSE-C encrypted single-
// part object. In this case, Format returns the last // part object. In this case, Format returns the last
// 16 bytes of the encrypted ETag which will be a random // 16 bytes of the encrypted ETag which will be a random
// value. // value.
// - The object is a SSE-S3 encrypted single-part object. // - The object is a SSE-S3 encrypted single-part object.
// In this case, the caller has to decrypt the ETag first // In this case, the caller has to decrypt the ETag first
// before calling Format. // before calling Format.
// S3 clients expect that the ETag of an SSE-S3 encrypted // S3 clients expect that the ETag of an SSE-S3 encrypted
// single-part object is equal to the object's content MD5. // single-part object is equal to the object's content MD5.
// Formatting the SSE-S3 ETag before decryption will result // Formatting the SSE-S3 ETag before decryption will result
// in a random-looking ETag which an S3 client will not accept. // in a random-looking ETag which an S3 client will not accept.
// //
// Hence, a caller has to check: // Hence, a caller has to check:
// if method == SSE-S3 { //
// ETag, err := Decrypt(key, ETag) // if method == SSE-S3 {
// if err != nil { // ETag, err := Decrypt(key, ETag)
// } // if err != nil {
// } // }
// ETag = ETag.Format() // }
// ETag = ETag.Format()
func (e ETag) Format() ETag { func (e ETag) Format() ETag {
if !e.IsEncrypted() { if !e.IsEncrypted() {
return e return e
@ -359,8 +359,8 @@ func Parse(s string) (ETag, error) {
// parse parse s as an S3 ETag, returning the result. // parse parse s as an S3 ETag, returning the result.
// It operates in one of two modes: // It operates in one of two modes:
// - strict // - strict
// - non-strict // - non-strict
// //
// In strict mode, parse only accepts ETags that // In strict mode, parse only accepts ETags that
// are AWS S3 compatible. In particular, an AWS // are AWS S3 compatible. In particular, an AWS

View File

@ -56,15 +56,14 @@ func (r wrapReader) ETag() ETag {
// It is mainly used to provide a high-level io.Reader // It is mainly used to provide a high-level io.Reader
// access to the ETag computed by a low-level io.Reader: // access to the ETag computed by a low-level io.Reader:
// //
// content := etag.NewReader(r.Body, nil) // content := etag.NewReader(r.Body, nil)
// //
// compressedContent := Compress(content) // compressedContent := Compress(content)
// encryptedContent := Encrypt(compressedContent) // encryptedContent := Encrypt(compressedContent)
//
// // Now, we need an io.Reader that can access
// // the ETag computed over the content.
// reader := etag.Wrap(encryptedContent, content)
// //
// // Now, we need an io.Reader that can access
// // the ETag computed over the content.
// reader := etag.Wrap(encryptedContent, content)
func Wrap(wrapped, content io.Reader) io.Reader { func Wrap(wrapped, content io.Reader) io.Reader {
if t, ok := content.(Tagger); ok { if t, ok := content.(Tagger); ok {
return wrapReader{ return wrapReader{

View File

@ -72,7 +72,8 @@ func (f *Forwarder) ServeHTTP(w http.ResponseWriter, inReq *http.Request) {
} }
// customErrHandler is originally implemented to avoid having the following error // customErrHandler is originally implemented to avoid having the following error
// `http: proxy error: context canceled` printed by Golang //
// `http: proxy error: context canceled` printed by Golang
func (f *Forwarder) customErrHandler(w http.ResponseWriter, r *http.Request, err error) { func (f *Forwarder) customErrHandler(w http.ResponseWriter, r *http.Request, err error) {
if f.Logger != nil && err != context.Canceled { if f.Logger != nil && err != context.Canceled {
f.Logger(err) f.Logger(err)

View File

@ -39,7 +39,8 @@ import (
// Parse parses s as single-key KMS. The given string // Parse parses s as single-key KMS. The given string
// is expected to have the following format: // is expected to have the following format:
// <key-id>:<base64-key> //
// <key-id>:<base64-key>
// //
// The returned KMS implementation uses the parsed // The returned KMS implementation uses the parsed
// key ID and key to derive new DEKs and decrypt ciphertext. // key ID and key to derive new DEKs and decrypt ciphertext.

View File

@ -27,7 +27,8 @@ import (
// Target is the entity that we will receive // Target is the entity that we will receive
// a single log entry and Send it to the log target // a single log entry and Send it to the log target
// e.g. Send the log to a http server //
// e.g. Send the log to a http server
type Target interface { type Target interface {
String() string String() string
Endpoint() string Endpoint() string
@ -126,8 +127,9 @@ func initKafkaTargets(cfgMap map[string]kafka.Config) (tgts []Target, err error)
} }
// Split targets into two groups: // Split targets into two groups:
// group1 contains all targets of type t //
// group2 contains the remaining targets // group1 contains all targets of type t
// group2 contains the remaining targets
func splitTargets(targets []Target, t types.TargetType) (group1 []Target, group2 []Target) { func splitTargets(targets []Target, t types.TargetType) (group1 []Target, group2 []Target) {
for _, target := range targets { for _, target := range targets {
if target.Type() == t { if target.Type() == t {

View File

@ -128,9 +128,9 @@ var progressHeader = []byte{
// //
// Payload specification: // Payload specification:
// Progress message payload is an XML document containing information about the progress of a request. // Progress message payload is an XML document containing information about the progress of a request.
// * BytesScanned => Number of bytes that have been processed before being uncompressed (if the file is compressed). // - BytesScanned => Number of bytes that have been processed before being uncompressed (if the file is compressed).
// * BytesProcessed => Number of bytes that have been processed after being uncompressed (if the file is compressed). // - BytesProcessed => Number of bytes that have been processed after being uncompressed (if the file is compressed).
// * BytesReturned => Current number of bytes of records payload data returned by S3. // - BytesReturned => Current number of bytes of records payload data returned by S3.
// //
// For uncompressed files, BytesScanned and BytesProcessed are equal. // For uncompressed files, BytesScanned and BytesProcessed are equal.
// //
@ -138,11 +138,12 @@ var progressHeader = []byte{
// //
// <?xml version="1.0" encoding="UTF-8"?> // <?xml version="1.0" encoding="UTF-8"?>
// <Progress> // <Progress>
// <BytesScanned>512</BytesScanned>
// <BytesProcessed>1024</BytesProcessed>
// <BytesReturned>1024</BytesReturned>
// </Progress>
// //
// <BytesScanned>512</BytesScanned>
// <BytesProcessed>1024</BytesProcessed>
// <BytesReturned>1024</BytesReturned>
//
// </Progress>
func newProgressMessage(bytesScanned, bytesProcessed, bytesReturned int64) []byte { func newProgressMessage(bytesScanned, bytesProcessed, bytesReturned int64) []byte {
payload := []byte(`<?xml version="1.0" encoding="UTF-8"?><Progress><BytesScanned>` + payload := []byte(`<?xml version="1.0" encoding="UTF-8"?><Progress><BytesScanned>` +
strconv.FormatInt(bytesScanned, 10) + `</BytesScanned><BytesProcessed>` + strconv.FormatInt(bytesScanned, 10) + `</BytesScanned><BytesProcessed>` +
@ -167,9 +168,9 @@ var statsHeader = []byte{
// //
// Payload specification: // Payload specification:
// Stats message payload is an XML document containing information about a request's stats when processing is complete. // Stats message payload is an XML document containing information about a request's stats when processing is complete.
// * BytesScanned => Number of bytes that have been processed before being uncompressed (if the file is compressed). // - BytesScanned => Number of bytes that have been processed before being uncompressed (if the file is compressed).
// * BytesProcessed => Number of bytes that have been processed after being uncompressed (if the file is compressed). // - BytesProcessed => Number of bytes that have been processed after being uncompressed (if the file is compressed).
// * BytesReturned => Total number of bytes of records payload data returned by S3. // - BytesReturned => Total number of bytes of records payload data returned by S3.
// //
// For uncompressed files, BytesScanned and BytesProcessed are equal. // For uncompressed files, BytesScanned and BytesProcessed are equal.
// //
@ -177,9 +178,11 @@ var statsHeader = []byte{
// //
// <?xml version="1.0" encoding="UTF-8"?> // <?xml version="1.0" encoding="UTF-8"?>
// <Stats> // <Stats>
// <BytesScanned>512</BytesScanned> //
// <BytesProcessed>1024</BytesProcessed> // <BytesScanned>512</BytesScanned>
// <BytesReturned>1024</BytesReturned> // <BytesProcessed>1024</BytesProcessed>
// <BytesReturned>1024</BytesReturned>
//
// </Stats> // </Stats>
func newStatsMessage(bytesScanned, bytesProcessed, bytesReturned int64) []byte { func newStatsMessage(bytesScanned, bytesProcessed, bytesReturned int64) []byte {
payload := []byte(`<?xml version="1.0" encoding="UTF-8"?><Stats><BytesScanned>` + payload := []byte(`<?xml version="1.0" encoding="UTF-8"?><Stats><BytesScanned>` +

View File

@ -18,6 +18,7 @@
package smart package smart
// Defined in <linux/nvme_ioctl.h> // Defined in <linux/nvme_ioctl.h>
//
//nolint:structcheck,deadcode //nolint:structcheck,deadcode
type nvmePassthruCommand struct { type nvmePassthruCommand struct {
opcode uint8 opcode uint8
@ -138,6 +139,7 @@ type nvmeSMARTLog struct {
} // 512 bytes } // 512 bytes
// NVMeDevice represents drive data about NVMe drives // NVMeDevice represents drive data about NVMe drives
//
//nolint:structcheck //nolint:structcheck
type NVMeDevice struct { type NVMeDevice struct {
Name string Name string