Mirror of https://github.com/minio/minio.git, synced 2024-12-24 06:05:55 -05:00
add codespell action (#18818)
Original work here, #18474, refixed and updated.
parent 21d60eab7c
commit dd2542e96c
.codespellrc: new file, 8 lines added
@@ -0,0 +1,8 @@
+[codespell]
+# certs_test.go - has lots of certificates.
+skip = go.mod,go.sum,*.txt,LICENSE,*.zip,.git,*.pdf,*.svg,.codespellrc,CREDITS,certs_test.go
+check-hidden = true
+ignore-regex = \b(newfolder/afile|filterIn|HelpES)\b
+ignore-words-list = inout,bui,to,bu,te,ot,toi,ist,parms,flate
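For context, codespell reads a .codespellrc found in the working directory on its own, so the CI job added below and a local run share the settings above. A minimal local check (a sketch, assuming codespell is installed from PyPI; run from the repository root so the config is picked up):

    pip install codespell
    codespell    # picks up .codespellrc from the current directory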
.github/workflows/codespell.yml: new file (vendored), 20 lines added
@@ -0,0 +1,20 @@
+---
+name: Codespell
+
+on:
+  pull_request:
+    branches: [master]
+
+permissions:
+  contents: read
+
+jobs:
+  codespell:
+    name: Check for spelling errors
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Codespell
+        uses: codespell-project/actions-codespell@v2
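Note: the workflow grants only read access to repository contents, runs on pull requests against master, and passes no inputs to the action, so the check presumably falls back to the .codespellrc at the repository root for its skip list and ignore rules.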
@@ -3054,7 +3054,7 @@ func createHostAnonymizerForFSMode() map[string]string {
 // anonymizeHost - Add entries related to given endpoint in the host anonymizer map
 // The health report data can contain the hostname in various forms e.g. host, host:port,
 // host:port/drivepath, full url (http://host:port/drivepath)
-// The anonymizer map will have mappings for all these varients for efficiently replacing
+// The anonymizer map will have mappings for all these variants for efficiently replacing
 // any of these strings to the anonymized versions at the time of health report generation.
 func anonymizeHost(hostAnonymizer map[string]string, endpoint Endpoint, poolNum int, srvrNum int) {
 	if len(endpoint.Host) == 0 {
@@ -3102,7 +3102,7 @@ func anonymizeHost(hostAnonymizer map[string]string, endpoint Endpoint, poolNum
 	}
 }

-// createHostAnonymizer - Creats a map of various strings to corresponding anonymized names
+// createHostAnonymizer - Creates a map of various strings to corresponding anonymized names
 func createHostAnonymizer() map[string]string {
 	if !globalIsDistErasure {
 		return createHostAnonymizerForFSMode()
@@ -959,7 +959,7 @@ var errorCodes = errorCodeMap{
 	},
 	ErrReplicationBandwidthLimitError: {
 		Code: "XMinioAdminReplicationBandwidthLimitError",
-		Description: "Bandwidth limit for remote target must be atleast 100MBps",
+		Description: "Bandwidth limit for remote target must be at least 100MBps",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
 	ErrReplicationNoExistingObjects: {
@@ -287,7 +287,7 @@ func mustNewSignedRequest(method string, urlStr string, contentLength int64, bod
 	req := mustNewRequest(method, urlStr, contentLength, body, t)
 	cred := globalActiveCred
 	if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
-		t.Fatalf("Unable to inititalized new signed http request %s", err)
+		t.Fatalf("Unable to initialized new signed http request %s", err)
 	}
 	return req
 }
@@ -298,7 +298,7 @@ func mustNewSignedV2Request(method string, urlStr string, contentLength int64, b
 	req := mustNewRequest(method, urlStr, contentLength, body, t)
 	cred := globalActiveCred
 	if err := signRequestV2(req, cred.AccessKey, cred.SecretKey); err != nil {
-		t.Fatalf("Unable to inititalized new signed http request %s", err)
+		t.Fatalf("Unable to initialized new signed http request %s", err)
 	}
 	return req
 }
@@ -309,7 +309,7 @@ func mustNewPresignedV2Request(method string, urlStr string, contentLength int64
 	req := mustNewRequest(method, urlStr, contentLength, body, t)
 	cred := globalActiveCred
 	if err := preSignV2(req, cred.AccessKey, cred.SecretKey, time.Now().Add(10*time.Minute).Unix()); err != nil {
-		t.Fatalf("Unable to inititalized new signed http request %s", err)
+		t.Fatalf("Unable to initialized new signed http request %s", err)
 	}
 	return req
 }
@@ -320,7 +320,7 @@ func mustNewPresignedRequest(method string, urlStr string, contentLength int64,
 	req := mustNewRequest(method, urlStr, contentLength, body, t)
 	cred := globalActiveCred
 	if err := preSignV4(req, cred.AccessKey, cred.SecretKey, time.Now().Add(10*time.Minute).Unix()); err != nil {
-		t.Fatalf("Unable to inititalized new signed http request %s", err)
+		t.Fatalf("Unable to initialized new signed http request %s", err)
 	}
 	return req
 }
@@ -491,7 +491,7 @@ func TestValidateAdminSignature(t *testing.T) {
 	for i, testCase := range testCases {
 		req := mustNewRequest(http.MethodGet, "http://localhost:9000/", 0, nil, t)
 		if err := signRequestV4(req, testCase.AccessKey, testCase.SecretKey); err != nil {
-			t.Fatalf("Unable to inititalized new signed http request %s", err)
+			t.Fatalf("Unable to initialized new signed http request %s", err)
 		}
 		_, _, s3Error := validateAdminSignature(ctx, req, globalMinioDefaultRegion)
 		if s3Error != testCase.ErrCode {
@@ -44,7 +44,7 @@ import (
 //     bucket: mybucket # Bucket where this batch job will expire matching objects from
 //     prefix: myprefix # (Optional) Prefix under which this job will expire objects matching the rules below.
 //     rules:
-//       - type: object # regular objects with zero ore more older versions
+//       - type: object # regular objects with zero or more older versions
 //         name: NAME # match object names that satisfy the wildcard expression.
 //         olderThan: 70h # match objects older than this value
 //         createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
@@ -558,7 +558,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
 			logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))

 		case <-ctx.Done():
-			// persist in-memory state immediately before exiting due to context cancelation.
+			// persist in-memory state immediately before exiting due to context cancellation.
 			logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
 			return

@@ -30,7 +30,7 @@ expire: # Expire objects that match a condition
   bucket: mybucket # Bucket where this batch job will expire matching objects from
   prefix: myprefix # (Optional) Prefix under which this job will expire objects matching the rules below.
   rules:
-    - type: object # regular objects with zero ore more older versions
+    - type: object # regular objects with zero or more older versions
       name: NAME # match object names that satisfy the wildcard expression.
       olderThan: 70h # match objects older than this value
       createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
@@ -134,7 +134,7 @@ func (b *streamingBitrotReader) Close() error {
 		return nil
 	}
 	if closer, ok := b.rc.(io.Closer); ok {
-		// drain the body for connection re-use at network layer.
+		// drain the body for connection reuse at network layer.
 		xhttp.DrainBody(struct {
 			io.Reader
 			io.Closer
@@ -261,7 +261,7 @@ func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointS
 			retries++
 			// after 20 retries start logging that servers are not reachable yet
 			if retries >= 20 {
-				logger.Info(fmt.Sprintf("Waiting for atleast %d remote servers with valid configuration to be online", len(clnts)/2))
+				logger.Info(fmt.Sprintf("Waiting for at least %d remote servers with valid configuration to be online", len(clnts)/2))
 				if len(offlineEndpoints) > 0 {
 					logger.Info(fmt.Sprintf("Following servers are currently offline or unreachable %s", offlineEndpoints))
 				}
@@ -424,7 +424,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 		return
 	}

-	// Content-Md5 is requied should be set
+	// Content-Md5 is required should be set
 	// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
 	if _, ok := r.Header[xhttp.ContentMD5]; !ok {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL)
@@ -252,7 +252,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
 		},
 		// Test case - 3.
 		// Testing for signature mismatch error.
-		// setting invalid acess and secret key.
+		// setting invalid access and secret key.
 		{
 			bucketName: bucketName,
 			accessKey:  "abcd",
@@ -415,7 +415,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
 			shouldPass: false,
 		},
 		// Test case - 6.
-		// Setting a negative value to max-uploads paramater, should result in http.StatusBadRequest.
+		// Setting a negative value to max-uploads parameter, should result in http.StatusBadRequest.
 		{
 			bucket: bucketName,
 			prefix: "",
@@ -111,7 +111,7 @@ func (api objectAPIHandlers) listObjectVersionsHandler(w http.ResponseWriter, r

 	listObjectVersions := objectAPI.ListObjectVersions

-	// Inititate a list object versions operation based on the input params.
+	// Initiate a list object versions operation based on the input params.
 	// On success would return back ListObjectsInfo object to be
 	// marshaled into S3 compatible XML header.
 	listObjectVersionsInfo, err := listObjectVersions(ctx, bucket, prefix, marker, versionIDMarker, delimiter, maxkeys)
@@ -201,10 +201,10 @@ func (api objectAPIHandlers) listObjectsV2Handler(ctx context.Context, w http.Re
 	)

 	if r.Header.Get(xMinIOExtract) == "true" && strings.Contains(prefix, archivePattern) {
-		// Inititate a list objects operation inside a zip file based in the input params
+		// Initiate a list objects operation inside a zip file based in the input params
 		listObjectsV2Info, err = listObjectsV2InArchive(ctx, objectAPI, bucket, prefix, token, delimiter, maxKeys, fetchOwner, startAfter)
 	} else {
-		// Inititate a list objects operation based on the input params.
+		// Initiate a list objects operation based on the input params.
 		// On success would return back ListObjectsInfo object to be
 		// marshaled into S3 compatible XML header.
 		listObjectsV2Info, err = objectAPI.ListObjectsV2(ctx, bucket, prefix, token, delimiter, maxKeys, fetchOwner, startAfter)
@@ -304,7 +304,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http

 	listObjects := objectAPI.ListObjects

-	// Inititate a list objects operation based on the input params.
+	// Initiate a list objects operation based on the input params.
 	// On success would return back ListObjectsInfo object to be
 	// marshaled into S3 compatible XML header.
 	listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
@@ -317,7 +317,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		if err != nil {
 			t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
 		}
-		// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic ofthe handler.
+		// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
 		// Call the ServeHTTP to execute the handler.
 		apiRouter.ServeHTTP(recV4, reqV4)
 		if recV4.Code != testCase.expectedRespStatus {
@@ -331,7 +331,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		if err != nil {
 			t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
 		}
-		// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic ofthe handler.
+		// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
 		// Call the ServeHTTP to execute the handler.
 		apiRouter.ServeHTTP(recV2, reqV2)
 		if recV2.Code != testCase.expectedRespStatus {
@@ -407,7 +407,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		if err != nil {
 			t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
 		}
-		// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic ofthe handler.
+		// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
 		// Call the ServeHTTP to execute the handler.
 		apiRouter.ServeHTTP(recV4, reqV4)
 		if recV4.Code != testPolicy.expectedRespStatus {
@@ -421,7 +421,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		if err != nil {
 			t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
 		}
-		// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic ofthe handler.
+		// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
 		// Call the ServeHTTP to execute the handler.
 		apiRouter.ServeHTTP(recV2, reqV2)
 		if recV2.Code != testPolicy.expectedRespStatus {
@@ -219,7 +219,7 @@ type ReplicateDecision struct {
 	targetsMap map[string]replicateTargetDecision
 }

-// ReplicateAny returns true if atleast one target qualifies for replication
+// ReplicateAny returns true if at least one target qualifies for replication
 func (d ReplicateDecision) ReplicateAny() bool {
 	for _, t := range d.targetsMap {
 		if t.Replicate {
@@ -229,7 +229,7 @@ func (d ReplicateDecision) ReplicateAny() bool {
 	return false
 }

-// Synchronous returns true if atleast one target qualifies for synchronous replication
+// Synchronous returns true if at least one target qualifies for synchronous replication
 func (d ReplicateDecision) Synchronous() bool {
 	for _, t := range d.targetsMap {
 		if t.Synchronous {
@@ -336,7 +336,7 @@ type ReplicationState struct {
 	DeleteMarker bool // represents DeleteMarker replication state
 	ReplicationTimeStamp time.Time // timestamp when last replication activity happened
 	ReplicationStatusInternal string // stringified representation of all replication activity
-	// VersionPurgeStatusInternal is internally in the format "arn1=PENDING;arn2=COMMPLETED;"
+	// VersionPurgeStatusInternal is internally in the format "arn1=PENDING;arn2=COMPLETED;"
 	VersionPurgeStatusInternal string // stringified representation of all version purge statuses
 	ReplicateDecisionStr string // stringified representation of replication decision for each target
 	Targets map[string]replication.StatusType // map of ARN->replication status for ongoing replication activity
@@ -495,7 +495,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
 		}
 		tgtClnt := globalBucketTargetSys.GetRemoteTargetClient(bucket, tgtEntry.Arn)
 		if tgtClnt == nil {
-			// Skip stale targets if any and log them to be missing atleast once.
+			// Skip stale targets if any and log them to be missing at least once.
 			logger.LogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtEntry.Arn), tgtEntry.Arn)
 			sendEvent(eventArgs{
 				EventName: event.ObjectReplicationNotTracked,
@@ -2997,7 +2997,7 @@ func (p *ReplicationPool) startResyncRoutine(ctx context.Context, buckets []Buck
 		}
 		duration := time.Duration(r.Float64() * float64(time.Minute))
 		if duration < time.Second {
-			// Make sure to sleep atleast a second to avoid high CPU ticks.
+			// Make sure to sleep at least a second to avoid high CPU ticks.
 			duration = time.Second
 		}
 		time.Sleep(duration)
@@ -60,7 +60,7 @@ func initCallhome(ctx context.Context, objAPI ObjectLayer) {
 			// sleep for some time and try again.
 			duration := time.Duration(r.Float64() * float64(globalCallhomeConfig.FrequencyDur()))
 			if duration < time.Second {
-				// Make sure to sleep atleast a second to avoid high CPU ticks.
+				// Make sure to sleep at least a second to avoid high CPU ticks.
 				duration = time.Second
 			}
 			time.Sleep(duration)
@@ -847,7 +847,7 @@ func loadRootCredentials() {
 // It depends on KMS env variables and global cli flags.
 func handleKMSConfig() {
 	if env.IsSet(kms.EnvKMSSecretKey) && env.IsSet(kms.EnvKESEndpoint) {
-		logger.Fatal(errors.New("ambigious KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKMSSecretKey, kms.EnvKESEndpoint))
+		logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKMSSecretKey, kms.EnvKESEndpoint))
 	}

 	if env.IsSet(kms.EnvKMSSecretKey) {
@@ -860,10 +860,10 @@ func handleKMSConfig() {
 	if env.IsSet(kms.EnvKESEndpoint) {
 		if env.IsSet(kms.EnvKESAPIKey) {
 			if env.IsSet(kms.EnvKESClientKey) {
-				logger.Fatal(errors.New("ambigious KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKESAPIKey, kms.EnvKESClientKey))
+				logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKESAPIKey, kms.EnvKESClientKey))
 			}
 			if env.IsSet(kms.EnvKESClientCert) {
-				logger.Fatal(errors.New("ambigious KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKESAPIKey, kms.EnvKESClientCert))
+				logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKESAPIKey, kms.EnvKESClientCert))
 			}
 		}
 		if !env.IsSet(kms.EnvKESKeyName) {
@@ -499,7 +499,7 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {

 	if len(globalDomainNames) != 0 && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil {
 		if globalDNSConfig != nil {
-			// if global DNS is already configured, indicate with a warning, incase
+			// if global DNS is already configured, indicate with a warning, in case
 			// users are confused.
 			logger.LogIf(ctx, fmt.Errorf("DNS store is already configured with %s, etcd is not used for DNS store", globalDNSConfig))
 		} else {
@@ -79,7 +79,7 @@ func initDataScanner(ctx context.Context, objAPI ObjectLayer) {
 		runDataScanner(ctx, objAPI)
 		duration := time.Duration(r.Float64() * float64(scannerCycle.Load()))
 		if duration < time.Second {
-			// Make sure to sleep atleast a second to avoid high CPU ticks.
+			// Make sure to sleep at least a second to avoid high CPU ticks.
 			duration = time.Second
 		}
 		time.Sleep(duration)
@@ -710,7 +710,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
 		partial: func(entries metaCacheEntries, errs []error) {
 			entry, ok := entries.resolve(&resolver)
 			if !ok {
-				// check if we can get one entry atleast
+				// check if we can get one entry at least
 				// proceed to heal nonetheless, since
 				// this object might be dangling.
 				entry, _ = entries.firstFound()
@@ -1471,7 +1471,7 @@ func (d *dynamicSleeper) Sleep(ctx context.Context, base time.Duration) {
 }

 // Update the current settings and cycle all waiting.
-// Parameters are the same as in the contructor.
+// Parameters are the same as in the constructor.
 func (d *dynamicSleeper) Update(factor float64, maxWait time.Duration) error {
 	d.mu.Lock()
 	defer d.mu.Unlock()
@@ -980,7 +980,7 @@ type objectIO interface {
 // The loader is optimistic and has no locking, but tries 5 times before giving up.
 // If the object is not found, a nil error with empty data usage cache is returned.
 func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string) error {
-	// By defaut, empty data usage cache
+	// By default, empty data usage cache
 	*d = dataUsageCache{}

 	load := func(name string, timeout time.Duration) (bool, error) {
@@ -664,7 +664,7 @@ func (d *DecryptBlocksReader) buildDecrypter(partID int) error {
 	mac.Write(partIDbin[:])
 	partEncryptionKey := mac.Sum(nil)

-	// Limit the reader, so the decryptor doesnt receive bytes
+	// Limit the reader, so the decryptor doesn't receive bytes
 	// from the next part (different DARE stream)
 	encLenToRead := d.parts[d.partIndex].Size - d.partEncRelOffset
 	decrypter, err := newDecryptReaderWithObjectKey(io.LimitReader(d.reader, encLenToRead), partEncryptionKey, d.startSeqNum)
@@ -751,7 +751,7 @@ func (o ObjectInfo) DecryptedSize() (int64, error) {
 // However, DecryptETag does not try to decrypt the ETag if
 // it consists of a 128 bit hex value (32 hex chars) and exactly
 // one '-' followed by a 32-bit number.
-// This special case adresses randomly-generated ETags generated
+// This special case addresses randomly-generated ETags generated
 // by the MinIO server when running in non-compat mode. These
 // random ETags are not encrypt.
 //
@@ -158,7 +158,7 @@ func (p *parallelReader) Read(dst [][]byte) ([][]byte, error) {
 		bufIdx := p.readerToBuf[i]
 		if p.buf[bufIdx] == nil {
 			// Reading first time on this disk, hence the buffer needs to be allocated.
-			// Subsequent reads will re-use this buffer.
+			// Subsequent reads will reuse this buffer.
 			p.buf[bufIdx] = make([]byte, p.shardSize)
 		}
 		// For the last shard, the shardsize might be less than previous shard sizes.
@@ -144,7 +144,7 @@ func TestErasureDecode(t *testing.T) {
 		}
 		if err == nil {
 			if content := writer.Bytes(); !bytes.Equal(content, data[test.offset:test.offset+test.length]) {
-				t.Errorf("Test %d: read retruns wrong file content.", i)
+				t.Errorf("Test %d: read returns wrong file content.", i)
 			}
 		}

@@ -31,7 +31,7 @@ import (

 // Returns the latest updated FileInfo files and error in case of failure.
 func getLatestFileInfo(ctx context.Context, partsMetadata []FileInfo, defaultParityCount int, errs []error) (FileInfo, error) {
-	// There should be atleast half correct entries, if not return failure
+	// There should be at least half correct entries, if not return failure
 	expectedRQuorum := len(partsMetadata) / 2
 	if defaultParityCount == 0 {
 		// if parity count is '0', we expected all entries to be present.
@@ -57,7 +57,7 @@ func getLatestFileInfo(ctx context.Context, partsMetadata []FileInfo, defaultPar
 		return FileInfo{}, errErasureReadQuorum
 	}

-	// Interate through all the modTimes and count the FileInfo(s) with latest time.
+	// Iterate through all the modTimes and count the FileInfo(s) with latest time.
 	for index, t := range modTimes {
 		if partsMetadata[index].IsValid() && t.Equal(modTime) {
 			latestFileInfo = partsMetadata[index]
@@ -108,7 +108,7 @@ func (er erasureObjects) listAndHeal(bucket, prefix string, scanMode madmin.Heal
 		partial: func(entries metaCacheEntries, _ []error) {
 			entry, ok := entries.resolve(&resolver)
 			if !ok {
-				// check if we can get one entry atleast
+				// check if we can get one entry at least
 				// proceed to heal nonetheless.
 				entry, _ = entries.firstFound()
 			}
@@ -1067,7 +1067,7 @@ func TestHealObjectCorruptedPools(t *testing.T) {
 	for i := 0; i < (nfi.Erasure.DataBlocks + nfi.Erasure.ParityBlocks); i++ {
 		stats, _ := erasureDisks[i].StatInfoFile(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false)
 		if len(stats) != 0 {
-			t.Errorf("Expected xl.meta file to be not present, but succeeeded")
+			t.Errorf("Expected xl.meta file to be not present, but succeeded")
 		}
 	}
 }
@@ -64,7 +64,7 @@ func reduceErrs(errs []error, ignoredErrs []error) (maxCount int, maxErr error)
 		if IsErrIgnored(err, ignoredErrs...) {
 			continue
 		}
-		// Errors due to context cancelation may be wrapped - group them by context.Canceled.
+		// Errors due to context cancellation may be wrapped - group them by context.Canceled.
 		if errors.Is(err, context.Canceled) {
 			errorCounts[context.Canceled]++
 			continue
@@ -94,7 +94,7 @@ func TestReduceErrs(t *testing.T) {
 			},
 			nil, nil,
 		},
-		// Checks if wrapped context cancelation errors are grouped as one.
+		// Checks if wrapped context cancellation errors are grouped as one.
 		{canceledErrs, nil, context.Canceled},
 	}
 	// Validates list of all the testcases for returning valid errors.
@@ -483,7 +483,7 @@ func listObjectParities(partsMetadata []FileInfo, errs []error) (parities []int)
 // readQuorum is the min required disks to read data.
 // writeQuorum is the min required disks to write data.
 func objectQuorumFromMeta(ctx context.Context, partsMetaData []FileInfo, errs []error, defaultParityCount int) (objectReadQuorum, objectWriteQuorum int, err error) {
-	// There should be atleast half correct entries, if not return failure
+	// There should be at least half correct entries, if not return failure
 	expectedRQuorum := len(partsMetaData) / 2
 	if defaultParityCount == 0 {
 		// if parity count is '0', we expected all entries to be present.
@@ -103,7 +103,7 @@ func (er erasureObjects) checkUploadIDExists(ctx context.Context, bucket, object
 	return fi, partsMetadata, err
 }

-// Removes part.meta given by partName belonging to a mulitpart upload from minioMetaBucket
+// Removes part.meta given by partName belonging to a multipart upload from minioMetaBucket
 func (er erasureObjects) removePartMeta(bucket, object, uploadID, dataDir string, partNumber int) {
 	uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)
 	curpartPath := pathJoin(uploadIDPath, dataDir, fmt.Sprintf("part.%d", partNumber))
@@ -127,7 +127,7 @@ func (er erasureObjects) removePartMeta(bucket, object, uploadID, dataDir string
 	g.Wait()
 }

-// Removes part given by partName belonging to a mulitpart upload from minioMetaBucket
+// Removes part given by partName belonging to a multipart upload from minioMetaBucket
 func (er erasureObjects) removeObjectPart(bucket, object, uploadID, dataDir string, partNumber int) {
 	uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)
 	curpartPath := pathJoin(uploadIDPath, dataDir, fmt.Sprintf("part.%d", partNumber))
@@ -656,7 +656,7 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
 	var buffer []byte
 	switch size := data.Size(); {
 	case size == 0:
-		buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF
+		buffer = make([]byte, 1) // Allocate at least a byte to reach EOF
 	case size == -1:
 		if size := data.ActualSize(); size > 0 && size < fi.Erasure.BlockSize {
 			// Account for padding and forced compression overhead and encryption.
@@ -955,7 +955,7 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
 	for i := range onlineMeta {
 		// verify metadata is valid, it has similar erasure info
 		// as well as common modtime, if modtime is not possible
-		// verify if it has common "etag" atleast.
+		// verify if it has common "etag" at least.
 		if onlineMeta[i].IsValid() && onlineMeta[i].Erasure.Equal(fi.Erasure) {
 			ok := onlineMeta[i].ModTime.Equal(modTime)
 			if modTime.IsZero() || modTime.Equal(timeSentinel) {
@@ -1150,7 +1150,7 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *
 	var buffer []byte
 	switch size := data.Size(); {
 	case size == 0:
-		buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF
+		buffer = make([]byte, 1) // Allocate at least a byte to reach EOF
 	case size >= fi.Erasure.BlockSize:
 		buffer = globalBytePoolCap.Get()
 		defer globalBytePoolCap.Put(buffer)
@@ -1401,7 +1401,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 	var buffer []byte
 	switch size := data.Size(); {
 	case size == 0:
-		buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF
+		buffer = make([]byte, 1) // Allocate at least a byte to reach EOF
 	case size >= fi.Erasure.BlockSize || size == -1:
 		buffer = globalBytePoolCap.Get()
 		defer globalBytePoolCap.Put(buffer)
@@ -799,7 +799,7 @@ func TestPutObjectNoQuorumSmall(t *testing.T) {
 }

 // Test PutObject twice, one small and another bigger
-// than small data thresold and checks reading them again
+// than small data threshold and checks reading them again
 func TestPutObjectSmallInlineData(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
|
|||||||
vmware = "VMWare"
|
vmware = "VMWare"
|
||||||
)
|
)
|
||||||
|
|
||||||
// HealthOptions takes input options to return sepcific information
|
// HealthOptions takes input options to return specific information
|
||||||
type HealthOptions struct {
|
type HealthOptions struct {
|
||||||
Maintenance bool
|
Maintenance bool
|
||||||
DeploymentType string
|
DeploymentType string
|
||||||
@@ -2435,7 +2435,7 @@ func (z *erasureServerPools) PutObjectMetadata(ctx context.Context, bucket, obje
 	}

 	opts.MetadataChg = true
-	// We don't know the size here set 1GiB atleast.
+	// We don't know the size here set 1GiB at least.
 	idx, err := z.getPoolIdxExistingWithOpts(ctx, bucket, object, opts)
 	if err != nil {
 		return ObjectInfo{}, err
@@ -2453,7 +2453,7 @@ func (z *erasureServerPools) PutObjectTags(ctx context.Context, bucket, object s

 	opts.MetadataChg = true

-	// We don't know the size here set 1GiB atleast.
+	// We don't know the size here set 1GiB at least.
 	idx, err := z.getPoolIdxExistingWithOpts(ctx, bucket, object, opts)
 	if err != nil {
 		return ObjectInfo{}, err
@@ -85,7 +85,7 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, data
 		if write < int64(len(block)) {
 			n, err := dst.Write(block[:write])
 			if err != nil {
-				// The writer will be closed incase of range queries, which will emit ErrClosedPipe.
+				// The writer will be closed in case of range queries, which will emit ErrClosedPipe.
 				// The reader pipe might be closed at ListObjects io.EOF ignore it.
 				if err != io.ErrClosedPipe && err != io.EOF {
 					logger.LogIf(ctx, err)
@@ -99,7 +99,7 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, data
 		// Copy the block.
 		n, err := dst.Write(block)
 		if err != nil {
-			// The writer will be closed incase of range queries, which will emit ErrClosedPipe.
+			// The writer will be closed in case of range queries, which will emit ErrClosedPipe.
 			// The reader pipe might be closed at ListObjects io.EOF ignore it.
 			if err != io.ErrClosedPipe && err != io.EOF {
 				logger.LogIf(ctx, err)
@@ -483,7 +483,7 @@ func TestGetErasureID(t *testing.T) {

 	formats[2].ID = "bad-id"
 	if _, err = formatErasureGetDeploymentID(quorumFormat, formats); !errors.Is(err, errCorruptedFormat) {
-		t.Fatalf("Unexpect error %s", err)
+		t.Fatalf("Unexpected error %s", err)
 	}
 }

@@ -50,7 +50,7 @@ func (log *minioLogger) Printf(sessionID string, format string, v ...interface{}
 	}
 }

-// PrintCommand impelment Logger
+// PrintCommand implement Logger
 func (log *minioLogger) PrintCommand(sessionID string, command string, params string) {
 	if serverDebugLog {
 		if command == "PASS" {
@@ -61,7 +61,7 @@ func (log *minioLogger) PrintCommand(sessionID string, command string, params st
 	}
 }

-// PrintResponse impelment Logger
+// PrintResponse implement Logger
 func (log *minioLogger) PrintResponse(sessionID string, code int, message string) {
 	if serverDebugLog {
 		logger.Info("%s < %d %s", sessionID, code, message)
@@ -263,7 +263,7 @@ func isKMSReq(r *http.Request) bool {

 // Supported Amz date headers.
 var amzDateHeaders = []string{
-	// Do not chane this order, x-amz-date value should be
+	// Do not change this order, x-amz-date value should be
 	// validated first.
 	"x-amz-date",
 	"date",
@@ -433,7 +433,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
 		partial: func(entries metaCacheEntries, _ []error) {
 			entry, ok := entries.resolve(&resolver)
 			if !ok {
-				// check if we can get one entry atleast
+				// check if we can get one entry at least
 				// proceed to heal nonetheless.
 				entry, _ = entries.firstFound()
 			}
@@ -293,7 +293,7 @@ var (
 	// Global server's network statistics
 	globalConnStats = newConnStats()

-	// Global HTTP request statisitics
+	// Global HTTP request statistics
 	globalHTTPStats = newHTTPStats()

 	// Global bucket network and API statistics
@@ -206,7 +206,7 @@ func getReqAccessCred(r *http.Request, region string) (cred auth.Credentials) {
 	return cred
 }

-// Extract request params to be sent with event notifiation.
+// Extract request params to be sent with event notification.
 func extractReqParams(r *http.Request) map[string]string {
 	if r == nil {
 		return nil
@@ -237,7 +237,7 @@ func extractReqParams(r *http.Request) map[string]string {
 	return m
 }

-// Extract response elements to be sent with event notifiation.
+// Extract response elements to be sent with event notification.
 func extractRespElements(w http.ResponseWriter) map[string]string {
 	if w == nil {
 		return map[string]string{}
@@ -152,7 +152,7 @@ func ReadinessCheckHandler(w http.ResponseWriter, r *http.Request) {

 // LivenessCheckHandler checks whether MinIO is up. It differs from the
 // readiness handler since a failing liveness check causes pod restarts
-// in K8S enviromnents. Therefore, it does not contact external systems.
+// in K8S environments. Therefore, it does not contact external systems.
 func LivenessCheckHandler(w http.ResponseWriter, r *http.Request) {
 	if objLayer := newObjectLayerFn(); objLayer == nil {
 		w.Header().Set(xhttp.MinIOServerStatus, unavailable) // Service not initialized yet
@@ -140,7 +140,7 @@ func pickRelevantGoroutines() (gs []string) {
 	buf := debug.Stack()
 	// runtime stack of go routines will be listed with 2 blank spaces between each of them, so split on "\n\n" .
 	for _, g := range strings.Split(string(buf), "\n\n") {
-		// Again split on a new line, the first line of the second half contaisn the info about the go routine.
+		// Again split on a new line, the first line of the second half contains the info about the go routine.
 		sl := strings.SplitN(g, "\n", 2)
 		if len(sl) != 2 {
 			continue
@@ -47,7 +47,7 @@ func initLicenseUpdateJob(ctx context.Context, objAPI ObjectLayer) {
 			// sleep for some time and try again.
 			duration := time.Duration(r.Float64() * float64(time.Hour))
 			if duration < time.Second {
-				// Make sure to sleep atleast a second to avoid high CPU ticks.
+				// Make sure to sleep at least a second to avoid high CPU ticks.
 				duration = time.Second
 			}
 			time.Sleep(duration)
@@ -34,7 +34,7 @@ type lockRequesterInfo struct {
 	UID string // UID to uniquely identify request of client.
 	Timestamp time.Time // Timestamp set at the time of initialization.
 	TimeLastRefresh time.Time // Timestamp for last lock refresh.
-	Source string // Contains line, function and filename reqesting the lock.
+	Source string // Contains line, function and filename requesting the lock.
 	Group bool // indicates if it was a group lock.
 	// Owner represents the UUID of the owner who originally requested the lock
 	// useful in expiry.
@@ -111,9 +111,9 @@ func getHostIP(host string) (ipList set.StringSet, err error) {
 	return ipList, err
 }

-// sortIPs - sort ips based on higher octects.
+// sortIPs - sort ips based on higher octets.
 // The logic to sort by last octet is implemented to
-// prefer CIDRs with higher octects, this in-turn skips the
+// prefer CIDRs with higher octets, this in-turn skips the
 // localhost/loopback address to be not preferred as the
 // first ip on the list. Subsequently this list helps us print
 // a user friendly message with appropriate values.
@@ -428,7 +428,7 @@ func (lm ListMultipartsInfo) Lookup(uploadID string) bool {
 	return false
 }

-// ListMultipartsInfo - represnets bucket resources for incomplete multipart uploads.
+// ListMultipartsInfo - represents bucket resources for incomplete multipart uploads.
 type ListMultipartsInfo struct {
 	// Together with upload-id-marker, this parameter specifies the multipart upload
 	// after which listing should begin.
@@ -539,7 +539,7 @@ func (e ObjectNameTooLong) Error() string {

 // Error returns string an error formatted as the given text.
 func (e ObjectNamePrefixAsSlash) Error() string {
-	return "Object name contains forward slash as pefix: " + e.Bucket + "/" + e.Object
+	return "Object name contains forward slash as prefix: " + e.Bucket + "/" + e.Object
 }

 // AllAccessDisabled All access to this object has been disabled
@ -825,7 +825,7 @@ func _testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler, v
|
|||||||
{"ad", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "ad"}, false},
|
{"ad", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "ad"}, false},
|
||||||
// Using an existing file for bucket name, but it's not a directory (5).
|
// Using an existing file for bucket name, but it's not a directory (5).
|
||||||
{"simple-file.txt", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "simple-file.txt"}, false},
|
{"simple-file.txt", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "simple-file.txt"}, false},
|
||||||
// Valid bucket names, but they donot exist (6-8).
|
// Valid bucket names, but they do not exist (6-8).
|
||||||
{"volatile-bucket-1", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
|
{"volatile-bucket-1", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
|
||||||
{"volatile-bucket-2", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
|
{"volatile-bucket-2", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
|
||||||
{"volatile-bucket-3", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
|
{"volatile-bucket-3", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
|
||||||
@ -1570,7 +1570,7 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand
|
|||||||
{"ad", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "ad"}, false},
|
{"ad", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "ad"}, false},
|
||||||
// Using an existing file for bucket name, but it's not a directory (5).
|
// Using an existing file for bucket name, but it's not a directory (5).
|
||||||
{"simple-file.txt", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "simple-file.txt"}, false},
|
{"simple-file.txt", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "simple-file.txt"}, false},
|
||||||
// Valid bucket names, but they donot exist (6-8).
|
// Valid bucket names, but they do not exist (6-8).
|
||||||
{"volatile-bucket-1", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
|
{"volatile-bucket-1", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
|
||||||
{"volatile-bucket-2", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
|
{"volatile-bucket-2", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
|
||||||
{"volatile-bucket-3", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
|
{"volatile-bucket-3", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
|
||||||
|
@ -52,16 +52,16 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
|
|||||||
}
|
}
|
||||||
|
|
||||||
errMsg := "Bucket not found: minio-bucket"
|
errMsg := "Bucket not found: minio-bucket"
|
||||||
// opearation expected to fail since the bucket on which NewMultipartUpload is being initiated doesn't exist.
|
// operation expected to fail since the bucket on which NewMultipartUpload is being initiated doesn't exist.
|
||||||
_, err = obj.NewMultipartUpload(context.Background(), bucket, object, opts)
|
_, err = obj.NewMultipartUpload(context.Background(), bucket, object, opts)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Fatalf("%s: Expected to fail since the NewMultipartUpload is intialized on a non-existent bucket.", instanceType)
|
t.Fatalf("%s: Expected to fail since the NewMultipartUpload is initialized on a non-existent bucket.", instanceType)
|
||||||
}
|
}
|
||||||
if errMsg != err.Error() {
|
if errMsg != err.Error() {
|
||||||
t.Errorf("%s, Expected to fail with Error \"%s\", but instead found \"%s\".", instanceType, errMsg, err.Error())
|
t.Errorf("%s, Expected to fail with Error \"%s\", but instead found \"%s\".", instanceType, errMsg, err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create bucket before intiating NewMultipartUpload.
|
// Create bucket before initiating NewMultipartUpload.
|
||||||
err = obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
|
err = obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// failed to create new bucket, abort.
|
// failed to create new bucket, abort.
|
||||||
@ -94,7 +94,7 @@ func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t Test
|
|||||||
bucket := "minio-bucket"
|
bucket := "minio-bucket"
|
||||||
object := "minio-object"
|
object := "minio-object"
|
||||||
opts := ObjectOptions{}
|
opts := ObjectOptions{}
|
||||||
// Create bucket before intiating NewMultipartUpload.
|
// Create bucket before initiating NewMultipartUpload.
|
||||||
err := obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
|
err := obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// failed to create new bucket, abort.
|
// failed to create new bucket, abort.
|
||||||
@ -150,7 +150,7 @@ func testObjectAPIIsUploadIDExists(obj ObjectLayer, instanceType string, t TestE
|
|||||||
bucket := "minio-bucket"
|
bucket := "minio-bucket"
|
||||||
object := "minio-object"
|
object := "minio-object"
|
||||||
|
|
||||||
// Create bucket before intiating NewMultipartUpload.
|
// Create bucket before initiating NewMultipartUpload.
|
||||||
err := obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
|
err := obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to create new bucket, abort.
|
// Failed to create new bucket, abort.
|
||||||
@ -182,7 +182,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
|
|||||||
bucket := "minio-bucket"
|
bucket := "minio-bucket"
|
||||||
object := "minio-object"
|
object := "minio-object"
|
||||||
opts := ObjectOptions{}
|
opts := ObjectOptions{}
|
||||||
// Create bucket before intiating NewMultipartUpload.
|
// Create bucket before initiating NewMultipartUpload.
|
||||||
err := obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
|
err := obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to create new bucket, abort.
|
// Failed to create new bucket, abort.
|
||||||
@ -348,7 +348,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
|
|||||||
// bucketnames[1].
|
// bucketnames[1].
|
||||||
// objectNames[0].
|
// objectNames[0].
|
||||||
// uploadIds [1-3].
|
// uploadIds [1-3].
|
||||||
// Bucket to test for mutiple upload Id's for a given object.
|
// Bucket to test for multiple upload Id's for a given object.
|
||||||
err = obj.MakeBucket(context.Background(), bucketNames[1], MakeBucketOptions{})
|
err = obj.MakeBucket(context.Background(), bucketNames[1], MakeBucketOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to create new bucket, abort.
|
// Failed to create new bucket, abort.
|
||||||
@ -366,7 +366,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
|
|||||||
uploadIDs = append(uploadIDs, res.UploadID)
|
uploadIDs = append(uploadIDs, res.UploadID)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Bucket to test for mutiple objects, each with unique UUID.
|
// Bucket to test for multiple objects, each with unique UUID.
|
||||||
// bucketnames[2].
|
// bucketnames[2].
|
||||||
// objectNames[0-2].
|
// objectNames[0-2].
|
||||||
// uploadIds [4-9].
|
// uploadIds [4-9].
|
||||||
@ -1045,7 +1045,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
|
|||||||
{"Test", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "Test"}, false},
|
{"Test", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "Test"}, false},
|
||||||
{"---", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "---"}, false},
|
{"---", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "---"}, false},
|
||||||
{"ad", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "ad"}, false},
|
{"ad", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "ad"}, false},
|
||||||
// Valid bucket names, but they donot exist (Test number 5-7).
|
// Valid bucket names, but they do not exist (Test number 5-7).
|
||||||
{"volatile-bucket-1", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
|
{"volatile-bucket-1", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
|
||||||
{"volatile-bucket-2", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
|
{"volatile-bucket-2", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
|
||||||
{"volatile-bucket-3", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
|
{"volatile-bucket-3", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
|
||||||
@ -1210,7 +1210,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
|
|||||||
// bucketnames[0].
|
// bucketnames[0].
|
||||||
// objectNames[0].
|
// objectNames[0].
|
||||||
// uploadIds [0].
|
// uploadIds [0].
|
||||||
// Create bucket before intiating NewMultipartUpload.
|
// Create bucket before initiating NewMultipartUpload.
|
||||||
err := obj.MakeBucket(context.Background(), bucketNames[0], MakeBucketOptions{})
|
err := obj.MakeBucket(context.Background(), bucketNames[0], MakeBucketOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to create new bucket, abort.
|
// Failed to create new bucket, abort.
|
||||||
@ -1362,7 +1362,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
|
|||||||
{"---", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "---"}, false},
|
{"---", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "---"}, false},
|
||||||
{"ad", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "ad"}, false},
|
{"ad", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "ad"}, false},
|
||||||
// Test cases for listing uploadID with single part.
|
// Test cases for listing uploadID with single part.
|
||||||
// Valid bucket names, but they donot exist (Test number 5-7).
|
// Valid bucket names, but they do not exist (Test number 5-7).
|
||||||
{"volatile-bucket-1", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
|
{"volatile-bucket-1", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
|
||||||
{"volatile-bucket-2", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
|
{"volatile-bucket-2", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
|
||||||
{"volatile-bucket-3", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
|
{"volatile-bucket-3", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
|
||||||
@ -1461,7 +1461,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
|
|||||||
// bucketnames[0].
|
// bucketnames[0].
|
||||||
// objectNames[0].
|
// objectNames[0].
|
||||||
// uploadIds [0].
|
// uploadIds [0].
|
||||||
// Create bucket before intiating NewMultipartUpload.
|
// Create bucket before initiating NewMultipartUpload.
|
||||||
err := obj.MakeBucket(context.Background(), bucketNames[0], MakeBucketOptions{})
|
err := obj.MakeBucket(context.Background(), bucketNames[0], MakeBucketOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to create new bucket, abort.
|
// Failed to create new bucket, abort.
|
||||||
@ -1599,7 +1599,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
|
|||||||
{"---", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "---"}, false},
|
{"---", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "---"}, false},
|
||||||
{"ad", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "ad"}, false},
|
{"ad", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "ad"}, false},
|
||||||
// Test cases for listing uploadID with single part.
|
// Test cases for listing uploadID with single part.
|
||||||
// Valid bucket names, but they donot exist (Test number 5-7).
|
// Valid bucket names, but they do not exist (Test number 5-7).
|
||||||
{"volatile-bucket-1", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
|
{"volatile-bucket-1", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
|
||||||
{"volatile-bucket-2", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
|
{"volatile-bucket-2", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
|
||||||
{"volatile-bucket-3", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
|
{"volatile-bucket-3", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
|
||||||
@ -1702,7 +1702,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
|
|||||||
// bucketnames[0].
|
// bucketnames[0].
|
||||||
// objectNames[0].
|
// objectNames[0].
|
||||||
// uploadIds [0].
|
// uploadIds [0].
|
||||||
// Create bucket before intiating NewMultipartUpload.
|
// Create bucket before initiating NewMultipartUpload.
|
||||||
err = obj.MakeBucket(context.Background(), bucketNames[0], MakeBucketOptions{})
|
err = obj.MakeBucket(context.Background(), bucketNames[0], MakeBucketOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to create new bucket, abort.
|
// Failed to create new bucket, abort.
|
||||||
@ -1814,7 +1814,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
|
|||||||
{"---", "", "", []CompletePart{}, "", BucketNotFound{Bucket: "---"}, false},
|
{"---", "", "", []CompletePart{}, "", BucketNotFound{Bucket: "---"}, false},
|
||||||
{"ad", "", "", []CompletePart{}, "", BucketNotFound{Bucket: "ad"}, false},
|
{"ad", "", "", []CompletePart{}, "", BucketNotFound{Bucket: "ad"}, false},
|
||||||
// Test cases for listing uploadID with single part.
|
// Test cases for listing uploadID with single part.
|
||||||
// Valid bucket names, but they donot exist (Test number 5-7).
|
// Valid bucket names, but they do not exist (Test number 5-7).
|
||||||
{"volatile-bucket-1", "", "", []CompletePart{}, "", BucketNotFound{Bucket: "volatile-bucket-1"}, false},
|
{"volatile-bucket-1", "", "", []CompletePart{}, "", BucketNotFound{Bucket: "volatile-bucket-1"}, false},
|
||||||
{"volatile-bucket-2", "", "", []CompletePart{}, "", BucketNotFound{Bucket: "volatile-bucket-2"}, false},
|
{"volatile-bucket-2", "", "", []CompletePart{}, "", BucketNotFound{Bucket: "volatile-bucket-2"}, false},
|
||||||
{"volatile-bucket-3", "", "", []CompletePart{}, "", BucketNotFound{Bucket: "volatile-bucket-3"}, false},
|
{"volatile-bucket-3", "", "", []CompletePart{}, "", BucketNotFound{Bucket: "volatile-bucket-3"}, false},
|
||||||
|
@ -99,7 +99,7 @@ func testPathTraversalExploit(obj ObjectLayer, instanceType, bucketName string,
|
|||||||
t.Fatalf("failed to create HTTP request for Put Object: <ERROR> %v", err)
|
t.Fatalf("failed to create HTTP request for Put Object: <ERROR> %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic ofthe handler.
|
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
|
||||||
// Call the ServeHTTP to execute the handler.
|
// Call the ServeHTTP to execute the handler.
|
||||||
apiRouter.ServeHTTP(rec, req)
|
apiRouter.ServeHTTP(rec, req)
|
||||||
|
|
||||||
|
@ -1914,7 +1914,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
|
|||||||
},
|
},
|
||||||
|
|
||||||
// Test case - 6.
|
// Test case - 6.
|
||||||
// Test case with ivalid byte range for exceeding source size boundaries.
|
// Test case with invalid byte range for exceeding source size boundaries.
|
||||||
{
|
{
|
||||||
bucketName: bucketName,
|
bucketName: bucketName,
|
||||||
uploadID: uploadID,
|
uploadID: uploadID,
|
||||||
@ -2030,7 +2030,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
|
|||||||
secretKey: credentials.SecretKey,
|
secretKey: credentials.SecretKey,
|
||||||
expectedRespStatus: http.StatusNotFound,
|
expectedRespStatus: http.StatusNotFound,
|
||||||
},
|
},
|
||||||
// Test case - 16, Test case with ivalid byte range empty value.
|
// Test case - 16, Test case with an invalid byte range (empty value).
|
||||||
{
|
{
|
||||||
bucketName: bucketName,
|
bucketName: bucketName,
|
||||||
uploadID: uploadID,
|
uploadID: uploadID,
|
||||||
@ -3021,7 +3021,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
|
|||||||
},
|
},
|
||||||
// Test case - 7.
|
// Test case - 7.
|
||||||
// Test case with proper parts.
|
// Test case with proper parts.
|
||||||
// Should successed and the content in the response body is asserted.
|
// Should succeed and the content in the response body is asserted.
|
||||||
{
|
{
|
||||||
bucket: bucketName,
|
bucket: bucketName,
|
||||||
object: objectName,
|
object: objectName,
|
||||||
@ -3037,7 +3037,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
|
|||||||
},
|
},
|
||||||
// Test case - 8.
|
// Test case - 8.
|
||||||
// Test case with proper parts.
|
// Test case with proper parts.
|
||||||
// Should successed and the content in the response body is asserted.
|
// Should succeed and the content in the response body is asserted.
|
||||||
{
|
{
|
||||||
bucket: bucketName,
|
bucket: bucketName,
|
||||||
object: objectName,
|
object: objectName,
|
||||||
@ -3088,7 +3088,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
|
|||||||
if rec.Code == http.StatusOK {
|
if rec.Code == http.StatusOK {
|
||||||
// Verify whether the object obtained from the bucket is the same as the one created.
|
// Verify whether the object obtained from the bucket is the same as the one created.
|
||||||
if !bytes.Equal(testCase.expectedContent, actualContent) {
|
if !bytes.Equal(testCase.expectedContent, actualContent) {
|
||||||
t.Errorf("Test %d : MinIO %s: CompleteMultipart response content differs from expected value. got %s, expecte %s", i+1, instanceType,
|
t.Errorf("Test %d : MinIO %s: CompleteMultipart response content differs from expected value. got %s, expected %s", i+1, instanceType,
|
||||||
string(actualContent), string(testCase.expectedContent))
|
string(actualContent), string(testCase.expectedContent))
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
@ -3366,7 +3366,7 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string
|
|||||||
}{
|
}{
|
||||||
// Test case - 1.
|
// Test case - 1.
|
||||||
// Deleting an existing object.
|
// Deleting an existing object.
|
||||||
// Expected to return HTTP resposne status code 204.
|
// Expected to return HTTP response status code 204.
|
||||||
{
|
{
|
||||||
bucketName: bucketName,
|
bucketName: bucketName,
|
||||||
objectName: objectName,
|
objectName: objectName,
|
||||||
@ -3490,7 +3490,7 @@ func testAPIPutObjectPartHandlerStreaming(obj ObjectLayer, instanceType, bucketN
|
|||||||
}
|
}
|
||||||
apiRouter.ServeHTTP(rec, req)
|
apiRouter.ServeHTTP(rec, req)
|
||||||
|
|
||||||
// Get uploadID of the mulitpart upload initiated.
|
// Get uploadID of the multipart upload initiated.
|
||||||
var mpartResp InitiateMultipartUploadResponse
|
var mpartResp InitiateMultipartUploadResponse
|
||||||
mpartRespBytes, err := io.ReadAll(rec.Result().Body)
|
mpartRespBytes, err := io.ReadAll(rec.Result().Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -3897,7 +3897,7 @@ func testAPIListObjectPartsHandlerPreSign(obj ObjectLayer, instanceType, bucketN
|
|||||||
}
|
}
|
||||||
apiRouter.ServeHTTP(rec, req)
|
apiRouter.ServeHTTP(rec, req)
|
||||||
|
|
||||||
// Get uploadID of the mulitpart upload initiated.
|
// Get uploadID of the multipart upload initiated.
|
||||||
var mpartResp InitiateMultipartUploadResponse
|
var mpartResp InitiateMultipartUploadResponse
|
||||||
mpartRespBytes, err := io.ReadAll(rec.Result().Body)
|
mpartRespBytes, err := io.ReadAll(rec.Result().Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -104,7 +104,7 @@ func osMkdirAll(dirPath string, perm os.FileMode, baseDir string) error {
|
|||||||
// refer https://github.com/golang/go/issues/24015
|
// refer https://github.com/golang/go/issues/24015
|
||||||
const blockSize = 8 << 10 // 8192
|
const blockSize = 8 << 10 // 8192
|
||||||
|
|
||||||
// By default atleast 128 entries in single getdents call (1MiB buffer)
|
// By default at least 128 entries in a single getdents call (1MiB buffer)
|
||||||
var (
|
var (
|
||||||
direntPool = sync.Pool{
|
direntPool = sync.Pool{
|
||||||
New: func() interface{} {
|
New: func() interface{} {
|
||||||
|
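The `direntPool` above amortizes allocation of the large getdents buffers. A self-contained sketch of the same `sync.Pool` pattern; the 1MiB total (128 blocks of 8KiB) is taken from the comment above:

```go
package main

import (
	"fmt"
	"sync"
)

const blockSize = 8 << 10 // 8192

// direntPool hands out reusable 1MiB buffers (128 blocks), so each
// getdents-style read does not allocate a fresh buffer.
var direntPool = sync.Pool{
	New: func() interface{} {
		buf := make([]byte, blockSize*128)
		return &buf
	},
}

func main() {
	bufp := direntPool.Get().(*[]byte)
	defer direntPool.Put(bufp)
	fmt.Println("buffer size:", len(*bufp)) // 1048576
}
```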
@ -40,7 +40,7 @@ type fanOutOptions struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// fanOutPutObject takes an input source reader and fans out multiple PUT operations
|
// fanOutPutObject takes an input source reader and fans out multiple PUT operations
|
||||||
// based on the incoming fan-out request, a context cancelation by the caller
|
// based on the incoming fan-out request, a context cancellation by the caller
|
||||||
// would ensure all fan-out operations are canceled.
|
// would ensure all fan-out operations are canceled.
|
||||||
func fanOutPutObject(ctx context.Context, bucket string, objectAPI ObjectLayer, fanOutEntries []minio.PutObjectFanOutEntry, fanOutBuf []byte, opts fanOutOptions) ([]ObjectInfo, []error) {
|
func fanOutPutObject(ctx context.Context, bucket string, objectAPI ObjectLayer, fanOutEntries []minio.PutObjectFanOutEntry, fanOutBuf []byte, opts fanOutOptions) ([]ObjectInfo, []error) {
|
||||||
errs := make([]error, len(fanOutEntries))
|
errs := make([]error, len(fanOutEntries))
|
||||||
|
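The fan-out comment above is the key contract: one source, many concurrent PUTs, and a single context cancels them all, with errors reported per entry. A hedged sketch of that shape using only the standard library, where `putOne` is a hypothetical stand-in for the real per-entry PUT, not MinIO's API:

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

// fanOut runs putOne once per key concurrently; canceling ctx
// cancels every in-flight operation. Errors are index-aligned.
func fanOut(ctx context.Context, keys []string, putOne func(context.Context, string) error) []error {
	errs := make([]error, len(keys))
	var wg sync.WaitGroup
	for i, key := range keys {
		wg.Add(1)
		go func(i int, key string) {
			defer wg.Done()
			errs[i] = putOne(ctx, key)
		}(i, key)
	}
	wg.Wait()
	return errs
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate caller cancellation: every PUT should fail fast
	errs := fanOut(ctx, []string{"a", "b"}, func(ctx context.Context, key string) error {
		return ctx.Err()
	})
	fmt.Println(errs) // [context canceled context canceled]
}
```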
@ -152,7 +152,7 @@ func testPostPolicyReservedBucketExploit(obj ObjectLayer, instanceType string, d
|
|||||||
req.Header.Set("Content-Type", contentTypeHdr)
|
req.Header.Set("Content-Type", contentTypeHdr)
|
||||||
req.Header.Set("User-Agent", "Mozilla")
|
req.Header.Set("User-Agent", "Mozilla")
|
||||||
|
|
||||||
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic ofthe handler.
|
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
|
||||||
// Call the ServeHTTP to execute the handler.
|
// Call the ServeHTTP to execute the handler.
|
||||||
apiRouter.ServeHTTP(rec, req)
|
apiRouter.ServeHTTP(rec, req)
|
||||||
|
|
||||||
@ -225,7 +225,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr
|
|||||||
if perr != nil {
|
if perr != nil {
|
||||||
t.Fatalf("Test %d: %s: Failed to create HTTP request for PostPolicyHandler: <ERROR> %v", i+1, instanceType, perr)
|
t.Fatalf("Test %d: %s: Failed to create HTTP request for PostPolicyHandler: <ERROR> %v", i+1, instanceType, perr)
|
||||||
}
|
}
|
||||||
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic ofthe handler.
|
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
|
||||||
// Call the ServeHTTP to execute the handler.
|
// Call the ServeHTTP to execute the handler.
|
||||||
apiRouter.ServeHTTP(rec, req)
|
apiRouter.ServeHTTP(rec, req)
|
||||||
if rec.Code != test.expectedStatus {
|
if rec.Code != test.expectedStatus {
|
||||||
@ -284,7 +284,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr
|
|||||||
// Change the request body.
|
// Change the request body.
|
||||||
req.Body = io.NopCloser(bytes.NewReader([]byte("Hello,")))
|
req.Body = io.NopCloser(bytes.NewReader([]byte("Hello,")))
|
||||||
}
|
}
|
||||||
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic ofthe handler.
|
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
|
||||||
// Call the ServeHTTP to execute the handler.
|
// Call the ServeHTTP to execute the handler.
|
||||||
apiRouter.ServeHTTP(rec, req)
|
apiRouter.ServeHTTP(rec, req)
|
||||||
if rec.Code != testCase.expectedRespStatus {
|
if rec.Code != testCase.expectedRespStatus {
|
||||||
@ -415,7 +415,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr
|
|||||||
if perr != nil {
|
if perr != nil {
|
||||||
t.Fatalf("Test %d: %s: Failed to create HTTP request for PostPolicyHandler: <ERROR> %v", i+1, instanceType, perr)
|
t.Fatalf("Test %d: %s: Failed to create HTTP request for PostPolicyHandler: <ERROR> %v", i+1, instanceType, perr)
|
||||||
}
|
}
|
||||||
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic ofthe handler.
|
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
|
||||||
// Call the ServeHTTP to execute the handler.
|
// Call the ServeHTTP to execute the handler.
|
||||||
apiRouter.ServeHTTP(rec, req)
|
apiRouter.ServeHTTP(rec, req)
|
||||||
if rec.Code != testCase.expectedRespStatus {
|
if rec.Code != testCase.expectedRespStatus {
|
||||||
@ -487,7 +487,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr
|
|||||||
if perr != nil {
|
if perr != nil {
|
||||||
t.Fatalf("Test %d: %s: Failed to create HTTP request for PostPolicyHandler: <ERROR> %v", i+1, instanceType, perr)
|
t.Fatalf("Test %d: %s: Failed to create HTTP request for PostPolicyHandler: <ERROR> %v", i+1, instanceType, perr)
|
||||||
}
|
}
|
||||||
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic ofthe handler.
|
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
|
||||||
// Call the ServeHTTP to execute the handler.
|
// Call the ServeHTTP to execute the handler.
|
||||||
apiRouter.ServeHTTP(rec, req)
|
apiRouter.ServeHTTP(rec, req)
|
||||||
if rec.Code != testCase.expectedRespStatus {
|
if rec.Code != testCase.expectedRespStatus {
|
||||||
@ -556,7 +556,7 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t
|
|||||||
if perr != nil {
|
if perr != nil {
|
||||||
t.Fatalf("%s: Failed to create HTTP request for PostPolicyHandler: <ERROR> %v", instanceType, perr)
|
t.Fatalf("%s: Failed to create HTTP request for PostPolicyHandler: <ERROR> %v", instanceType, perr)
|
||||||
}
|
}
|
||||||
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic ofthe handler.
|
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
|
||||||
// Call the ServeHTTP to execute the handler.
|
// Call the ServeHTTP to execute the handler.
|
||||||
apiRouter.ServeHTTP(rec, req)
|
apiRouter.ServeHTTP(rec, req)
|
||||||
|
|
||||||
|
@ -260,7 +260,7 @@ func parsePostPolicyForm(r io.Reader) (PostPolicyForm, error) {
|
|||||||
return parsedPolicy, nil
|
return parsedPolicy, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkPolicyCond returns a boolean to indicate if a condition is satisified according
|
// checkPolicyCond returns a boolean to indicate if a condition is satisfied according
|
||||||
// to the passed operator
|
// to the passed operator
|
||||||
func checkPolicyCond(op string, input1, input2 string) bool {
|
func checkPolicyCond(op string, input1, input2 string) bool {
|
||||||
switch op {
|
switch op {
|
||||||
|
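POST-policy conditions are evaluated against an operator such as `eq` or `starts-with`. A minimal sketch of such an operator dispatch, assuming those two operator names (the `switch` body in the diff is truncated here, so this is illustrative rather than MinIO's exact logic):

```go
package main

import (
	"fmt"
	"strings"
)

// checkCond reports whether input1 satisfies the condition
// described by op with the expected value input2.
func checkCond(op, input1, input2 string) bool {
	switch op {
	case "eq":
		return input1 == input2
	case "starts-with":
		return strings.HasPrefix(input1, input2)
	}
	// Unknown operators are treated as not satisfied.
	return false
}

func main() {
	fmt.Println(checkCond("eq", "image/png", "image/png"))       // true
	fmt.Println(checkCond("starts-with", "image/png", "image/")) // true
}
```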
@ -95,7 +95,7 @@ var ServerFlags = []cli.Flag{
|
|||||||
cli.DurationFlag{
|
cli.DurationFlag{
|
||||||
Name: "idle-timeout",
|
Name: "idle-timeout",
|
||||||
Value: xhttp.DefaultIdleTimeout,
|
Value: xhttp.DefaultIdleTimeout,
|
||||||
Usage: "idle timeout is the maximum amount of time to wait for the next request when keep-alives are enabled",
|
Usage: "idle timeout is the maximum amount of time to wait for the next request when keep-alive are enabled",
|
||||||
EnvVar: "MINIO_IDLE_TIMEOUT",
|
EnvVar: "MINIO_IDLE_TIMEOUT",
|
||||||
Hidden: true,
|
Hidden: true,
|
||||||
},
|
},
|
||||||
@ -415,7 +415,7 @@ func initAllSubsystems(ctx context.Context) {
|
|||||||
globalBucketVersioningSys = NewBucketVersioningSys()
|
globalBucketVersioningSys = NewBucketVersioningSys()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create new bucket replication subsytem
|
// Create new bucket replication subsystem
|
||||||
globalBucketTargetSys = NewBucketTargetSys(GlobalContext)
|
globalBucketTargetSys = NewBucketTargetSys(GlobalContext)
|
||||||
|
|
||||||
// Create new ILM tier configuration subsystem
|
// Create new ILM tier configuration subsystem
|
||||||
|
@ -199,7 +199,7 @@ func getReqAccessKeyV2(r *http.Request) (auth.Credentials, bool, APIErrorCode) {
|
|||||||
return auth.Credentials{}, false, ErrMissingFields
|
return auth.Credentials{}, false, ErrMissingFields
|
||||||
}
|
}
|
||||||
|
|
||||||
// Then will be splitting on ":", this will seprate `AWSAccessKeyId` and `Signature` string.
|
// Then we split on ":", which separates `AWSAccessKeyId` and the `Signature` string.
|
||||||
keySignFields := strings.Split(strings.TrimSpace(authFields[1]), ":")
|
keySignFields := strings.Split(strings.TrimSpace(authFields[1]), ":")
|
||||||
if len(keySignFields) != 2 {
|
if len(keySignFields) != 2 {
|
||||||
return auth.Credentials{}, false, ErrMissingFields
|
return auth.Credentials{}, false, ErrMissingFields
|
||||||
|
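For Signature V2 the `Authorization` header has the shape `AWS AWSAccessKeyId:Signature`, so after stripping the scheme the remainder is split on `:` into exactly two fields, as the comment above describes. A hedged sketch of that parsing step (`parseV2Auth` is a hypothetical helper):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// parseV2Auth extracts the access key and signature from an
// "AWS AWSAccessKeyId:Signature" authorization header value.
func parseV2Auth(header string) (accessKey, signature string, err error) {
	fields := strings.SplitN(header, " ", 2)
	if len(fields) != 2 || fields[0] != "AWS" {
		return "", "", errors.New("missing fields in authorization header")
	}
	keySign := strings.Split(strings.TrimSpace(fields[1]), ":")
	if len(keySign) != 2 {
		return "", "", errors.New("missing fields in authorization header")
	}
	return keySign[0], keySign[1], nil
}

func main() {
	ak, sig, err := parseV2Auth("AWS AKIAIOSFODNN7EXAMPLE:frJIUN8DYpKDtOLCwo//yllqDzg=")
	fmt.Println(ak, sig, err)
}
```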
@ -161,7 +161,7 @@ type signValues struct {
|
|||||||
Signature string
|
Signature string
|
||||||
}
|
}
|
||||||
|
|
||||||
// preSignValues data type represents structued form of AWS Signature V4 query string.
|
// preSignValues data type represents structured form of AWS Signature V4 query string.
|
||||||
type preSignValues struct {
|
type preSignValues struct {
|
||||||
signValues
|
signValues
|
||||||
Date time.Time
|
Date time.Time
|
||||||
@ -248,7 +248,7 @@ func parsePreSignV4(query url.Values, region string, stype serviceType) (psv pre
|
|||||||
return psv, aec
|
return psv, aec
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return structed form of signature query string.
|
// Return structured form of signature query string.
|
||||||
return preSignV4Values, ErrNone
|
return preSignV4Values, ErrNone
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -79,7 +79,7 @@ func validateCredentialfields(t *testing.T, testNum int, expectedCredentials cre
|
|||||||
}
|
}
|
||||||
|
|
||||||
// TestParseCredentialHeader - validates the format validator and extractor for the Credential header in an aws v4 request.
|
// TestParseCredentialHeader - validates the format validator and extractor for the Credential header in an aws v4 request.
|
||||||
// A valid format of creadential should be of the following format.
|
// A valid format of credential should be of the following format.
|
||||||
// Credential = accessKey + SlashSeparator + scope
|
// Credential = accessKey + SlashSeparator + scope
|
||||||
// where scope = string.Join([]string{ currTime.Format(yyyymmdd),
|
// where scope = string.Join([]string{ currTime.Format(yyyymmdd),
|
||||||
//
|
//
|
||||||
@ -387,7 +387,7 @@ func TestParseSignV4(t *testing.T) {
|
|||||||
},
|
},
|
||||||
// Test case - 5.
|
// Test case - 5.
|
||||||
// Auth field with missing "SigHeaderTag",ErrMissingSignHeadersTag expected.
|
// Auth field with missing "SigHeaderTag",ErrMissingSignHeadersTag expected.
|
||||||
// A vaild credential is generated.
|
// A valid credential is generated.
|
||||||
// Test case with invalid credential field.
|
// Test case with invalid credential field.
|
||||||
{
|
{
|
||||||
inputV4AuthStr: signV4Algorithm +
|
inputV4AuthStr: signV4Algorithm +
|
||||||
@ -409,7 +409,7 @@ func TestParseSignV4(t *testing.T) {
|
|||||||
},
|
},
|
||||||
// Test case - 6.
|
// Test case - 6.
|
||||||
// Auth string with missing "SignatureTag",ErrMissingSignTag expected.
|
// Auth string with missing "SignatureTag",ErrMissingSignTag expected.
|
||||||
// A vaild credential is generated.
|
// A valid credential is generated.
|
||||||
// Test case with invalid credential field.
|
// Test case with invalid credential field.
|
||||||
{
|
{
|
||||||
inputV4AuthStr: signV4Algorithm +
|
inputV4AuthStr: signV4Algorithm +
|
||||||
|
@ -206,7 +206,7 @@ func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header,
|
|||||||
extractedSignedHeaders := make(http.Header)
|
extractedSignedHeaders := make(http.Header)
|
||||||
for _, header := range signedHeaders {
|
for _, header := range signedHeaders {
|
||||||
// `host` will not be found in the headers, can be found in r.Host.
|
// `host` will not be found in the headers, can be found in r.Host.
|
||||||
// but its alway necessary that the list of signed headers containing host in it.
|
// but it's always necessary that the list of signed headers contains host in it.
|
||||||
val, ok := reqHeaders[http.CanonicalHeaderKey(header)]
|
val, ok := reqHeaders[http.CanonicalHeaderKey(header)]
|
||||||
if !ok {
|
if !ok {
|
||||||
// try to set headers from Query String
|
// try to set headers from Query String
|
||||||
|
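The subtlety flagged in the comment is that `host` is mandatory in the signed-headers list yet lives on `r.Host` rather than in `r.Header`. A hedged sketch of that lookup order (`signedHeaderValues` is a hypothetical helper, not MinIO's function):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// signedHeaderValues resolves each signed header, special-casing
// "host", which Go stores on r.Host instead of in r.Header.
func signedHeaderValues(signed []string, r *http.Request) http.Header {
	out := make(http.Header)
	for _, h := range signed {
		key := http.CanonicalHeaderKey(h)
		if key == "Host" {
			out.Set("Host", r.Host)
			continue
		}
		if vals, ok := r.Header[key]; ok {
			out[key] = vals
		}
	}
	return out
}

func main() {
	r := httptest.NewRequest(http.MethodGet, "http://play.min.io:9000/", nil)
	r.Header.Set("X-Amz-Date", "20240118T000000Z")
	fmt.Println(signedHeaderValues([]string{"host", "x-amz-date"}, r))
}
```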
@ -220,7 +220,7 @@ func TestDoesPresignedSignatureMatch(t *testing.T) {
|
|||||||
expected: ErrRequestNotReadyYet,
|
expected: ErrRequestNotReadyYet,
|
||||||
},
|
},
|
||||||
// (7) Should not error with invalid region instead, call should proceed
|
// (7) Should not error with invalid region instead, call should proceed
|
||||||
// with sigature does not match.
|
// with signature does not match.
|
||||||
{
|
{
|
||||||
queryParams: map[string]string{
|
queryParams: map[string]string{
|
||||||
"X-Amz-Algorithm": signV4Algorithm,
|
"X-Amz-Algorithm": signV4Algorithm,
|
||||||
|
@ -90,7 +90,7 @@ func (sm *siteResyncMetrics) init(ctx context.Context) {
|
|||||||
}
|
}
|
||||||
duration := time.Duration(r.Float64() * float64(time.Second*10))
|
duration := time.Duration(r.Float64() * float64(time.Second*10))
|
||||||
if duration < time.Second {
|
if duration < time.Second {
|
||||||
// Make sure to sleep atleast a second to avoid high CPU ticks.
|
// Make sure to sleep at least a second to avoid high CPU ticks.
|
||||||
duration = time.Second
|
duration = time.Second
|
||||||
}
|
}
|
||||||
time.Sleep(duration)
|
time.Sleep(duration)
|
||||||
|
@ -1188,7 +1188,7 @@ func checkDiskFatalErrs(errs []error) error {
|
|||||||
// at each implementation of error for added hints.
|
// at each implementation of error for added hints.
|
||||||
//
|
//
|
||||||
// FIXME: This is an unusual function but serves its purpose for
|
// FIXME: This is an unusual function but serves its purpose for
|
||||||
// now, need to revist the overall erroring structure here.
|
// now, need to revisit the overall erroring structure here.
|
||||||
// Do not like it :-(
|
// Do not like it :-(
|
||||||
func logFatalErrs(err error, endpoint Endpoint, exit bool) {
|
func logFatalErrs(err error, endpoint Endpoint, exit bool) {
|
||||||
switch {
|
switch {
|
||||||
|
@ -171,7 +171,7 @@ func calculateSeedSignature(r *http.Request, trailers bool) (cred auth.Credentia
|
|||||||
return cred, "", "", time.Time{}, ErrSignatureDoesNotMatch
|
return cred, "", "", time.Time{}, ErrSignatureDoesNotMatch
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return caculated signature.
|
// Return calculated signature.
|
||||||
return cred, newSignature, region, date, ErrNone
|
return cred, newSignature, region, date, ErrNone
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -723,7 +723,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithCertificate(w http.ResponseWriter, r *h
|
|||||||
// We have to establish a TLS connection and the
|
// We have to establish a TLS connection and the
|
||||||
// client must provide exactly one client certificate.
|
// client must provide exactly one client certificate.
|
||||||
// Otherwise, we don't have a certificate to verify or
|
// Otherwise, we don't have a certificate to verify or
|
||||||
// the policy lookup would ambigious.
|
// the policy lookup would be ambiguous.
|
||||||
if r.TLS == nil {
|
if r.TLS == nil {
|
||||||
writeSTSErrorResponse(ctx, w, ErrSTSInsecureConnection, errors.New("No TLS connection attempt"))
|
writeSTSErrorResponse(ctx, w, ErrSTSInsecureConnection, errors.New("No TLS connection attempt"))
|
||||||
return
|
return
|
||||||
@ -732,7 +732,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithCertificate(w http.ResponseWriter, r *h
|
|||||||
// A client may send a certificate chain such that we end up
|
// A client may send a certificate chain such that we end up
|
||||||
// with multiple peer certificates. However, we can only accept
|
// with multiple peer certificates. However, we can only accept
|
||||||
// a single client certificate. Otherwise, the certificate to
|
// a single client certificate. Otherwise, the certificate to
|
||||||
// policy mapping would be ambigious.
|
// policy mapping would be ambiguous.
|
||||||
// However, we can filter all CA certificates and only check
|
// However, we can filter all CA certificates and only check
|
||||||
// whether the client has sent exactly one (non-CA) leaf certificate.
|
// whether the client has sent exactly one (non-CA) leaf certificate.
|
||||||
peerCertificates := make([]*x509.Certificate, 0, len(r.TLS.PeerCertificates))
|
peerCertificates := make([]*x509.Certificate, 0, len(r.TLS.PeerCertificates))
|
||||||
|
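The requirement spelled out above — exactly one non-CA leaf certificate remaining after filtering the chain, otherwise the certificate-to-policy mapping is ambiguous — can be sketched as follows; `x509.Certificate.IsCA` is the field doing the filtering:

```go
package main

import (
	"crypto/x509"
	"errors"
	"fmt"
)

// leafCertificate returns the single non-CA certificate from the
// peer chain, or an error if the policy mapping would be ambiguous.
func leafCertificate(peers []*x509.Certificate) (*x509.Certificate, error) {
	leaves := make([]*x509.Certificate, 0, len(peers))
	for _, cert := range peers {
		if !cert.IsCA {
			leaves = append(leaves, cert)
		}
	}
	if len(leaves) != 1 {
		return nil, errors.New("expected exactly one non-CA client certificate")
	}
	return leaves[0], nil
}

func main() {
	_, err := leafCertificate(nil)
	fmt.Println(err) // expected exactly one non-CA client certificate
}
```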
@ -1511,7 +1511,7 @@ func removeRoots(roots []string) {
|
|||||||
|
|
||||||
// creates a bucket for the tests and returns the bucket name.
|
// creates a bucket for the tests and returns the bucket name.
|
||||||
// initializes the specified API endpoints for the tests.
|
// initializes the specified API endpoints for the tests.
|
||||||
// initialies the root and returns its path.
|
// initializes the root and returns its path.
|
||||||
// return credentials.
|
// return credentials.
|
||||||
func initAPIHandlerTest(ctx context.Context, obj ObjectLayer, endpoints []string) (string, http.Handler, error) {
|
func initAPIHandlerTest(ctx context.Context, obj ObjectLayer, endpoints []string) (string, http.Handler, error) {
|
||||||
initAllSubsystems(ctx)
|
initAllSubsystems(ctx)
|
||||||
@ -1660,7 +1660,7 @@ func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketN
|
|||||||
// expected error response when the unsigned HTTP request is not permitted.
|
// expected error response when the unsigned HTTP request is not permitted.
|
||||||
unsupportedSignature := getAPIError(ErrSignatureVersionNotSupported).HTTPStatusCode
|
unsupportedSignature := getAPIError(ErrSignatureVersionNotSupported).HTTPStatusCode
|
||||||
if rec.Code != unsupportedSignature {
|
if rec.Code != unsupportedSignature {
|
||||||
t.Fatal(failTestStr(unknownSignTestStr, fmt.Sprintf("Object API Unknow auth test for \"%s\", expected to fail with %d, but failed with %d", testName, unsupportedSignature, rec.Code)))
|
t.Fatal(failTestStr(unknownSignTestStr, fmt.Sprintf("Object API Unknown auth test for \"%s\", expected to fail with %d, but failed with %d", testName, unsupportedSignature, rec.Code)))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1674,7 +1674,7 @@ func ExecObjectLayerAPINilTest(t TestErrHandler, bucketName, objectName, instanc
|
|||||||
// httptest Recorder to capture all the response by the http handler.
|
// httptest Recorder to capture all the response by the http handler.
|
||||||
rec := httptest.NewRecorder()
|
rec := httptest.NewRecorder()
|
||||||
|
|
||||||
// The API handler gets the referece to the object layer via the global object Layer,
|
// The API handler gets the reference to the object layer via the global object Layer,
|
||||||
// setting it to `nil` in order to test the handlers' response for an uninitialized object layer.
|
// setting it to `nil` in order to test the handlers' response for an uninitialized object layer.
|
||||||
globalObjLayerMutex.Lock()
|
globalObjLayerMutex.Lock()
|
||||||
globalObjectAPI = nil
|
globalObjectAPI = nil
|
||||||
@ -1757,7 +1757,7 @@ func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints [
|
|||||||
|
|
||||||
bucketErasure, erAPIRouter, err := initAPIHandlerTest(ctx, objLayer, endpoints)
|
bucketErasure, erAPIRouter, err := initAPIHandlerTest(ctx, objLayer, endpoints)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Initialzation of API handler tests failed: <ERROR> %s", err)
|
t.Fatalf("Initialization of API handler tests failed: <ERROR> %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// initialize the server and obtain the credentials and root.
|
// initialize the server and obtain the credentials and root.
|
||||||
|
@ -72,7 +72,7 @@ func NewTierJournal() *TierJournal {
|
|||||||
return j
|
return j
|
||||||
}
|
}
|
||||||
|
|
||||||
// Init intializes an in-memory journal built using a
|
// Init initializes an in-memory journal built using a
|
||||||
// buffered channel for new journal entries. It also initializes the on-disk
|
// buffered channel for new journal entries. It also initializes the on-disk
|
||||||
// journal only to process existing journal entries made from previous versions.
|
// journal only to process existing journal entries made from previous versions.
|
||||||
func (t *TierJournal) Init(ctx context.Context) error {
|
func (t *TierJournal) Init(ctx context.Context) error {
|
||||||
|
@ -99,7 +99,7 @@ var errNoSuchPolicy = errors.New("Specified canned policy does not exist")
|
|||||||
var errPolicyInUse = errors.New("Specified policy is in use and cannot be deleted.")
|
var errPolicyInUse = errors.New("Specified policy is in use and cannot be deleted.")
|
||||||
|
|
||||||
// error returned when more than a single policy is specified when only one is
|
// error returned when more than a single policy is specified when only one is
|
||||||
// expectd.
|
// expected.
|
||||||
var errTooManyPolicies = errors.New("Only a single policy may be specified here.")
|
var errTooManyPolicies = errors.New("Only a single policy may be specified here.")
|
||||||
|
|
||||||
// error returned in IAM subsystem when an external users system is configured.
|
// error returned in IAM subsystem when an external users system is configured.
|
||||||
|
@ -292,7 +292,7 @@ func isMaxPartID(partID int) bool {
|
|||||||
return partID > globalMaxPartID
|
return partID > globalMaxPartID
|
||||||
}
|
}
|
||||||
|
|
||||||
// profilerWrapper is created becauses pkg/profiler doesn't
|
// profilerWrapper is created because pkg/profiler doesn't
|
||||||
// provide any API to calculate the profiler file path in the
|
// provide any API to calculate the profiler file path in the
|
||||||
// disk since the name of the latter is randomly generated.
|
// disk since the name of the latter is randomly generated.
|
||||||
type profilerWrapper struct {
|
type profilerWrapper struct {
|
||||||
|
@ -38,7 +38,7 @@ var _ WarmBackend = (*warmBackendMinIO)(nil)
|
|||||||
func newWarmBackendMinIO(conf madmin.TierMinIO, tier string) (*warmBackendMinIO, error) {
|
func newWarmBackendMinIO(conf madmin.TierMinIO, tier string) (*warmBackendMinIO, error) {
|
||||||
// Validation of credentials
|
// Validation of credentials
|
||||||
if conf.AccessKey == "" || conf.SecretKey == "" {
|
if conf.AccessKey == "" || conf.SecretKey == "" {
|
||||||
return nil, errors.New("both access and secret keys are requied")
|
return nil, errors.New("both access and secret keys are required")
|
||||||
}
|
}
|
||||||
|
|
||||||
if conf.Bucket == "" {
|
if conf.Bucket == "" {
|
||||||
|
@ -770,7 +770,7 @@ func readXLMetaNoData(r io.Reader, size int64) ([]byte, error) {
|
|||||||
case 1, 2, 3:
|
case 1, 2, 3:
|
||||||
sz, tmp, err := msgp.ReadBytesHeader(tmp)
|
sz, tmp, err := msgp.ReadBytesHeader(tmp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("readXLMetaNoData(read_meta): uknown metadata version %w", err)
|
return nil, fmt.Errorf("readXLMetaNoData(read_meta): unknown metadata version %w", err)
|
||||||
}
|
}
|
||||||
want := int64(sz) + int64(len(buf)-len(tmp))
|
want := int64(sz) + int64(len(buf)-len(tmp))
|
||||||
|
|
||||||
|
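The hunk above reads only the MessagePack bytes header so it can learn the metadata payload size without buffering the payload itself. A hedged sketch using the same `msgp.ReadBytesHeader` call (from `github.com/tinylib/msgp/msgp`, the signature shown in the diff) on a small encoded blob:

```go
package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Encode a bytes field, then read back only its header.
	buf := msgp.AppendBytes(nil, []byte("hello"))

	sz, rest, err := msgp.ReadBytesHeader(buf)
	if err != nil {
		panic(err)
	}
	// sz is the payload length; rest points at the payload itself,
	// so a caller can decide how much more it needs to read.
	fmt.Println(sz, len(rest)) // 5 5
}
```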
@ -2243,7 +2243,7 @@ func (s *xlStorage) deleteFile(basePath, deletePath string, recursive, immediate
|
|||||||
if runtime.GOOS == globalMacOSName {
|
if runtime.GOOS == globalMacOSName {
|
||||||
storeFilePath := pathJoin(deletePath, ".DS_Store")
|
storeFilePath := pathJoin(deletePath, ".DS_Store")
|
||||||
_, err := Stat(storeFilePath)
|
_, err := Stat(storeFilePath)
|
||||||
// .DS_Store exsits
|
// .DS_Store exists
|
||||||
if err == nil {
|
if err == nil {
|
||||||
// delete first
|
// delete first
|
||||||
Remove(storeFilePath)
|
Remove(storeFilePath)
|
||||||
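On macOS, Finder drops `.DS_Store` files into directories, which would otherwise make an apparently-empty directory non-removable; the hunk above stats and deletes it first. A standalone sketch of that guard (`removeDSStore` is a hypothetical helper):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

// removeDSStore deletes a stray .DS_Store inside dir on macOS so a
// subsequent os.Remove(dir) does not fail on a "non-empty" directory.
func removeDSStore(dir string) {
	if runtime.GOOS != "darwin" {
		return
	}
	storeFilePath := filepath.Join(dir, ".DS_Store")
	if _, err := os.Stat(storeFilePath); err == nil {
		// .DS_Store exists, delete it first.
		os.Remove(storeFilePath)
	}
}

func main() {
	dir, _ := os.MkdirTemp("", "example")
	defer os.RemoveAll(dir)
	removeDSStore(dir)
	fmt.Println("cleaned:", dir)
}
```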
@ -2454,7 +2454,7 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
|
|||||||
formatLegacy := s.formatLegacy
|
formatLegacy := s.formatLegacy
|
||||||
s.RUnlock()
|
s.RUnlock()
|
||||||
// It is possible that some drives may not have `xl.meta` file
|
// It is possible that some drives may not have `xl.meta` file
|
||||||
// in such scenarios verify if atleast `part.1` files exist
|
// in such scenarios verify if at least `part.1` files exist
|
||||||
// to verify for legacy version.
|
// to verify for legacy version.
|
||||||
if formatLegacy {
|
if formatLegacy {
|
||||||
// We only need this code if we are moving
|
// We only need this code if we are moving
|
||||||
|
@ -1200,7 +1200,7 @@ func TestXLStorageReadFile(t *testing.T) {
|
|||||||
if err == nil && err != testCase.expectedErr {
|
if err == nil && err != testCase.expectedErr {
|
||||||
t.Errorf("Case: %d %#v, expected: %s, got :%s", i+1, testCase, testCase.expectedErr, err)
|
t.Errorf("Case: %d %#v, expected: %s, got :%s", i+1, testCase, testCase.expectedErr, err)
|
||||||
}
|
}
|
||||||
// Expected error retured, proceed further to validate the returned results.
|
// Expected error returned, proceed further to validate the returned results.
|
||||||
if err != nil && testCase.expectedErr == nil {
|
if err != nil && testCase.expectedErr == nil {
|
||||||
t.Errorf("Case: %d %#v, expected: %s, got :%s", i+1, testCase, testCase.expectedErr, err)
|
t.Errorf("Case: %d %#v, expected: %s, got :%s", i+1, testCase, testCase.expectedErr, err)
|
||||||
}
|
}
|
||||||
@ -1404,7 +1404,7 @@ func TestXLStorageAppendFile(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// TestXLStorage case with invalid volume name.
|
// TestXLStorage case with invalid volume name.
|
||||||
// A valid volume name should be atleast of size 3.
|
// A valid volume name should be at least of size 3.
|
||||||
err = xlStorage.AppendFile(context.Background(), "bn", "yes", []byte("hello, world"))
|
err = xlStorage.AppendFile(context.Background(), "bn", "yes", []byte("hello, world"))
|
||||||
if err != errVolumeNotFound {
|
if err != errVolumeNotFound {
|
||||||
t.Fatalf("expected: \"Invalid argument error\", got: \"%s\"", err)
|
t.Fatalf("expected: \"Invalid argument error\", got: \"%s\"", err)
|
||||||
@ -1560,7 +1560,7 @@ func TestXLStorageRenameFile(t *testing.T) {
|
|||||||
expectedErr: errVolumeNotFound,
|
expectedErr: errVolumeNotFound,
|
||||||
},
|
},
|
||||||
// TestXLStorage case - 12.
|
// TestXLStorage case - 12.
|
||||||
// TestXLStorage case with invalid src volume name. Length should be atleast 3.
|
// TestXLStorage case with invalid src volume name. Length should be at least 3.
|
||||||
// Expecting to fail with `errInvalidArgument`.
|
// Expecting to fail with `errInvalidArgument`.
|
||||||
{
|
{
|
||||||
srcVol: "ab",
|
srcVol: "ab",
|
||||||
@ -1570,7 +1570,7 @@ func TestXLStorageRenameFile(t *testing.T) {
|
|||||||
expectedErr: errVolumeNotFound,
|
expectedErr: errVolumeNotFound,
|
||||||
},
|
},
|
||||||
// TestXLStorage case - 13.
|
// TestXLStorage case - 13.
|
||||||
// TestXLStorage case with invalid destination volume name. Length should be atleast 3.
|
// TestXLStorage case with invalid destination volume name. Length should be at least 3.
|
||||||
// Expecting to fail with `errInvalidArgument`.
|
// Expecting to fail with `errInvalidArgument`.
|
||||||
{
|
{
|
||||||
srcVol: "abcd",
|
srcVol: "abcd",
|
||||||
@ -1580,7 +1580,7 @@ func TestXLStorageRenameFile(t *testing.T) {
|
|||||||
expectedErr: errVolumeNotFound,
|
expectedErr: errVolumeNotFound,
|
||||||
},
|
},
|
||||||
// TestXLStorage case - 14.
|
// TestXLStorage case - 14.
|
||||||
// TestXLStorage case with invalid destination volume name. Length should be atleast 3.
|
// TestXLStorage case with invalid destination volume name. Length should be at least 3.
|
||||||
// Expecting to fail with `errInvalidArgument`.
|
// Expecting to fail with `errInvalidArgument`.
|
||||||
{
|
{
|
||||||
srcVol: "abcd",
|
srcVol: "abcd",
|
||||||
|
@ -857,7 +857,7 @@ When the _access_ format is used, MinIO appends events to a table. It creates ro
|
|||||||
|
|
||||||
The steps below show how to use this notification target in `namespace` format. The other format is very similar and is omitted for brevity.
|
The steps below show how to use this notification target in `namespace` format. The other format is very similar and is omitted for brevity.
|
||||||
|
|
||||||
### Step 1: Ensure PostgresSQL minimum requirements are met
|
### Step 1: Ensure PostgreSQL minimum requirements are met
|
||||||
|
|
||||||
MinIO requires PostgreSQL version 9.5 or above. MinIO uses the [`INSERT ON CONFLICT`](https://www.postgresql.org/docs/9.5/static/sql-insert.html#SQL-ON-CONFLICT) (aka UPSERT) feature, introduced in version 9.5 and the [JSONB](https://www.postgresql.org/docs/9.4/static/datatype-json.html) data-type introduced in version 9.4.
|
MinIO requires PostgreSQL version 9.5 or above. MinIO uses the [`INSERT ON CONFLICT`](https://www.postgresql.org/docs/9.5/static/sql-insert.html#SQL-ON-CONFLICT) (aka UPSERT) feature, introduced in version 9.5 and the [JSONB](https://www.postgresql.org/docs/9.4/static/datatype-json.html) data-type introduced in version 9.4.
|
||||||
|
|
||||||
|
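The `INSERT ON CONFLICT` requirement exists because the `namespace` format keeps one row per object key, updated in place. A hedged sketch of such an upsert from Go using `database/sql` with the `lib/pq` driver; the connection string, table, and column names here are illustrative placeholders, not MinIO's actual schema:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // PostgreSQL driver
)

func main() {
	// Placeholder DSN; requires a reachable PostgreSQL server.
	db, err := sql.Open("postgres", "postgres://user:pass@localhost/minio_events?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Requires PostgreSQL >= 9.5 (ON CONFLICT) and >= 9.4 (JSONB).
	_, err = db.Exec(`
		INSERT INTO bucketevents (key, value)
		VALUES ($1, $2::jsonb)
		ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value`,
		"bucket/object.txt", `{"eventName":"s3:ObjectCreated:Put"}`)
	if err != nil {
		log.Fatal(err)
	}
}
```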
@ -234,7 +234,7 @@ fi
|
|||||||
## Check replication of deleted ILM expiry rules
|
## Check replication of deleted ILM expiry rules
|
||||||
./mc ilm rule remove --id "${id}" sitea/bucket
|
./mc ilm rule remove --id "${id}" sitea/bucket
|
||||||
sleep 30
|
sleep 30
|
||||||
# should error as rule doesnt exist
|
# should error as rule doesn't exist
|
||||||
error=$(./mc ilm rule list siteb/bucket --json | jq '.error.cause.message' | sed 's/"//g')
|
error=$(./mc ilm rule list siteb/bucket --json | jq '.error.cause.message' | sed 's/"//g')
|
||||||
if [ "$error" != "The lifecycle configuration does not exist" ]; then
|
if [ "$error" != "The lifecycle configuration does not exist" ]; then
|
||||||
echo "BUG: Removed ILM expiry rule not replicated to 'siteb'"
|
echo "BUG: Removed ILM expiry rule not replicated to 'siteb'"
|
||||||
|
@ -56,7 +56,7 @@ func getMountMap() (map[string]string, error) {
|
|||||||
for scanner.Scan() {
|
for scanner.Scan() {
|
||||||
s := strings.Split(scanner.Text(), " ")
|
s := strings.Split(scanner.Text(), " ")
|
||||||
if len(s) != 11 {
|
if len(s) != 11 {
|
||||||
return nil, errors.New("unsupport /proc/self/mountinfo format")
|
return nil, errors.New("unsupported /proc/self/mountinfo format")
|
||||||
}
|
}
|
||||||
result[s[2]] = s[9]
|
result[s[2]] = s[9]
|
||||||
}
|
}
|
||||||
|
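Each `/proc/self/mountinfo` line is space-separated; the snippet above keys the map by field 3 (the `major:minor` device number, `s[2]`) and stores field 10 (the mount source, `s[9]`). A hedged sketch parsing one such line; note the fixed 11-field assumption mirrors the snippet, while real mountinfo lines can carry optional fields:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// parseMountInfoLine maps the major:minor device number (field 3)
// to the mount source (field 10) for one /proc/self/mountinfo line.
func parseMountInfoLine(line string) (devNum, source string, err error) {
	s := strings.Split(line, " ")
	if len(s) != 11 {
		return "", "", errors.New("unsupported /proc/self/mountinfo format")
	}
	return s[2], s[9], nil
}

func main() {
	// Sample line following the mountinfo format documented in proc(5).
	line := "36 35 98:0 / /mnt rw,noatime master:1 - ext4 /dev/sda1 rw"
	dev, src, err := parseMountInfoLine(line)
	fmt.Println(dev, src, err) // 98:0 /dev/sda1 <nil>
}
```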
@ -4,7 +4,7 @@ MinIO now supports starting the server arguments and configuration via a YAML co
|
|||||||
|
|
||||||
Historically, everything to MinIO was provided via command arguments for the hostnames and the drives via an ellipses syntax such as `minio server http://host{1...4}/disk{1...4}`. This requirement added an additional burden of having sequential hostnames to make sure we can provide horizontal distribution; however, we have come across situations where this is not feasible and there are no easier alternatives without modifying /etc/hosts on the host system as the root user. Many times in airgapped deployments this is not allowed or requires audits and approvals.
|
Historically, everything to MinIO was provided via command arguments for the hostnames and the drives via an ellipses syntax such as `minio server http://host{1...4}/disk{1...4}`. This requirement added an additional burden of having sequential hostnames to make sure we can provide horizontal distribution; however, we have come across situations where this is not feasible and there are no easier alternatives without modifying /etc/hosts on the host system as the root user. Many times in airgapped deployments this is not allowed or requires audits and approvals.
|
||||||
|
|
||||||
MinIO server configuration file allows users to provide topology that allows for heterogenous hostnames, allowing MinIO to deployed in pre-existing environments without any further OS level configurations.
|
MinIO server configuration file allows users to provide topology that allows for heterogeneous hostnames, allowing MinIO to be deployed in pre-existing environments without any further OS level configurations.
|
||||||
|
|
||||||
### Usage
|
### Usage
|
||||||
|
|
||||||
@ -12,7 +12,7 @@ MinIO server configuration file allows users to provide topology that allows for
|
|||||||
minio server --config config.yaml
|
minio server --config config.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
Lets you start MinIO server with all inputs to start MinIO server provided via this configuration file, once the configuration file is provided all other pre-existing values on disk for configuration are overriden by the new values set in this configuration file.
|
Lets you start the MinIO server with all startup inputs provided via this configuration file; once the configuration file is provided, all other pre-existing configuration values on disk are overridden by the new values set in this configuration file.
|
||||||
|
|
||||||
Following is an example YAML configuration structure.
|
Following is an example YAML configuration structure.
|
||||||
```
|
```
|
||||||
@ -53,7 +53,7 @@ options:
|
|||||||
- Mixing `local-path` and `distributed-path` is not allowed; doing so would cause MinIO to refuse to start the server.
|
- Mixing `local-path` and `distributed-path` is not allowed; doing so would cause MinIO to refuse to start the server.
|
||||||
- Ellipses notation (e.g. `{1...10}`) and bracket notation (e.g. `{a,c,f}`) are fully allowed to specify multiple entries in one line.
|
- Ellipses notation (e.g. `{1...10}`) and bracket notation (e.g. `{a,c,f}`) are fully allowed to specify multiple entries in one line.
|
||||||
|
|
||||||
> NOTE: MinIO environmental variables still take precedence over the `config.yaml` file, however `config.yaml` is preffered over MinIO internal config KV settings via `mc admin config set alias/ <sub-system>`.
|
> NOTE: MinIO environmental variables still take precedence over the `config.yaml` file, however `config.yaml` is preferred over MinIO internal config KV settings via `mc admin config set alias/ <sub-system>`.
|
||||||
|
|
||||||
### TODO
|
### TODO
|
||||||
|
|
||||||
|
@ -26,7 +26,7 @@ MinIO follows strict **read-after-write** and **list-after-write** consistency m
|
|||||||
|
|
||||||
**In our tests we also found ext4 does not honor POSIX O_DIRECT/Fdatasync semantics; ext4 trades performance for consistency guarantees. Please avoid ext4 in your setup.**
|
**In our tests we also found ext4 does not honor POSIX O_DIRECT/Fdatasync semantics; ext4 trades performance for consistency guarantees. Please avoid ext4 in your setup.**
|
||||||
|
|
||||||
**If MinIO distributed setup is using NFS volumes underneath it is not guaranteed MinIO will provide these consistency guarantees since NFS is not strictly consistent (If you must use NFS we recommend that you atleast use NFSv4 instead of NFSv3 for relatively better outcomes).**
|
**If MinIO distributed setup is using NFS volumes underneath it is not guaranteed MinIO will provide these consistency guarantees since NFS is not strictly consistent (If you must use NFS we recommend that you at least use NFSv4 instead of NFSv3 for relatively better outcomes).**
|
||||||
|
|
||||||
## Get started
|
## Get started
|
||||||
|
|
||||||
|
@@ -80,7 +80,7 @@ if [ "${expected_checksum}" != "${actual_checksum}" ]; then
   exit
 fi

-# Compare the difference of the list of disks and their location, with the below exected output
+# Compare the list of disks and their locations against the expected output below
 diff <(./mc admin info minio1 --json | jq -r '.info.servers[].drives[] | "\(.pool_index),\(.set_index),\(.disk_index) \(.endpoint)"' | sort) <(
 cat <<EOF
 0,0,0 http://localhost:9001/tmp/xl/node9001/mnt/disk1
@@ -110,7 +110,7 @@ Verify if healing and replacing a drive works
 λ make verify-healing
 ```

-At this point in time the backport is ready to be submitted as a pull request to the relevant branch. A pull request is recommended to ensure [mint](http://github.com/minio/mint) tests are validated. Pull request also ensures code-reviews for the backports incase of any unforeseen regressions.
+At this point the backport is ready to be submitted as a pull request to the relevant branch. A pull request is recommended to ensure [mint](http://github.com/minio/mint) tests are validated; it also ensures the backport is code-reviewed in case of any unforeseen regressions.

 ### Building a hotfix binary and container

@@ -31,7 +31,7 @@ export MINIO_KMS_SECRET_KEY=my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl
 ```

 > You can choose an arbitrary name for the key - instead of `my-minio-key`.
-> Please note that loosing the `MINIO_KMS_SECRET_KEY` will cause data loss
+> Please note that losing the `MINIO_KMS_SECRET_KEY` will cause data loss
 > since you will not be able to decrypt the IAM/configuration data anymore.

 For distributed MinIO deployments, specify the *same* `MINIO_KMS_SECRET_KEY` for each MinIO server process.

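As a sketch (not part of this diff), one way to produce a value in the `<key-name>:<base64-key>` format shown above:

```sh
# Generate a random 256-bit key; the name ("my-minio-key") is arbitrary,
# but every server in a distributed deployment must export the same value.
export MINIO_KMS_SECRET_KEY="my-minio-key:$(head -c 32 /dev/urandom | base64)"
```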
@@ -4,7 +4,7 @@ MinIO uses a key-management-system (KMS) to support SSE-S3. If a client requests

 ## Quick Start

-MinIO supports multiple KMS implementations via our [KES](https://github.com/minio/kes#kes) project. We run a KES instance at `https://play.min.io:7373` for you to experiment and quickly get started. To run MinIO with a KMS just fetch the root identity, set the following environment variables and then start your MinIO server. If you havn't installed MinIO, yet, then follow the MinIO [install instructions](https://min.io/docs/minio/linux/index.html#quickstart-for-linux) first.
+MinIO supports multiple KMS implementations via our [KES](https://github.com/minio/kes#kes) project. We run a KES instance at `https://play.min.io:7373` for you to experiment and quickly get started. To run MinIO with a KMS, just fetch the root identity, set the following environment variables, and then start your MinIO server. If you haven't installed MinIO yet, follow the MinIO [install instructions](https://min.io/docs/minio/linux/index.html#quickstart-for-linux) first.

 ### 1. Fetch the root identity

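As a rough sketch (the exact values are not shown in this diff; the variable names follow the `MINIO_KMS_KES_*` convention and the file names are assumed), the environment variables referred to above typically point MinIO at the KES endpoint and the root identity's credentials:

```sh
# root.key/root.cert are assumed names for the credentials produced by the
# "Fetch the root identity" step above.
export MINIO_KMS_KES_ENDPOINT=https://play.min.io:7373
export MINIO_KMS_KES_KEY_FILE=root.key
export MINIO_KMS_KES_CERT_FILE=root.cert
export MINIO_KMS_KES_KEY_NAME=my-minio-key
```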
@@ -27,7 +27,7 @@ inhibit_rules:
     equal: ['alertname', 'dev', 'instance']
 ```

-This sample confoguration uses a `webhook` at http://127.0.0.1:8010/webhook to post the alerts.
+This sample configuration uses a `webhook` at http://127.0.0.1:8010/webhook to post the alerts.
 Start the AlertManager and it listens on port `9093` by default. Make sure your webhook is up and listening for the alerts.

 ## Configure Prometheus to use AlertManager

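For context (a sketch, not part of this diff; the receiver name is made up), the `webhook` mentioned above is wired into `alertmanager.yml` roughly like this:

```sh
# Append a webhook receiver to alertmanager.yml; AlertManager then posts
# firing alerts as JSON to the URL below.
cat <<EOF >> alertmanager.yml
route:
  receiver: 'minio-webhook'
receivers:
  - name: 'minio-webhook'
    webhook_configs:
      - url: 'http://127.0.0.1:8010/webhook'
EOF
```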
@@ -1150,7 +1150,7 @@
   "uid": "Prometheus"
 },
 "exemplar": true,
-"expr": "sum by (server,enpoint) (minio_cluster_replication_link_offline_duration_seconds{job=\"$scrape_jobs\"})",
+"expr": "sum by (server,endpoint) (minio_cluster_replication_link_offline_duration_seconds{job=\"$scrape_jobs\"})",
 "interval": "1m",
 "legendFormat": "{{server,endpoint}}",
 "refId": "A"
@@ -53,7 +53,7 @@ We found the following APIs to be redundant or less useful outside of AWS S3. If

 ## Object name restrictions on MinIO

-- Object name restrictions on MinIO are governed by OS and filesystem limitations. For example object names that contain characters `^*|\/&";` are unsupported on Windows platform or any other file systems that do not support filenames with special charaters.
+- Object name restrictions on MinIO are governed by OS and filesystem limitations. For example, object names that contain the characters `^*|\/&";` are unsupported on Windows or on any other filesystem that does not support filenames with special characters.

 > **This list is non-exhaustive; it depends on the operating system and filesystem under use - please consult your operating system vendor for a more comprehensive list of special characters**.

@@ -107,7 +107,7 @@ The returned user's DN and their password are then verified with the LDAP server

 ### Group membership search

-MinIO can be optionally configured to find the groups of a user from AD/LDAP by specifying the folllowing variables:
+MinIO can be optionally configured to find the groups of a user from AD/LDAP by specifying the following variables:

 ```
 MINIO_IDENTITY_LDAP_GROUP_SEARCH_FILTER (string) search filter for groups e.g. "(&(objectclass=groupOfNames)(memberUid=%s))"
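A hedged example of setting these (the base-DN variable follows the same `MINIO_IDENTITY_LDAP_*` naming; the DN values are placeholders):

```sh
# The authenticating user is substituted into %s of the filter; the
# search runs under the given base DN.
export MINIO_IDENTITY_LDAP_GROUP_SEARCH_FILTER='(&(objectclass=groupOfNames)(memberUid=%s))'
export MINIO_IDENTITY_LDAP_GROUP_SEARCH_BASE_DN='ou=groups,dc=example,dc=com'
```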
@@ -265,7 +265,7 @@ Sample URLs for Keycloak are

 `config_url` - `http://localhost:8080/auth/realms/demo/.well-known/openid-configuration`

-JWT token returned by the Identity Provider should include a custom claim for the policy, this is required to create a STS user in MinIO. The name of the custom claim could be either `policy` or `<NAMESPACE_PREFIX>policy`. If there is no namespace then `claim_prefix` can be ingored. For example if the custom claim name is `https://min.io/policy` then, `claim_prefix` should be set as `https://min.io/`.
+The JWT token returned by the Identity Provider should include a custom claim for the policy; this is required to create an STS user in MinIO. The name of the custom claim can be either `policy` or `<NAMESPACE_PREFIX>policy`. If there is no namespace then `claim_prefix` can be ignored. For example, if the custom claim name is `https://min.io/policy`, then `claim_prefix` should be set to `https://min.io/`.

 - Open MinIO Console and click `Login with SSO`
 - The user will be redirected to the Identity Provider login page
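A sketch of the corresponding server-side settings for the claim/prefix discussion above (not part of this diff; the alias `myminio` and the Keycloak values are placeholders, and `identity_openid` is the assumed sub-system name):

```sh
# With a namespaced claim "https://min.io/policy", claim_name stays "policy"
# and claim_prefix carries the namespace.
mc admin config set myminio identity_openid \
  config_url="http://localhost:8080/auth/realms/demo/.well-known/openid-configuration" \
  claim_name="policy" \
  claim_prefix="https://min.io/"
```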
@@ -50,7 +50,7 @@ func Parse(amzDateStr string) (time.Time, error) {
 }

 var httpTimeFormats = []string{
-	// Do not chagne this order, http time format dates
+	// Do not change this order, http time format dates
 	// are usually in http.TimeFormat however there are
 	// situations where for example aws-sdk-java doesn't
 	// send the correct format.
@@ -89,7 +89,7 @@ func (a And) ContainsDuplicateTag() bool {
 }

 // BySize returns true when sz satisfies a
-// ObjectSizeLessThan/ObjectSizeGreaterthan or a logial AND of these predicates
+// ObjectSizeLessThan/ObjectSizeGreaterthan or a logical AND of these predicates
 // Note: And combines size and other predicates like Tags, Prefix, etc. This
 // method applies exclusively to size predicates only.
 func (a And) BySize(sz int64) bool {
@@ -249,7 +249,7 @@ func (f Filter) TestTags(userTags string) bool {
 	return false
 }

-// BySize returns true if sz satisifies one of ObjectSizeGreaterThan,
+// BySize returns true if sz satisfies one of ObjectSizeGreaterThan,
 // ObjectSizeLessThan predicates or a combination of them via And.
 func (f Filter) BySize(sz int64) bool {
 	if f.ObjectSizeGreaterThan > 0 &&
2
internal/config/cache/cache.go
vendored
@@ -135,7 +135,7 @@ func (c Config) Get(r *CondCheck) (*ObjectInfo, error) {
 		return nil, err
 	}

-	// We do not want Get's to take so much time, anything
+	// We do not want Gets to take so much time, anything
 	// beyond 250ms we should cut it, remote cache is too
 	// busy already.
 	ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
@@ -604,7 +604,7 @@ func LookupSite(siteKV KVS, regionKV KVS) (s Site, err error) {
 }

 // CheckValidKeys - checks if inputs KVS has the necessary keys,
-// returns error if it find extra or superflous keys.
+// returns error if it finds extra or superfluous keys.
 func CheckValidKeys(subSys string, kv KVS, validKVS KVS, deprecatedKeys ...string) error {
 	nkv := KVS{}
 	for _, kv := range kv {
@@ -258,7 +258,7 @@ func TestExpCorrect(t *testing.T) {
 	if err := updateClaimsExpiry("3600", claimsMap.MapClaims); err != nil {
 		t.Error(err)
 	}
-	// Build simple toke with updated expiration claim
+	// Build simple token with updated expiration claim
 	token := jwtgo.NewWithClaims(jwtgo.SigningMethodHS256, claimsMap)
 	tokenString, err := token.SignedString(signKey)
 	if err != nil {
@@ -53,7 +53,7 @@ var (
 	ErrAccessTokenExpired = errors.New("access_token expired or unauthorized")
 )

-// Provider implements indentity provider specific admin operations, such as
+// Provider implements identity provider specific admin operations, such as
 // looking up users, fetching additional attributes etc.
 type Provider interface {
 	LoginWithUser(username, password string) error
@@ -91,7 +91,7 @@ func (p *providerCfg) initializeProvider(cfgGet func(string) string, transport h
 		)
 		return err
 	default:
-		return fmt.Errorf("Unsupport vendor %s", keyCloakVendor)
+		return fmt.Errorf("Unsupported vendor %s", keyCloakVendor)
 	}
 }

@@ -197,7 +197,7 @@ func (h *metrics) accumRequestRTT(reqStartTime time.Time, rttMs float64, isSucce
 		}
 	}

-	// Round the reqest time *down* to whole minute.
+	// Round the request time *down* to whole minute.
 	reqTimeMinute := reqStartTime.Truncate(time.Minute)
 	if reqTimeMinute.After(h.currentMinute.statsTime) {
 		// Drop the last full minute now, since we got a request for a time we
@@ -50,7 +50,7 @@ func (set TargetIDSet) Union(sset TargetIDSet) TargetIDSet {
 	return nset
 }

-// Difference - returns diffrence with given set as new set.
+// Difference - returns difference with given set as new set.
 func (set TargetIDSet) Difference(sset TargetIDSet) TargetIDSet {
 	nset := NewTargetIDSet()
 	for k := range set {
@@ -143,7 +143,7 @@ var (
 		},
 		config.HelpKV{
 			Key:         target.AmqpPublisherConfirms,
-			Description: "enable consumer acknowlegement and publisher confirms, use this along with queue_dir for guaranteed delivery of all events",
+			Description: "enable consumer acknowledgement and publisher confirms, use this along with queue_dir for guaranteed delivery of all events",
 			Optional:    true,
 			Type:        "on|off",
 		},
@@ -74,7 +74,7 @@ func GenerateIV(random io.Reader) (iv [32]byte) {
 // SealedKey represents a sealed object key. It can be stored
 // at an untrusted location.
 type SealedKey struct {
-	Key       [64]byte // The encrypted and authenticted object-key.
+	Key       [64]byte // The encrypted and authenticated object-key.
 	IV        [32]byte // The random IV used to encrypt the object-key.
 	Algorithm string   // The sealing algorithm used to encrypt the object key.
 }
@@ -156,7 +156,7 @@ func IsEncrypted(metadata map[string]string) (Type, bool) {
 }

 // CreateMultipartMetadata adds the multipart flag entry to metadata
-// and returns modifed metadata. It allocates a new metadata map if
+// and returns modified metadata. It allocates a new metadata map if
 // metadata is nil.
 func CreateMultipartMetadata(metadata map[string]string) map[string]string {
 	if metadata == nil {
Some files were not shown because too many files have changed in this diff.