mirror of https://github.com/minio/minio.git
use typos instead of codespell (#19088)
This commit is contained in:
parent 56887f3208
commit 53aa8f5650
@@ -1,8 +0,0 @@
-[codespell]
-# certs_test.go - has lots of ceritificates.
-skip = go.mod,go.sum,*.txt,LICENSE,*.zip,.git,*.pdf,*.svg,.codespellrc,CREDITS,certs_test.go
-check-hidden = true
-ignore-regex = \b(newfolder/afile|filterIn|HelpES)\b
-ignore-words-list = inout,bui,to,bu,te,ot,toi,ist,parms,flate
@@ -1,20 +0,0 @@
----
-name: Codespell
-
-on:
-  pull_request:
-    branches: [master]
-
-permissions:
-  contents: read
-
-jobs:
-  codespell:
-    name: Check for spelling errors
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-      - name: Codespell
-        uses: codespell-project/actions-codespell@v2
@@ -0,0 +1,15 @@
+---
+name: Test GitHub Action
+on: [pull_request]
+
+jobs:
+  run:
+    name: Spell Check with Typos
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout Actions Repository
+        uses: actions/checkout@v4
+
+      - name: Check spelling of repo
+        uses: crate-ci/typos@master
@@ -0,0 +1,31 @@
+[files]
+extend-exclude = [
+    ".git/",
+    "docs/",
+]
+ignore-hidden = false
+
+[default]
+extend-ignore-re = [
+    "Patrick Collison",
+    "Copyright 2014 Unknwon",
+    "[0-9A-Za-z/+=]{64}",
+    "ZXJuZXQxDjAMBgNVBA-some-junk-Q4wDAYDVQQLEwVNaW5pbzEOMAwGA1UEAxMF",
+    "eyJmb28iOiJiYXIifQ",
+    'http\.Header\{"X-Amz-Server-Side-Encryptio":',
+    'sessionToken',
+]
+
+[default.extend-words]
+"encrypter" = "encrypter"
+"requestor" = "requestor"
+
+[default.extend-identifiers]
+"bui" = "bui"
+"toi" = "toi"
+"ot" = "ot"
+"dm2nd" = "dm2nd"
+"HashiCorp" = "HashiCorp"
+"ParseND" = "ParseND"
+"ParseNDStream" = "ParseNDStream"
+"TestGetPartialObjectMisAligned" = "TestGetPartialObjectMisAligned"
@@ -1192,7 +1192,7 @@ type healInitParams struct {
 }
 
 // extractHealInitParams - Validates params for heal init API.
-func extractHealInitParams(vars map[string]string, qParms url.Values, r io.Reader) (hip healInitParams, err APIErrorCode) {
+func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Reader) (hip healInitParams, err APIErrorCode) {
 	hip.bucket = vars[mgmtBucket]
 	hip.objPrefix = vars[mgmtPrefix]
 
@@ -1213,13 +1213,13 @@ func extractHealInitParams(vars map[string]string, qParms url.Values, r io.Reade
 		return
 	}
 
-	if len(qParms[mgmtClientToken]) > 0 {
-		hip.clientToken = qParms[mgmtClientToken][0]
+	if len(qParams[mgmtClientToken]) > 0 {
+		hip.clientToken = qParams[mgmtClientToken][0]
 	}
-	if _, ok := qParms[mgmtForceStart]; ok {
+	if _, ok := qParams[mgmtForceStart]; ok {
 		hip.forceStart = true
 	}
-	if _, ok := qParms[mgmtForceStop]; ok {
+	if _, ok := qParams[mgmtForceStop]; ok {
 		hip.forceStop = true
 	}
 
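
For readers unfamiliar with the pattern being renamed here: Go's url.Values is a map[string][]string, so a query key can be read via its first value or tested by mere presence. A minimal, hypothetical sketch of that pattern (illustrative names, not MinIO code):

package main

import (
	"fmt"
	"net/url"
)

// parseHealFlags mirrors the handling above: a token is read from the
// key's first value, and a key's mere presence toggles a boolean flag.
func parseHealFlags(qParams url.Values) (token string, forceStart bool) {
	if len(qParams["clientToken"]) > 0 {
		token = qParams["clientToken"][0]
	}
	if _, ok := qParams["forceStart"]; ok {
		forceStart = true
	}
	return token, forceStart
}

func main() {
	q, _ := url.ParseQuery("clientToken=abc&forceStart")
	fmt.Println(parseHealFlags(q)) // abc true
}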
@@ -348,7 +348,7 @@ func TestExtractHealInitParams(t *testing.T) {
 		}
 		return v
 	}
-	qParmsArr := []url.Values{
+	qParamsArr := []url.Values{
 		// Invalid cases
 		mkParams("", true, true),
 		mkParams("111", true, true),
@@ -373,9 +373,9 @@ func TestExtractHealInitParams(t *testing.T) {
 	body := `{"recursive": false, "dryRun": true, "remove": false, "scanMode": 0}`
 
 	// Test all combinations!
-	for pIdx, parms := range qParmsArr {
+	for pIdx, params := range qParamsArr {
 		for vIdx, vars := range varsArr {
-			_, err := extractHealInitParams(vars, parms, bytes.NewReader([]byte(body)))
+			_, err := extractHealInitParams(vars, params, bytes.NewReader([]byte(body)))
 			isErrCase := false
 			if pIdx < 4 || vIdx < 1 {
 				isErrCase = true
@@ -390,7 +390,7 @@ const (
 	ErrParseExpectedIdentForGroupName
 	ErrParseExpectedIdentForAlias
 	ErrParseUnsupportedCallWithStar
-	ErrParseNonUnaryAgregateFunctionCall
+	ErrParseNonUnaryAggregateFunctionCall
 	ErrParseMalformedJoin
 	ErrParseExpectedIdentForAt
 	ErrParseAsteriskIsNotAloneInSelectList
@@ -1899,8 +1899,8 @@ var errorCodes = errorCodeMap{
 		Description:    "Only COUNT with (*) as a parameter is supported in the SQL expression.",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
-	ErrParseNonUnaryAgregateFunctionCall: {
-		Code:           "ParseNonUnaryAgregateFunctionCall",
+	ErrParseNonUnaryAggregateFunctionCall: {
+		Code:           "ParseNonUnaryAggregateFunctionCall",
 		Description:    "Only one argument is supported for aggregate functions in the SQL expression.",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
@@ -944,7 +944,7 @@ func writeSuccessResponseHeadersOnly(w http.ResponseWriter) {
 	writeResponse(w, http.StatusOK, nil, mimeNone)
 }
 
-// writeErrorRespone writes error headers
+// writeErrorResponse writes error headers
 func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
 	if err.HTTPStatusCode == http.StatusServiceUnavailable {
 		// Set retry-after header to indicate user-agents to retry request after 120secs.

File diff suppressed because one or more lines are too long
@@ -237,7 +237,7 @@ func TestIsRequestPresignedSignatureV2(t *testing.T) {
 	}
 }
 
-// TestIsRequestPresignedSignatureV4 - Test validates the logic for presign signature verision v4 detection.
+// TestIsRequestPresignedSignatureV4 - Test validates the logic for presign signature version v4 detection.
 func TestIsRequestPresignedSignatureV4(t *testing.T) {
 	testCases := []struct {
 		inputQueryKey string
@@ -580,7 +580,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
 			expectedRespStatus: http.StatusOK,
 		},
 		// Test case - 2.
-		// Test case with invalid accessKey to produce and validate Signature MisMatch error.
+		// Test case with invalid accessKey to produce and validate Signature Mismatch error.
 		{
 			bucketName: bucketName,
 			accessKey:  "abcd",
@@ -289,7 +289,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
 		return
 	}
 
-	// Extract all the litsObjectsV1 query params to their native values.
+	// Extract all the listObjectsV1 query params to their native values.
 	prefix, marker, delimiter, maxKeys, encodingType, s3Error := getListObjectsV1Args(r.Form)
 	if s3Error != ErrNone {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
@@ -464,7 +464,7 @@ func (r *ReplicationStats) getLatestReplicationStats(bucket string) (s BucketSta
 	return r.calculateBucketReplicationStats(bucket, bucketStats)
 }
 
-func (r *ReplicationStats) incQ(bucket string, sz int64, isDeleleRepl bool, opType replication.Type) {
+func (r *ReplicationStats) incQ(bucket string, sz int64, isDeleteRepl bool, opType replication.Type) {
 	r.qCache.Lock()
 	defer r.qCache.Unlock()
 	v, ok := r.qCache.bucketStats[bucket]
@@ -58,8 +58,8 @@ func commonETags(etags []string) (etag string, maxima int) {
 }
 
 // commonTime returns a maximally occurring time from a list of time.
-func commonTimeAndOccurence(times []time.Time, group time.Duration) (maxTime time.Time, maxima int) {
-	timeOccurenceMap := make(map[int64]int, len(times))
+func commonTimeAndOccurrence(times []time.Time, group time.Duration) (maxTime time.Time, maxima int) {
+	timeOccurrenceMap := make(map[int64]int, len(times))
 	groupNano := group.Nanoseconds()
 	// Ignore the uuid sentinel and count the rest.
 	for _, t := range times {
@@ -68,7 +68,7 @@ func commonTimeAndOccurence(times []time.Time, group time.Duration) (maxTime tim
 		}
 		nano := t.UnixNano()
 		if group > 0 {
-			for k := range timeOccurenceMap {
+			for k := range timeOccurrenceMap {
 				if k == nano {
 					// We add to ourself later
 					continue
@@ -79,12 +79,12 @@ func commonTimeAndOccurence(times []time.Time, group time.Duration) (maxTime tim
 				}
 				// We are within the limit
 				if diff < groupNano {
-					timeOccurenceMap[k]++
+					timeOccurrenceMap[k]++
 				}
 			}
 		}
 		// Add ourself...
-		timeOccurenceMap[nano]++
+		timeOccurrenceMap[nano]++
 	}
 
 	maxima = 0 // Counter for remembering max occurrence of elements.
@@ -92,7 +92,7 @@ func commonTimeAndOccurence(times []time.Time, group time.Duration) (maxTime tim
 
 	// Find the common cardinality from previously collected
 	// occurrences of elements.
-	for nano, count := range timeOccurenceMap {
+	for nano, count := range timeOccurrenceMap {
 		if count < maxima {
 			continue
 		}
@@ -111,7 +111,7 @@ func commonTimeAndOccurence(times []time.Time, group time.Duration) (maxTime tim
 // commonTime returns a maximally occurring time from a list of time if it
 // occurs >= quorum, else return timeSentinel
 func commonTime(modTimes []time.Time, quorum int) time.Time {
-	if modTime, count := commonTimeAndOccurence(modTimes, 0); count >= quorum {
+	if modTime, count := commonTimeAndOccurrence(modTimes, 0); count >= quorum {
 		return modTime
 	}
 
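
For context, the helper renamed above picks the most frequently occurring timestamp from a list. A minimal standalone sketch of the exact-match case (group == 0) — hypothetical and independent of MinIO's internals:

package main

import (
	"fmt"
	"time"
)

// mostCommonTime returns the timestamp that occurs most often, with its
// count — the caller can then compare the count against a quorum.
func mostCommonTime(times []time.Time) (maxTime time.Time, maxima int) {
	occurrences := make(map[int64]int, len(times))
	for _, t := range times {
		occurrences[t.UnixNano()]++
	}
	for nano, count := range occurrences {
		if count > maxima {
			maxima = count
			maxTime = time.Unix(0, nano).UTC()
		}
	}
	return maxTime, maxima
}

func main() {
	t0 := time.Unix(1700000000, 0)
	fmt.Println(mostCommonTime([]time.Time{t0, t0, t0.Add(time.Second)})) // t0, 2
}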
@@ -614,7 +614,7 @@ func TestHealingDanglingObject(t *testing.T) {
 	resetGlobalHealState()
 	defer resetGlobalHealState()
 
-	// Set globalStoragClass.STANDARD to EC:4 for this test
+	// Set globalStorageClass.STANDARD to EC:4 for this test
 	saveSC := globalStorageClass
 	defer func() {
 		globalStorageClass.Update(saveSC)
@@ -964,7 +964,7 @@ func (er erasureObjects) getObjectInfo(ctx context.Context, bucket, object strin
 	return objInfo, nil
 }
 
-// getObjectInfoAndQuroum - wrapper for reading object metadata and constructs ObjectInfo, additionally returns write quorum for the object.
+// getObjectInfoAndQuorum - wrapper for reading object metadata and constructs ObjectInfo, additionally returns write quorum for the object.
 func (er erasureObjects) getObjectInfoAndQuorum(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, wquorum int, err error) {
 	fi, _, _, err := er.getObjectFileInfo(ctx, bucket, object, opts, false)
 	if err != nil {
@@ -32,7 +32,7 @@ import (
 )
 
 // Tests validate bucket LocationConstraint.
-func TestIsValidLocationContraint(t *testing.T) {
+func TestIsValidLocationConstraint(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
@@ -538,7 +538,7 @@ func getNodeDriveTimeoutErrorsMD() MetricDescription {
 	}
 }
 
-func getNodeDriveAvailablityErrorsMD() MetricDescription {
+func getNodeDriveAvailabilityErrorsMD() MetricDescription {
 	return MetricDescription{
 		Namespace: nodeMetricNamespace,
 		Subsystem: driveSubsystem,
@@ -3466,7 +3466,7 @@ func getLocalStorageMetrics(opts MetricsGroupOpts) *MetricsGroup {
 			})
 
 			metrics = append(metrics, Metric{
-				Description:    getNodeDriveAvailablityErrorsMD(),
+				Description:    getNodeDriveAvailabilityErrorsMD(),
 				Value:          float64(disk.Metrics.TotalErrorsAvailability),
 				VariableLabels: map[string]string{"drive": disk.DrivePath},
 			})
@@ -680,13 +680,13 @@ func isErrBucketNotFound(err error) bool {
 	return errors.As(err, &bkNotFound)
 }
 
-// isErrReadQuorum check if the error type is InsufficentReadQuorum
+// isErrReadQuorum check if the error type is InsufficientReadQuorum
 func isErrReadQuorum(err error) bool {
 	var rquorum InsufficientReadQuorum
 	return errors.As(err, &rquorum)
 }
 
-// isErrWriteQuorum check if the error type is InsufficentWriteQuorum
+// isErrWriteQuorum check if the error type is InsufficientWriteQuorum
 func isErrWriteQuorum(err error) bool {
 	var rquorum InsufficientWriteQuorum
 	return errors.As(err, &rquorum)
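
The two helpers above rely on errors.As, which matches a concrete error type anywhere in a wrapped chain. A minimal, self-contained illustration with a stand-in type (hypothetical, not MinIO's definition):

package main

import (
	"errors"
	"fmt"
)

// InsufficientReadQuorum is a stand-in typed error; errors.As still finds
// it when it has been wrapped via fmt.Errorf with the %w verb.
type InsufficientReadQuorum struct{}

func (InsufficientReadQuorum) Error() string { return "insufficient read quorum" }

func isErrReadQuorum(err error) bool {
	var rquorum InsufficientReadQuorum
	return errors.As(err, &rquorum)
}

func main() {
	wrapped := fmt.Errorf("read failed: %w", InsufficientReadQuorum{})
	fmt.Println(isErrReadQuorum(wrapped)) // true
}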
@@ -227,7 +227,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
 		inputReaderData string
 		inputMd5        string
 		inputSHA256     string
-		intputDataSize  int64
+		inputDataSize   int64
 		// flag indicating whether the test should pass.
 		shouldPass bool
 		// expected error output.
@@ -287,27 +287,27 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
 		// Test case - 14.
 		// Input with size more than the size of actual data inside the reader.
 		{
-			bucketName: bucket, objName: object, uploadID: uploadID, PartID: 1, inputReaderData: "abcd", inputMd5: "e2fc714c4727ee9395f324cd2e7f3335", intputDataSize: int64(len("abcd") + 1),
+			bucketName: bucket, objName: object, uploadID: uploadID, PartID: 1, inputReaderData: "abcd", inputMd5: "e2fc714c4727ee9395f324cd2e7f3335", inputDataSize: int64(len("abcd") + 1),
 			expectedError: hash.BadDigest{ExpectedMD5: "e2fc714c4727ee9395f324cd2e7f3335", CalculatedMD5: "e2fc714c4727ee9395f324cd2e7f331f"},
 		},
 		// Test case - 15.
 		// Input with size less than the size of actual data inside the reader.
 		{
-			bucketName: bucket, objName: object, uploadID: uploadID, PartID: 1, inputReaderData: "abcd", inputMd5: "900150983cd24fb0d6963f7d28e17f73", intputDataSize: int64(len("abcd") - 1),
+			bucketName: bucket, objName: object, uploadID: uploadID, PartID: 1, inputReaderData: "abcd", inputMd5: "900150983cd24fb0d6963f7d28e17f73", inputDataSize: int64(len("abcd") - 1),
 			expectedError: ioutil.ErrOverread,
 		},
 
 		// Test case - 16-19.
 		// Validating for success cases.
-		{bucketName: bucket, objName: object, uploadID: uploadID, PartID: 1, inputReaderData: "abcd", inputMd5: "e2fc714c4727ee9395f324cd2e7f331f", inputSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", intputDataSize: int64(len("abcd")), shouldPass: true},
-		{bucketName: bucket, objName: object, uploadID: uploadID, PartID: 2, inputReaderData: "efgh", inputMd5: "1f7690ebdd9b4caf8fab49ca1757bf27", inputSHA256: "e5e088a0b66163a0a26a5e053d2a4496dc16ab6e0e3dd1adf2d16aa84a078c9d", intputDataSize: int64(len("efgh")), shouldPass: true},
-		{bucketName: bucket, objName: object, uploadID: uploadID, PartID: 3, inputReaderData: "ijkl", inputMd5: "09a0877d04abf8759f99adec02baf579", inputSHA256: "005c19658919186b85618c5870463eec8d9b8c1a9d00208a5352891ba5bbe086", intputDataSize: int64(len("abcd")), shouldPass: true},
-		{bucketName: bucket, objName: object, uploadID: uploadID, PartID: 4, inputReaderData: "mnop", inputMd5: "e132e96a5ddad6da8b07bba6f6131fef", inputSHA256: "f1afc31479522d6cff1ed068f93998f05a8cd3b22f5c37d7f307084f62d1d270", intputDataSize: int64(len("abcd")), shouldPass: true},
+		{bucketName: bucket, objName: object, uploadID: uploadID, PartID: 1, inputReaderData: "abcd", inputMd5: "e2fc714c4727ee9395f324cd2e7f331f", inputSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", inputDataSize: int64(len("abcd")), shouldPass: true},
+		{bucketName: bucket, objName: object, uploadID: uploadID, PartID: 2, inputReaderData: "efgh", inputMd5: "1f7690ebdd9b4caf8fab49ca1757bf27", inputSHA256: "e5e088a0b66163a0a26a5e053d2a4496dc16ab6e0e3dd1adf2d16aa84a078c9d", inputDataSize: int64(len("efgh")), shouldPass: true},
+		{bucketName: bucket, objName: object, uploadID: uploadID, PartID: 3, inputReaderData: "ijkl", inputMd5: "09a0877d04abf8759f99adec02baf579", inputSHA256: "005c19658919186b85618c5870463eec8d9b8c1a9d00208a5352891ba5bbe086", inputDataSize: int64(len("abcd")), shouldPass: true},
+		{bucketName: bucket, objName: object, uploadID: uploadID, PartID: 4, inputReaderData: "mnop", inputMd5: "e132e96a5ddad6da8b07bba6f6131fef", inputSHA256: "f1afc31479522d6cff1ed068f93998f05a8cd3b22f5c37d7f307084f62d1d270", inputDataSize: int64(len("abcd")), shouldPass: true},
 	}
 
 	// Validate all the test cases.
 	for i, testCase := range testCases {
-		actualInfo, actualErr := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetPutObjReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, testCase.inputSHA256), opts)
+		actualInfo, actualErr := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetPutObjReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.inputDataSize, testCase.inputMd5, testCase.inputSHA256), opts)
 		// All are test cases above are expected to fail.
 		if actualErr != nil && testCase.shouldPass {
 			t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
@@ -410,7 +410,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
 		PartID          int
 		inputReaderData string
 		inputMd5        string
-		intputDataSize  int64
+		inputDataSize   int64
 		expectedMd5     string
 	}{
 		// Case 1-4.
@@ -439,7 +439,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
 	sha256sum := ""
 	// Iterating over creatPartCases to generate multipart chunks.
 	for _, testCase := range createPartCases {
-		_, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetPutObjReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), opts)
+		_, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetPutObjReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.inputDataSize, testCase.inputMd5, sha256sum), opts)
 		if err != nil {
 			t.Fatalf("%s : %s", instanceType, err.Error())
 		}
@@ -1263,7 +1263,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
 		PartID          int
 		inputReaderData string
 		inputMd5        string
-		intputDataSize  int64
+		inputDataSize   int64
 		expectedMd5     string
 	}{
 		// Case 1-4.
@@ -1277,7 +1277,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
 	sha256sum := ""
 	// Iterating over creatPartCases to generate multipart chunks.
 	for _, testCase := range createPartCases {
-		_, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetPutObjReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), opts)
+		_, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetPutObjReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.inputDataSize, testCase.inputMd5, sha256sum), opts)
 		if err != nil {
 			t.Fatalf("%s : %s", instanceType, err.Error())
 		}
@@ -1500,7 +1500,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
 		PartID          int
 		inputReaderData string
 		inputMd5        string
-		intputDataSize  int64
+		inputDataSize   int64
 		expectedMd5     string
 	}{
 		// Case 1-4.
@@ -1514,7 +1514,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
 	sha256sum := ""
 	// Iterating over creatPartCases to generate multipart chunks.
 	for _, testCase := range createPartCases {
-		_, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetPutObjReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), opts)
+		_, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetPutObjReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.inputDataSize, testCase.inputMd5, sha256sum), opts)
 		if err != nil {
 			t.Fatalf("%s : %s", instanceType, err.Error())
 		}
@@ -1744,7 +1744,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
 		PartID          int
 		inputReaderData string
 		inputMd5        string
-		intputDataSize  int64
+		inputDataSize   int64
 	}{
 		// Case 1-4.
 		// Creating sequence of parts for same uploadID.
@@ -1761,7 +1761,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
 	var opts ObjectOptions
 	// Iterating over creatPartCases to generate multipart chunks.
 	for _, part := range parts {
-		_, err = obj.PutObjectPart(context.Background(), part.bucketName, part.objName, part.uploadID, part.PartID, mustGetPutObjReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, sha256sum), opts)
+		_, err = obj.PutObjectPart(context.Background(), part.bucketName, part.objName, part.uploadID, part.PartID, mustGetPutObjReader(t, bytes.NewBufferString(part.inputReaderData), part.inputDataSize, part.inputMd5, sha256sum), opts)
 		if err != nil {
 			t.Fatalf("%s : %s", instanceType, err)
 		}
@@ -70,12 +70,12 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
 	invalidMD5Header := md5Header([]byte("meh"))
 
 	testCases := []struct {
 		bucketName     string
 		objName        string
 		inputData      []byte
 		inputMeta      map[string]string
 		inputSHA256    string
-		intputDataSize int64
+		inputDataSize  int64
 		// expected error output.
 		expectedMd5   string
 		expectedError error
@@ -106,7 +106,7 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
 		7: {
 			bucketName: bucket, objName: object, inputData: []byte("abcd"),
 			inputMeta:   map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"},
-			inputSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580", intputDataSize: int64(len("abcd")),
+			inputSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580", inputDataSize: int64(len("abcd")),
 			expectedError: hash.SHA256Mismatch{
 				ExpectedSHA256:   "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580",
 				CalculatedSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589",
@@ -116,76 +116,76 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
 		// Input with size more than the size of actual data inside the reader.
 		8: {
 			bucketName: bucket, objName: object, inputData: []byte("abcd"),
-			inputMeta: map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331e"}, intputDataSize: int64(len("abcd") + 1),
+			inputMeta: map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331e"}, inputDataSize: int64(len("abcd") + 1),
 			expectedError: hash.BadDigest{ExpectedMD5: "e2fc714c4727ee9395f324cd2e7f331e", CalculatedMD5: "e2fc714c4727ee9395f324cd2e7f331f"},
 		},
 
 		// Input with size less than the size of actual data inside the reader.
 		9: {
 			bucketName: bucket, objName: object, inputData: []byte("abcd"),
-			inputMeta: map[string]string{"etag": "900150983cd24fb0d6963f7d28e17f73"}, intputDataSize: int64(len("abcd") - 1),
+			inputMeta: map[string]string{"etag": "900150983cd24fb0d6963f7d28e17f73"}, inputDataSize: int64(len("abcd") - 1),
 			expectedError: ioutil.ErrOverread,
 		},
 
 		// Validating for success cases.
-		10: {bucketName: bucket, objName: object, inputData: []byte("abcd"), inputMeta: map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"}, intputDataSize: int64(len("abcd"))},
-		11: {bucketName: bucket, objName: object, inputData: []byte("efgh"), inputMeta: map[string]string{"etag": "1f7690ebdd9b4caf8fab49ca1757bf27"}, intputDataSize: int64(len("efgh"))},
-		12: {bucketName: bucket, objName: object, inputData: []byte("ijkl"), inputMeta: map[string]string{"etag": "09a0877d04abf8759f99adec02baf579"}, intputDataSize: int64(len("ijkl"))},
-		13: {bucketName: bucket, objName: object, inputData: []byte("mnop"), inputMeta: map[string]string{"etag": "e132e96a5ddad6da8b07bba6f6131fef"}, intputDataSize: int64(len("mnop"))},
+		10: {bucketName: bucket, objName: object, inputData: []byte("abcd"), inputMeta: map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"}, inputDataSize: int64(len("abcd"))},
+		11: {bucketName: bucket, objName: object, inputData: []byte("efgh"), inputMeta: map[string]string{"etag": "1f7690ebdd9b4caf8fab49ca1757bf27"}, inputDataSize: int64(len("efgh"))},
+		12: {bucketName: bucket, objName: object, inputData: []byte("ijkl"), inputMeta: map[string]string{"etag": "09a0877d04abf8759f99adec02baf579"}, inputDataSize: int64(len("ijkl"))},
+		13: {bucketName: bucket, objName: object, inputData: []byte("mnop"), inputMeta: map[string]string{"etag": "e132e96a5ddad6da8b07bba6f6131fef"}, inputDataSize: int64(len("mnop"))},
 
 		// With no metadata
-		14: {bucketName: bucket, objName: object, inputData: data, intputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},
-		15: {bucketName: bucket, objName: object, inputData: nilBytes, intputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes)},
-		16: {bucketName: bucket, objName: object, inputData: fiveMBBytes, intputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes)},
+		14: {bucketName: bucket, objName: object, inputData: data, inputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},
+		15: {bucketName: bucket, objName: object, inputData: nilBytes, inputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes)},
+		16: {bucketName: bucket, objName: object, inputData: fiveMBBytes, inputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes)},
 
 		// With arbitrary metadata
-		17: {bucketName: bucket, objName: object, inputData: data, inputMeta: map[string]string{"answer": "42"}, intputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},
-		18: {bucketName: bucket, objName: object, inputData: nilBytes, inputMeta: map[string]string{"answer": "42"}, intputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes)},
-		19: {bucketName: bucket, objName: object, inputData: fiveMBBytes, inputMeta: map[string]string{"answer": "42"}, intputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes)},
+		17: {bucketName: bucket, objName: object, inputData: data, inputMeta: map[string]string{"answer": "42"}, inputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},
+		18: {bucketName: bucket, objName: object, inputData: nilBytes, inputMeta: map[string]string{"answer": "42"}, inputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes)},
+		19: {bucketName: bucket, objName: object, inputData: fiveMBBytes, inputMeta: map[string]string{"answer": "42"}, inputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes)},
 
 		// With valid md5sum and sha256.
-		20: {bucketName: bucket, objName: object, inputData: data, inputMeta: md5Header(data), inputSHA256: getSHA256Hash(data), intputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},
-		21: {bucketName: bucket, objName: object, inputData: nilBytes, inputMeta: md5Header(nilBytes), inputSHA256: getSHA256Hash(nilBytes), intputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes)},
-		22: {bucketName: bucket, objName: object, inputData: fiveMBBytes, inputMeta: md5Header(fiveMBBytes), inputSHA256: getSHA256Hash(fiveMBBytes), intputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes)},
+		20: {bucketName: bucket, objName: object, inputData: data, inputMeta: md5Header(data), inputSHA256: getSHA256Hash(data), inputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},
+		21: {bucketName: bucket, objName: object, inputData: nilBytes, inputMeta: md5Header(nilBytes), inputSHA256: getSHA256Hash(nilBytes), inputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes)},
+		22: {bucketName: bucket, objName: object, inputData: fiveMBBytes, inputMeta: md5Header(fiveMBBytes), inputSHA256: getSHA256Hash(fiveMBBytes), inputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes)},
 
 		// data with invalid md5sum in header
 		23: {
-			bucketName: bucket, objName: object, inputData: data, inputMeta: invalidMD5Header, intputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data),
+			bucketName: bucket, objName: object, inputData: data, inputMeta: invalidMD5Header, inputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data),
 			expectedError: hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(data)},
 		},
 		24: {
-			bucketName: bucket, objName: object, inputData: nilBytes, inputMeta: invalidMD5Header, intputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes),
+			bucketName: bucket, objName: object, inputData: nilBytes, inputMeta: invalidMD5Header, inputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes),
 			expectedError: hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(nilBytes)},
 		},
 		25: {
-			bucketName: bucket, objName: object, inputData: fiveMBBytes, inputMeta: invalidMD5Header, intputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes),
+			bucketName: bucket, objName: object, inputData: fiveMBBytes, inputMeta: invalidMD5Header, inputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes),
 			expectedError: hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(fiveMBBytes)},
 		},
 
 		// data with size different from the actual number of bytes available in the reader
-		26: {bucketName: bucket, objName: object, inputData: data, intputDataSize: int64(len(data) - 1), expectedMd5: getMD5Hash(data[:len(data)-1]), expectedError: ioutil.ErrOverread},
-		27: {bucketName: bucket, objName: object, inputData: nilBytes, intputDataSize: int64(len(nilBytes) + 1), expectedMd5: getMD5Hash(nilBytes), expectedError: IncompleteBody{Bucket: bucket, Object: object}},
+		26: {bucketName: bucket, objName: object, inputData: data, inputDataSize: int64(len(data) - 1), expectedMd5: getMD5Hash(data[:len(data)-1]), expectedError: ioutil.ErrOverread},
+		27: {bucketName: bucket, objName: object, inputData: nilBytes, inputDataSize: int64(len(nilBytes) + 1), expectedMd5: getMD5Hash(nilBytes), expectedError: IncompleteBody{Bucket: bucket, Object: object}},
 		28: {bucketName: bucket, objName: object, inputData: fiveMBBytes, expectedMd5: getMD5Hash(fiveMBBytes), expectedError: ioutil.ErrOverread},
 
 		// valid data with X-Amz-Meta- meta
-		29: {bucketName: bucket, objName: object, inputData: data, inputMeta: map[string]string{"X-Amz-Meta-AppID": "a42"}, intputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},
+		29: {bucketName: bucket, objName: object, inputData: data, inputMeta: map[string]string{"X-Amz-Meta-AppID": "a42"}, inputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},
 
 		// Put an empty object with a trailing slash
 		30: {bucketName: bucket, objName: "emptydir/", inputData: []byte{}, expectedMd5: getMD5Hash([]byte{})},
 		// Put an object inside the empty directory
-		31: {bucketName: bucket, objName: "emptydir/" + object, inputData: data, intputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},
+		31: {bucketName: bucket, objName: "emptydir/" + object, inputData: data, inputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},
 		// Put the empty object with a trailing slash again (refer to Test case 30), this needs to succeed
 		32: {bucketName: bucket, objName: "emptydir/", inputData: []byte{}, expectedMd5: getMD5Hash([]byte{})},
 
 		// With invalid crc32.
 		33: {
 			bucketName: bucket, objName: object, inputData: []byte("abcd"),
 			inputMeta: map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f", "x-amz-checksum-crc32": "abcd"},
-			intputDataSize: int64(len("abcd")),
+			inputDataSize: int64(len("abcd")),
 		},
 	}
 	for i, testCase := range testCases {
-		in := mustGetPutObjReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256)
+		in := mustGetPutObjReader(t, bytes.NewReader(testCase.inputData), testCase.inputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256)
 		objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, in, ObjectOptions{UserDefined: testCase.inputMeta})
 		if actualErr != nil && testCase.expectedError == nil {
 			t.Errorf("Test %d: %s: Expected to pass, but failed with: error %s.", i, instanceType, actualErr.Error())
@@ -243,11 +243,11 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
 	}
 
 	testCases := []struct {
 		bucketName     string
 		objName        string
 		inputData      []byte
 		inputMeta      map[string]string
-		intputDataSize int64
+		inputDataSize  int64
 		// flag indicating whether the test should pass.
 		shouldPass bool
 		// expected error output.
@@ -263,7 +263,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
 
 	sha256sum := ""
 	for i, testCase := range testCases {
-		objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetPutObjReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), ObjectOptions{UserDefined: testCase.inputMeta})
+		objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetPutObjReader(t, bytes.NewReader(testCase.inputData), testCase.inputDataSize, testCase.inputMeta["etag"], sha256sum), ObjectOptions{UserDefined: testCase.inputMeta})
 		if actualErr != nil && testCase.shouldPass {
 			t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
 		}
@@ -291,11 +291,11 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
 
 	// Validate the last test.
 	testCase := struct {
 		bucketName     string
 		objName        string
 		inputData      []byte
 		inputMeta      map[string]string
-		intputDataSize int64
+		inputDataSize  int64
 		// flag indicating whether the test should pass.
 		shouldPass bool
 		// expected error output.
@@ -312,7 +312,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
 		errErasureWriteQuorum,
 	}
 
-	_, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetPutObjReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), ObjectOptions{UserDefined: testCase.inputMeta})
+	_, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetPutObjReader(t, bytes.NewReader(testCase.inputData), testCase.inputDataSize, testCase.inputMeta["etag"], sha256sum), ObjectOptions{UserDefined: testCase.inputMeta})
 	if actualErr != nil && testCase.shouldPass {
 		t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", len(testCases)+1, instanceType, actualErr.Error())
 	}
@@ -149,7 +149,7 @@ func TestIsValidBucketName(t *testing.T) {
 		{"192.168.1.1", false},
 		{"$this-is-not-valid-too", false},
 		{"contains-$-dollar", false},
-		{"contains-^-carret", false},
+		{"contains-^-caret", false},
 		{"contains-$-dollar", false},
 		{"contains-$-dollar", false},
 		{"......", false},
@@ -195,7 +195,7 @@ func TestIsValidObjectName(t *testing.T) {
 		{"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", true},
 		{"SHØRT", true},
 		{"f*le", true},
-		{"contains-^-carret", true},
+		{"contains-^-caret", true},
 		{"contains-|-pipe", true},
 		{"contains-`-tick", true},
 		{"..test", true},
@@ -1302,7 +1302,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
 
 	copySourceHeader := map[string]string{"X-Amz-Copy-Source": "somewhere"}
 	invalidMD5Header := map[string]string{"Content-Md5": "42"}
-	inalidStorageClassHeader := map[string]string{xhttp.AmzStorageClass: "INVALID"}
+	invalidStorageClassHeader := map[string]string{xhttp.AmzStorageClass: "INVALID"}
 
 	addCustomHeaders := func(req *http.Request, customHeaders map[string]string) {
 		for k, value := range customHeaders {
@@ -1350,7 +1350,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
 			objectName: objectName,
 			data:       bytesData,
 			dataLen:    len(bytesData),
-			accessKey:  "Wrong-AcessID",
+			accessKey:  "Wrong-AccessID",
 			secretKey:  credentials.SecretKey,
 
 			expectedRespStatus: http.StatusForbidden,
@@ -1408,7 +1408,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
 		6: {
 			bucketName: bucketName,
 			objectName: objectName,
-			headers:    inalidStorageClassHeader,
+			headers:    invalidStorageClassHeader,
 			data:       bytesData,
 			dataLen:    len(bytesData),
 			accessKey:  credentials.AccessKey,
@@ -2574,7 +2574,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 }
 
 // Wrapper for calling NewMultipartUpload tests for both Erasure multiple disks and single node setup.
-// First register the HTTP handler for NewMutlipartUpload, then a HTTP request for NewMultipart upload is made.
+// First register the HTTP handler for NewMultipartUpload, then a HTTP request for NewMultipart upload is made.
 // The UploadID from the response body is parsed and its existence is asserted with an attempt to ListParts using it.
 func TestAPINewMultipartHandler(t *testing.T) {
 	defer DetectTestLeak(t)()
@@ -2614,7 +2614,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string
 		t.Fatalf("Invalid UploadID: <ERROR> %s", err)
 	}
 
-	// Testing the response for Invalid AcccessID.
+	// Testing the response for Invalid AccessID.
 	// Forcing the signature check to fail.
 	rec = httptest.NewRecorder()
 	// construct HTTP request for NewMultipart upload.
@@ -2664,7 +2664,7 @@
 		t.Fatalf("Invalid UploadID: <ERROR> %s", err)
 	}
 
-	// Testing the response for invalid AcccessID.
+	// Testing the response for invalid AccessID.
 	// Forcing the V2 signature check to fail.
 	recV2 = httptest.NewRecorder()
 	// construct HTTP request for NewMultipartUpload endpoint.
@@ -2819,7 +2819,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
 		PartID          int
 		inputReaderData string
 		inputMd5        string
-		intputDataSize  int64
+		inputDataSize   int64
 	}{
 		// Case 1-4.
 		// Creating sequence of parts for same uploadID.
@@ -2839,7 +2839,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
 	// Iterating over creatPartCases to generate multipart chunks.
 	for _, part := range parts {
 		_, err = obj.PutObjectPart(context.Background(), part.bucketName, part.objName, part.uploadID, part.PartID,
-			mustGetPutObjReader(t, strings.NewReader(part.inputReaderData), part.intputDataSize, part.inputMd5, ""), opts)
+			mustGetPutObjReader(t, strings.NewReader(part.inputReaderData), part.inputDataSize, part.inputMd5, ""), opts)
 		if err != nil {
 			t.Fatalf("%s : %s", instanceType, err)
 		}
@@ -3187,7 +3187,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
 		PartID          int
 		inputReaderData string
 		inputMd5        string
-		intputDataSize  int64
+		inputDataSize   int64
 	}{
 		// Case 1-4.
 		// Creating sequence of parts for same uploadID.
@@ -3207,7 +3207,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
 	// Iterating over createPartCases to generate multipart chunks.
 	for _, part := range parts {
 		_, err = obj.PutObjectPart(context.Background(), part.bucketName, part.objName, part.uploadID, part.PartID,
-			mustGetPutObjReader(t, strings.NewReader(part.inputReaderData), part.intputDataSize, part.inputMd5, ""), opts)
+			mustGetPutObjReader(t, strings.NewReader(part.inputReaderData), part.inputDataSize, part.inputMd5, ""), opts)
 		if err != nil {
 			t.Fatalf("%s : %s", instanceType, err)
 		}
@@ -3233,7 +3233,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
 			expectedRespStatus: http.StatusNoContent,
 		},
 		// Test case - 2.
-		// Abort non-existng upload ID.
+		// Abort non-existing upload ID.
 		{
 			bucket: bucketName,
 			object: objectName,
@@ -473,13 +473,13 @@ func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, t TestErrHan
 	}
 }
 
-// Wrapper for calling testNonExistantBucketOperations for both Erasure and FS.
-func TestNonExistantBucketOperations(t *testing.T) {
-	ExecObjectLayerTest(t, testNonExistantBucketOperations)
+// Wrapper for calling testNonExistentBucketOperations for both Erasure and FS.
+func TestNonExistentBucketOperations(t *testing.T) {
+	ExecObjectLayerTest(t, testNonExistentBucketOperations)
 }
 
 // Tests validate that bucket operation on non-existent bucket fails.
-func testNonExistantBucketOperations(obj ObjectLayer, instanceType string, t TestErrHandler) {
+func testNonExistentBucketOperations(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	var opts ObjectOptions
 	_, err := obj.PutObject(context.Background(), "bucket1", "object", mustGetPutObjReader(t, bytes.NewBufferString("one"), int64(len("one")), "", ""), opts)
 	if err == nil {
@@ -529,7 +529,7 @@ func enableCompression(t *testing.T, encrypt bool) {
 	}
 }
 
-func enableEncrytion(t *testing.T) {
+func enableEncryption(t *testing.T) {
 	// Exec with default settings...
 	globalCompressConfigMu.Lock()
 	globalCompressConfig.Enabled = false
@@ -572,7 +572,7 @@ func execExtended(t *testing.T, fn func(t *testing.T)) {
 
 	t.Run("encrypted", func(t *testing.T) {
 		resetCompressEncryption()
-		enableEncrytion(t)
+		enableEncryption(t)
 		fn(t)
 	})
 
@@ -760,13 +760,13 @@ func testListBucketsOrder(obj ObjectLayer, instanceType string, t TestErrHandler
 	}
 }
 
-// Wrapper for calling testListObjectsTestsForNonExistantBucket for both Erasure and FS.
-func TestListObjectsTestsForNonExistantBucket(t *testing.T) {
-	ExecObjectLayerTest(t, testListObjectsTestsForNonExistantBucket)
+// Wrapper for calling testListObjectsTestsForNonExistentBucket for both Erasure and FS.
+func TestListObjectsTestsForNonExistentBucket(t *testing.T) {
+	ExecObjectLayerTest(t, testListObjectsTestsForNonExistentBucket)
 }
 
 // Tests validate that ListObjects operation on a non-existent bucket fails as expected.
-func testListObjectsTestsForNonExistantBucket(obj ObjectLayer, instanceType string, t TestErrHandler) {
+func testListObjectsTestsForNonExistentBucket(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	result, err := obj.ListObjects(context.Background(), "bucket", "", "", "", 1000)
 	if err == nil {
 		t.Fatalf("%s: Expected error but found nil.", instanceType)
@@ -782,13 +782,13 @@ func testListObjectsTestsForNonExistantBucket(obj ObjectLayer, instanceType stri
 	}
 }
 
-// Wrapper for calling testNonExistantObjectInBucket for both Erasure and FS.
-func TestNonExistantObjectInBucket(t *testing.T) {
-	ExecObjectLayerTest(t, testNonExistantObjectInBucket)
+// Wrapper for calling testNonExistentObjectInBucket for both Erasure and FS.
+func TestNonExistentObjectInBucket(t *testing.T) {
+	ExecObjectLayerTest(t, testNonExistentObjectInBucket)
 }
 
 // Tests validate that GetObject fails on a non-existent bucket as expected.
-func testNonExistantObjectInBucket(obj ObjectLayer, instanceType string, t TestErrHandler) {
+func testNonExistentObjectInBucket(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	err := obj.MakeBucket(context.Background(), "bucket", MakeBucketOptions{})
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
@@ -1974,8 +1974,8 @@ func (s *TestSuiteCommon) TestGetObjectLarge11MiB(c *check) {
 	c.Assert(putMD5, getMD5)
 }
 
-// TestGetPartialObjectMisAligned - tests get object partially mis-aligned.
-// create a large buffer of mis-aligned data and upload it.
+// TestGetPartialObjectMisAligned - tests get object partially miss-aligned.
+// create a large buffer of miss-aligned data and upload it.
 // then make partial range requests to while fetching it back and assert the response content.
 func (s *TestSuiteCommon) TestGetPartialObjectMisAligned(c *check) {
 	// generate a random bucket name.
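The partial reads exercised by the test above are ordinary HTTP range requests. A minimal, self-contained sketch of issuing one; the endpoint, bucket, object, and byte range below are hypothetical placeholders, not values taken from the test:

package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Hypothetical endpoint and object; point this at a real server to run.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/testbucket/testobject", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Request an inclusive, possibly misaligned byte range.
	req.Header.Set("Range", "bytes=100-199")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	// A server honoring the range replies 206 Partial Content and
	// echoes the satisfied range in the Content-Range header.
	fmt.Println(resp.Status, resp.Header.Get("Content-Range"))
}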
@@ -258,7 +258,7 @@ func TestParseSignature(t *testing.T) {
 		expectedErrCode APIErrorCode
 	}{
 		// Test case - 1.
-		// SignElemenet doesn't have 2 parts on an attempt to split at '='.
+		// SignElement doesn't have 2 parts on an attempt to split at '='.
 		// ErrMissingFields expected.
 		{
 			inputSignElement: "Signature",
@@ -274,7 +274,7 @@ func TestParseSignature(t *testing.T) {
 			expectedErrCode: ErrMissingFields,
 		},
 		// Test case - 3.
-		// SignElemenet with missing "SignatureTag",ErrMissingSignTag expected.
+		// SignElement with missing "SignatureTag",ErrMissingSignTag expected.
 		{
 			inputSignElement: "Sign=",
 			expectedSignStr: "",
@@ -310,7 +310,7 @@ func TestParseSignedHeaders(t *testing.T) {
 		expectedErrCode APIErrorCode
 	}{
 		// Test case - 1.
-		// SignElemenet doesn't have 2 parts on an attempt to split at '='.
+		// SignElement doesn't have 2 parts on an attempt to split at '='.
 		// ErrMissingFields expected.
 		{
 			inputSignElement: "SignedHeaders",
@@ -318,7 +318,7 @@ func TestParseSignedHeaders(t *testing.T) {
 			expectedErrCode: ErrMissingFields,
 		},
 		// Test case - 2.
-		// SignElemenet with missing "SigHeaderTag",ErrMissingSignHeadersTag expected.
+		// SignElement with missing "SigHeaderTag",ErrMissingSignHeadersTag expected.
 		{
 			inputSignElement: "Sign=",
 			expectedSignedHeaders: nil,
@@ -26,7 +26,7 @@ import (
 	"github.com/minio/minio/internal/logger"
 )
 
-// writeSTSErrorRespone writes error headers
+// writeSTSErrorResponse writes error headers
 func writeSTSErrorResponse(ctx context.Context, w http.ResponseWriter, errCode STSErrorCode, err error) {
 	stsErr := stsErrCodes.ToSTSErr(errCode)
 
@@ -147,7 +147,7 @@ func getSampleXLMeta(totalParts int) xlMetaV1Object {
 
 // Compare the unmarshaled XLMetaV1 with the one obtained from jsoniter parsing.
 func compareXLMetaV1(t *testing.T, unMarshalXLMeta, jsoniterXLMeta xlMetaV1Object) {
-	// Start comparing the fields of xlMetaV1Object obtained from jsoniter parsing with one parsed using json unmarshaling.
+	// Start comparing the fields of xlMetaV1Object obtained from jsoniter parsing with one parsed using json unmarshalling.
 	if unMarshalXLMeta.Version != jsoniterXLMeta.Version {
 		t.Errorf("Expected the Version to be \"%s\", but got \"%s\".", unMarshalXLMeta.Version, jsoniterXLMeta.Version)
 	}
@@ -1702,7 +1702,7 @@ func (s *xlStorage) readAllData(ctx context.Context, volume, volumeDir string, f
 	}
 
 	if discard {
-		// This discard is mostly true for DELETEs
+		// This discard is mostly true for DELETEEs
 		// so we need to make sure we do not keep
 		// page-cache references after.
 		defer disk.Fdatasync(f)
@@ -84,7 +84,7 @@ func TestParseLegalHoldStatus(t *testing.T) {
 }
 
 // TestUnmarshalDefaultRetention checks if default retention
-// marshaling and unmarshaling work as expected
+// marshaling and unmarshalling work as expected
 func TestUnmarshalDefaultRetention(t *testing.T) {
 	days := uint64(4)
 	years := uint64(1)
@@ -159,7 +159,7 @@ var NotifySubSystems = set.CreateStringSet(
 	NotifyWebhookSubSys,
 )
 
-// LambdaSubSystems - all lambda sub-systesm
+// LambdaSubSystems - all lambda sub-systems
 var LambdaSubSystems = set.CreateStringSet(
 	LambdaWebhookSubSys,
 )
@@ -299,7 +299,7 @@ func checkValidNotificationKeysForSubSys(subSys string, tgt map[string]config.KV
 	return nil
 }
 
-// DefaultKakfaKVS - default KV for kafka target
+// DefaultKafkaKVS - default KV for kafka target
 var (
 	DefaultKafkaKVS = config.KVS{
 		config.KV{
@@ -28,7 +28,7 @@ import (
 
 // RemoveSensitiveHeaders removes confidential encryption
 // information - e.g. the SSE-C key - from the HTTP headers.
-// It has the same semantics as RemoveSensitiveEntires.
+// It has the same semantics as RemoveSensitiveEntries.
 func RemoveSensitiveHeaders(h http.Header) {
 	h.Del(xhttp.AmzServerSideEncryptionCustomerKey)
 	h.Del(xhttp.AmzServerSideEncryptionCopyCustomerKey)
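The function documented in the hunk above redacts SSE-C key material so it cannot leak into logs. A sketch of the same pattern using plain net/http, with the AWS header names written out literally rather than through minio's xhttp constants (the literal names are an assumption based on the AWS SSE-C conventions):

package main

import (
	"fmt"
	"net/http"
)

// removeSensitiveHeaders sketches the pattern: drop confidential
// SSE-C key material before headers can reach a log line.
func removeSensitiveHeaders(h http.Header) {
	h.Del("X-Amz-Server-Side-Encryption-Customer-Key")
	h.Del("X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key")
}

func main() {
	h := http.Header{}
	h.Set("X-Amz-Server-Side-Encryption-Customer-Key", "base64-key-material")
	h.Set("Content-Type", "application/octet-stream")
	removeSensitiveHeaders(h)
	fmt.Println(h) // the key header is gone; benign headers remain
}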
@@ -254,7 +254,7 @@ func (dm *DRWMutex) lockBlocking(ctx context.Context, lockLossCallback func(), i
 			log("lockBlocking %s/%s for %#v: granted\n", id, source, dm.Names)
 
 			// Refresh lock continuously and cancel if there is no quorum in the lock anymore
-			dm.startContinousLockRefresh(lockLossCallback, id, source, quorum)
+			dm.startContinuousLockRefresh(lockLossCallback, id, source, quorum)
 
 			return locked
 		}
@@ -272,7 +272,7 @@ func (dm *DRWMutex) lockBlocking(ctx context.Context, lockLossCallback func(), i
 	}
 }
 
-func (dm *DRWMutex) startContinousLockRefresh(lockLossCallback func(), id, source string, quorum int) {
+func (dm *DRWMutex) startContinuousLockRefresh(lockLossCallback func(), id, source string, quorum int) {
 	ctx, cancel := context.WithCancel(context.Background())
 
 	dm.m.Lock()
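The renamed startContinuousLockRefresh follows a standard Go shape: a goroutine tied to a cancellable context that periodically renews a lease and reports loss through a callback. A stripped-down sketch of that shape only; the interval, function names, and refresh predicate are illustrative, not dsync's actual internals:

package main

import (
	"context"
	"fmt"
	"time"
)

// startRefresh launches a goroutine that calls refresh on a fixed
// interval; on the first failed refresh it fires onLoss and stops.
// Cancelling ctx (e.g. on unlock) ends the goroutine cleanly.
func startRefresh(ctx context.Context, refresh func() bool, onLoss func()) {
	go func() {
		t := time.NewTicker(100 * time.Millisecond) // illustrative interval
		defer t.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-t.C:
				if !refresh() {
					onLoss()
					return
				}
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	startRefresh(ctx, func() bool { return true }, func() { fmt.Println("lock lost") })
	time.Sleep(300 * time.Millisecond)
	cancel() // stop refreshing once the lock is released
	time.Sleep(50 * time.Millisecond)
}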
@@ -124,17 +124,17 @@ func Init(goPath string, goRoot string) {
 	var goRootList []string
 	var defaultgoPathList []string
 	var defaultgoRootList []string
-	pathSeperator := ":"
+	pathSeparator := ":"
 	// Add all possible GOPATH paths into trimStrings
 	// Split GOPATH depending on the OS type
 	if runtime.GOOS == "windows" {
-		pathSeperator = ";"
+		pathSeparator = ";"
 	}
 
-	goPathList = strings.Split(goPath, pathSeperator)
-	goRootList = strings.Split(goRoot, pathSeperator)
-	defaultgoPathList = strings.Split(build.Default.GOPATH, pathSeperator)
-	defaultgoRootList = strings.Split(build.Default.GOROOT, pathSeperator)
+	goPathList = strings.Split(goPath, pathSeparator)
+	goRootList = strings.Split(goRoot, pathSeparator)
+	defaultgoPathList = strings.Split(build.Default.GOPATH, pathSeparator)
+	defaultgoRootList = strings.Split(build.Default.GOROOT, pathSeparator)
 
 	// Add trim string "{GOROOT}/src/" into trimStrings
 	trimStrings = []string{filepath.Join(runtime.GOROOT(), "src") + string(filepath.Separator)}
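Aside: the OS-dependent separator handled by hand in the hunk above is also covered by the standard library. filepath.SplitList splits a GOPATH-style list using ';' on Windows and ':' elsewhere, so no runtime.GOOS branch is needed. A minimal sketch using only the stdlib:

package main

import (
	"fmt"
	"go/build"
	"path/filepath"
)

func main() {
	// SplitList knows the platform's list separator, so the
	// hand-rolled windows check becomes unnecessary.
	for _, p := range filepath.SplitList(build.Default.GOPATH) {
		fmt.Println(p)
	}
}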
@@ -163,7 +163,7 @@ func (h *Target) Init(ctx context.Context) error {
 	if err := h.init(); err != nil {
 		return err
 	}
-	go h.startKakfaLogger()
+	go h.startKafkaLogger()
 	return nil
 }
 
@@ -181,7 +181,7 @@ func (h *Target) initQueueStore(ctx context.Context) (err error) {
 	return
 }
 
-func (h *Target) startKakfaLogger() {
+func (h *Target) startKafkaLogger() {
 	h.logChMu.RLock()
 	logCh := h.logCh
 	if logCh != nil {
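The renamed startKafkaLogger shows a recurring pattern in this codebase: Init launches one consumer goroutine that snapshots a channel under an RWMutex and drains it until close. A self-contained sketch of that shape; the target type and field names here mimic the hunk but are invented for illustration, not the real logger target:

package main

import (
	"fmt"
	"sync"
)

// target mirrors the shape in the hunk: a log channel behind an
// RWMutex so it can be swapped or nilled out safely elsewhere.
type target struct {
	logChMu sync.RWMutex
	logCh   chan string
}

func (t *target) startLogger() {
	t.logChMu.RLock()
	logCh := t.logCh // snapshot the channel under the read lock
	t.logChMu.RUnlock()
	if logCh == nil {
		return
	}
	for entry := range logCh { // drain until the channel is closed
		fmt.Println("send:", entry)
	}
}

func main() {
	t := &target{logCh: make(chan string, 4)}
	done := make(chan struct{})
	go func() {
		t.startLogger()
		close(done)
	}()
	t.logCh <- "hello kafka"
	close(t.logCh)
	<-done
}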
@@ -307,7 +307,7 @@ func NewReader(readCloser io.ReadCloser, args *ReaderArgs) (*Reader, error) {
 	ret.Comment = []rune(args.CommentCharacter)[0]
 	ret.Quote = []rune{}
 	if len([]rune(args.QuoteCharacter)) > 0 {
-		// Add the first rune of args.QuoteChracter
+		// Add the first rune of args.QuoteCharacter
 		ret.Quote = append(ret.Quote, []rune(args.QuoteCharacter)[0])
 	}
 	ret.QuoteEscape = []rune(args.QuoteEscapeCharacter)[0]
@@ -25,11 +25,11 @@ import (
 )
 
 var (
 	errKeyLookup = errors.New("Cannot look up key in non-object value")
 	errIndexLookup = errors.New("Cannot look up array index in non-array value")
 	errWildcardObjectLookup = errors.New("Object wildcard used on non-object value")
 	errWildcardArrayLookup = errors.New("Array wildcard used on non-array value")
-	errWilcardObjectUsageInvalid = errors.New("Invalid usage of object wildcard")
+	errWildcardObjectUsageInvalid = errors.New("Invalid usage of object wildcard")
 )
 
 // jsonpathEval evaluates a JSON path and returns the value at the path.
@@ -85,13 +85,13 @@ func jsonpathEval(p []*JSONPathElement, v interface{}) (r interface{}, flat bool
 	switch kvs := v.(type) {
 	case jstream.KVS:
 		if len(p[1:]) > 0 {
-			return nil, false, errWilcardObjectUsageInvalid
+			return nil, false, errWildcardObjectUsageInvalid
 		}
 
 		return kvs, false, nil
 	case simdjson.Object:
 		if len(p[1:]) > 0 {
-			return nil, false, errWilcardObjectUsageInvalid
+			return nil, false, errWildcardObjectUsageInvalid
 		}
 
 		return kvs, false, nil
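The wildcard errors renamed above are returned from a type switch over the decoded JSON value: an object wildcard may only be the final path element. A generic sketch of that dispatch shape; the types, argument, and error texts are stand-ins, not minio's s3select internals:

package main

import (
	"errors"
	"fmt"
)

var errTrailingPath = errors.New("Invalid usage of object wildcard")

// evalWildcard mimics the dispatch shape: an object value is returned
// as-is, but only when no path elements remain after the wildcard.
func evalWildcard(rest int, v interface{}) (interface{}, error) {
	switch val := v.(type) {
	case map[string]interface{}:
		if rest > 0 {
			return nil, errTrailingPath
		}
		return val, nil
	default:
		return nil, errors.New("Object wildcard used on non-object value")
	}
}

func main() {
	obj := map[string]interface{}{"a": 1}
	if _, err := evalWildcard(1, obj); err != nil {
		fmt.Println(err) // a wildcard followed by more path elements is invalid
	}
	v, _ := evalWildcard(0, obj)
	fmt.Println(v)
}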