From bef0a50bc1651960e3c83a173614ae8c84544fd1 Mon Sep 17 00:00:00 2001 From: Bala FA Date: Mon, 21 Nov 2016 13:51:05 -0800 Subject: [PATCH] Cleanup and fixes (#3273) * newRequestID() (previously generateRequestID()) returns string rather than byte array. * Remove unclear comments and add appropriate comments. * SHA-256, MD5 Hash functions return Hex/Base64 encoded string rather than byte array. * Remove duplicate MD5 hasher functions. * Rename listObjectsValidateArgs() to validateListObjectsArgs() * Remove repeated auth check code in all bucket request handlers. * Remove abbreviated names in bucket-metadata * Avoid nested if in bucketPolicyMatchStatement() * Use ioutil.ReadFile() instead of os.Open() and ioutil.ReadAll() * Set crossDomainXML as constant. --- cmd/api-errors.go | 4 + cmd/api-headers.go | 14 +- cmd/api-headers_test.go | 4 +- cmd/api-response-multipart.go | 4 - cmd/api-response.go | 18 +- cmd/auth-handler.go | 68 +++----- cmd/auth-handler_test.go | 2 +- cmd/benchmark-utils_test.go | 26 +-- cmd/bucket-handlers-listobjects.go | 57 +------ cmd/bucket-handlers.go | 118 ++----------- cmd/bucket-metadata.go | 42 ++--- cmd/bucket-notification-handlers.go | 12 +- cmd/bucket-notification-handlers_test.go | 16 ++ cmd/bucket-policy-handlers.go | 27 +-- cmd/bucket-policy-parser.go | 2 +- cmd/certs.go | 9 +- cmd/crossdomain-xml-handler.go | 2 +- cmd/event-notifier.go | 5 +- cmd/fs-v1-multipart_test.go | 14 +- cmd/hasher.go | 48 ++++++ cmd/lock-instrument.go | 2 +- cmd/object-api-multipart_test.go | 10 +- cmd/object-api-putobject_test.go | 44 ++--- cmd/object-handlers.go | 207 +++-------------------- cmd/object-handlers_test.go | 20 +-- cmd/object-utils.go | 5 +- cmd/object_api_suite_test.go | 14 +- cmd/s3-peer-client.go | 16 +- cmd/s3-peer-router.go | 2 +- cmd/s3-peer-rpc-handlers.go | 18 +- cmd/s3-peer-rpc-handlers_test.go | 6 +- cmd/server_test.go | 27 +-- cmd/signature-v4-utils_test.go | 2 +- cmd/test-utils_test.go | 12 +- cmd/xl-v1-object_test.go | 6 +- 35 files changed, 
267 insertions(+), 616 deletions(-) create mode 100644 cmd/hasher.go diff --git a/cmd/api-errors.go b/cmd/api-errors.go index e6d3be692..02657ebf3 100644 --- a/cmd/api-errors.go +++ b/cmd/api-errors.go @@ -572,6 +572,7 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) { if err == nil { return ErrNone } + err = errorCause(err) // Verify if the underlying error is signature mismatch. switch err { @@ -580,10 +581,12 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) { case errContentSHA256Mismatch: apiErr = ErrContentSHA256Mismatch } + if apiErr != ErrNone { // If there was a match in the above switch case. return apiErr } + switch err.(type) { case StorageFull: apiErr = ErrStorageFull @@ -634,6 +637,7 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) { default: apiErr = ErrInternalError } + return apiErr } diff --git a/cmd/api-headers.go b/cmd/api-headers.go index 4bed0321d..5dd9ebeac 100644 --- a/cmd/api-headers.go +++ b/cmd/api-headers.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015 Minio, Inc. + * Minio Cloud Storage, (C) 2015,2016 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,25 +25,23 @@ import ( "strconv" ) -//// helpers - -// Static alphaNumeric table used for generating unique request ids +// Static alphanumeric table used for generating unique request ids var alphaNumericTable = []byte("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ") -// generateRequestID - Generate request id -func generateRequestID() []byte { +// newRequestID generates and returns request ID string. +func newRequestID() string { alpha := make([]byte, 16) rand.Read(alpha) for i := 0; i < 16; i++ { alpha[i] = alphaNumericTable[alpha[i]%byte(len(alphaNumericTable))] } - return alpha + return string(alpha) } // Write http common headers func setCommonHeaders(w http.ResponseWriter) { // Set unique request ID for each reply. 
- w.Header().Set("X-Amz-Request-Id", string(generateRequestID())) + w.Header().Set("X-Amz-Request-Id", newRequestID()) w.Header().Set("Server", ("Minio/" + ReleaseTag + " (" + runtime.GOOS + "; " + runtime.GOARCH + ")")) w.Header().Set("Accept-Ranges", "bytes") } diff --git a/cmd/api-headers_test.go b/cmd/api-headers_test.go index 150bd5ea4..a20c4a3a1 100644 --- a/cmd/api-headers_test.go +++ b/cmd/api-headers_test.go @@ -20,9 +20,9 @@ import ( "testing" ) -func TestGenerateRequestID(t *testing.T) { +func TestNewRequestID(t *testing.T) { // Ensure that it returns an alphanumeric result of length 16. - var id = generateRequestID() + var id = newRequestID() if len(id) != 16 { t.Fail() diff --git a/cmd/api-response-multipart.go b/cmd/api-response-multipart.go index a0b9654c4..51563944a 100644 --- a/cmd/api-response-multipart.go +++ b/cmd/api-response-multipart.go @@ -14,8 +14,6 @@ * limitations under the License. */ -// Package cmd file carries any specific responses constructed/necessary in -// multipart operations. package cmd import "net/http" @@ -56,5 +54,3 @@ func writePartSmallErrorResponse(w http.ResponseWriter, r *http.Request, err Par w.Write(encodedErrorResponse) w.(http.Flusher).Flush() } - -// Add any other multipart specific responses here. diff --git a/cmd/api-response.go b/cmd/api-response.go index 7f21e3ce6..d89dafa5a 100644 --- a/cmd/api-response.go +++ b/cmd/api-response.go @@ -261,12 +261,8 @@ func getObjectLocation(bucketName string, key string) string { return "/" + bucketName + "/" + key } -// takes an array of Bucketmetadata information for serialization -// input: -// array of bucket metadata -// -// output: -// populated struct that can be serialized to match xml and json api spec output +// generates ListBucketsResponse from array of BucketInfo which can be +// serialized to match XML and JSON API spec output. 
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse { var listbuckets []Bucket var data = ListBucketsResponse{} @@ -381,7 +377,7 @@ func generateListObjectsV2Response(bucket, prefix, token, startAfter, delimiter return data } -// generateCopyObjectResponse +// generates CopyObjectResponse from etag and lastModified time. func generateCopyObjectResponse(etag string, lastModified time.Time) CopyObjectResponse { return CopyObjectResponse{ ETag: "\"" + etag + "\"", @@ -389,7 +385,7 @@ func generateCopyObjectResponse(etag string, lastModified time.Time) CopyObjectR } } -// generateInitiateMultipartUploadResponse +// generates InitiateMultipartUploadResponse for given bucket, key and uploadID. func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) InitiateMultipartUploadResponse { return InitiateMultipartUploadResponse{ Bucket: bucket, @@ -398,7 +394,7 @@ func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) Initi } } -// generateCompleteMultipartUploadResponse +// generates CompleteMultipartUploadResponse for given bucket, key, location and ETag. func generateCompleteMultpartUploadResponse(bucket, key, location, etag string) CompleteMultipartUploadResponse { return CompleteMultipartUploadResponse{ Location: location, @@ -408,7 +404,7 @@ func generateCompleteMultpartUploadResponse(bucket, key, location, etag string) } } -// generateListPartsResult +// generates ListPartsResponse from ListPartsInfo. func generateListPartsResponse(partsInfo ListPartsInfo) ListPartsResponse { // TODO - support EncodingType in xml decoding listPartsResponse := ListPartsResponse{} @@ -438,7 +434,7 @@ func generateListPartsResponse(partsInfo ListPartsInfo) ListPartsResponse { return listPartsResponse } -// generateListMultipartUploadsResponse +// generates ListMultipartUploadsResponse for given bucket and ListMultipartsInfo. 
func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMultipartsInfo) ListMultipartUploadsResponse { listMultipartUploadsResponse := ListMultipartUploadsResponse{} listMultipartUploadsResponse.Bucket = bucket diff --git a/cmd/auth-handler.go b/cmd/auth-handler.go index e5b346eba..a1708c7ef 100644 --- a/cmd/auth-handler.go +++ b/cmd/auth-handler.go @@ -18,10 +18,6 @@ package cmd import ( "bytes" - "crypto/md5" - "crypto/sha256" - "encoding/base64" - "encoding/hex" "io/ioutil" "net/http" "strings" @@ -108,18 +104,32 @@ func getRequestAuthType(r *http.Request) authType { return authTypeUnknown } -// sum256 calculate sha256 sum for an input byte array -func sum256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} +func checkRequestAuthType(r *http.Request, bucket, policyAction, region string) APIErrorCode { + reqAuthType := getRequestAuthType(r) -// sumMD5 calculate md5 sum for an input byte array -func sumMD5(data []byte) []byte { - hash := md5.New() - hash.Write(data) - return hash.Sum(nil) + switch reqAuthType { + case authTypePresignedV2, authTypeSignedV2: + // Signature V2 validation. + s3Error := isReqAuthenticatedV2(r) + if s3Error != ErrNone { + errorIf(errSignatureMismatch, dumpRequest(r)) + } + return s3Error + case authTypeSigned, authTypePresigned: + s3Error := isReqAuthenticated(r, region) + if s3Error != ErrNone { + errorIf(errSignatureMismatch, dumpRequest(r)) + } + return s3Error + } + + if reqAuthType == authTypeAnonymous && policyAction != "" { + // http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html + return enforceBucketPolicy(bucket, policyAction, r.URL) + } + + // By default return ErrAccessDenied + return ErrAccessDenied } // Verify if request has valid AWS Signature Version '2'. @@ -157,7 +167,7 @@ func isReqAuthenticated(r *http.Request, region string) (s3Error APIErrorCode) { } // Verify Content-Md5, if payload is set. 
if r.Header.Get("Content-Md5") != "" { - if r.Header.Get("Content-Md5") != base64.StdEncoding.EncodeToString(sumMD5(payload)) { + if r.Header.Get("Content-Md5") != getMD5HashBase64(payload) { return ErrBadDigest } } @@ -168,7 +178,7 @@ func isReqAuthenticated(r *http.Request, region string) (s3Error APIErrorCode) { if skipContentSha256Cksum(r) { sha256sum = unsignedPayload } else { - sha256sum = hex.EncodeToString(sum256(payload)) + sha256sum = getSHA256Hash(payload) } if isRequestSignatureV4(r) { return doesSignatureMatch(sha256sum, r, region) @@ -178,30 +188,6 @@ func isReqAuthenticated(r *http.Request, region string) (s3Error APIErrorCode) { return ErrAccessDenied } -// checkAuth - checks for conditions satisfying the authorization of -// the incoming request. Request should be either Presigned or Signed -// in accordance with AWS S3 Signature V4 requirements. ErrAccessDenied -// is returned for unhandled auth type. Once the auth type is indentified -// request headers and body are used to calculate the signature validating -// the client signature present in request. -func checkAuth(r *http.Request) APIErrorCode { - return checkAuthWithRegion(r, serverConfig.GetRegion()) -} - -// checkAuthWithRegion - similar to checkAuth but takes a custom region. -func checkAuthWithRegion(r *http.Request, region string) APIErrorCode { - // Validates the request for both Presigned and Signed. - aType := getRequestAuthType(r) - switch aType { - case authTypeSignedV2, authTypePresignedV2: // Signature V2. - return isReqAuthenticatedV2(r) - case authTypeSigned, authTypePresigned: // Signature V4. - return isReqAuthenticated(r, region) - } - // For all unhandled auth types return error AccessDenied. - return ErrAccessDenied -} - // authHandler - handles all the incoming authorization headers and validates them if possible. 
type authHandler struct { handler http.Handler diff --git a/cmd/auth-handler_test.go b/cmd/auth-handler_test.go index 92e11bb1b..0e6989390 100644 --- a/cmd/auth-handler_test.go +++ b/cmd/auth-handler_test.go @@ -198,7 +198,7 @@ func TestIsRequestUnsignedPayload(t *testing.T) { // Test case - 2. // Test case with "X-Amz-Content-Sha256" header set to "UNSIGNED-PAYLOAD" // The payload is flagged as unsigned When "X-Amz-Content-Sha256" header is set to "UNSIGNED-PAYLOAD". - {"UNSIGNED-PAYLOAD", true}, + {unsignedPayload, true}, // Test case - 3. // set to a random value. {"abcd", false}, diff --git a/cmd/benchmark-utils_test.go b/cmd/benchmark-utils_test.go index c2b071504..4b22fea18 100644 --- a/cmd/benchmark-utils_test.go +++ b/cmd/benchmark-utils_test.go @@ -18,8 +18,6 @@ package cmd import ( "bytes" - "crypto/md5" - "encoding/hex" "io/ioutil" "math" "math/rand" @@ -74,10 +72,8 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) { textData := generateBytesData(objSize) // generate md5sum for the generated data. // md5sum of the data to written is required as input for PutObject. - hasher := md5.New() - hasher.Write([]byte(textData)) metadata := make(map[string]string) - metadata["md5Sum"] = hex.EncodeToString(hasher.Sum(nil)) + metadata["md5Sum"] = getMD5Hash(textData) sha256sum := "" // benchmark utility which helps obtain number of allocations and bytes allocated per ops. b.ReportAllocs() @@ -120,10 +116,8 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) { textData := generateBytesData(objSize) // generate md5sum for the generated data. // md5sum of the data to written is required as input for NewMultipartUpload. 
- hasher := md5.New() - hasher.Write([]byte(textData)) metadata := make(map[string]string) - metadata["md5Sum"] = hex.EncodeToString(hasher.Sum(nil)) + metadata["md5Sum"] = getMD5Hash(textData) sha256sum := "" uploadID, err = obj.NewMultipartUpload(bucket, object, metadata) if err != nil { @@ -139,15 +133,13 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) { // insert the object. totalPartsNR := int(math.Ceil(float64(objSize) / float64(partSize))) for j := 0; j < totalPartsNR; j++ { - hasher.Reset() if j < totalPartsNR-1 { textPartData = textData[j*partSize : (j+1)*partSize-1] } else { textPartData = textData[j*partSize:] } - hasher.Write([]byte(textPartData)) metadata := make(map[string]string) - metadata["md5Sum"] = hex.EncodeToString(hasher.Sum(nil)) + metadata["md5Sum"] = getMD5Hash([]byte(textPartData)) md5Sum, err = obj.PutObjectPart(bucket, object, uploadID, j, int64(len(textPartData)), bytes.NewBuffer(textPartData), metadata["md5Sum"], sha256sum) if err != nil { b.Fatal(err) @@ -242,10 +234,8 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) { // generate md5sum for the generated data. // md5sum of the data to written is required as input for PutObject. // PutObject is the functions which writes the data onto the FS/XL backend. - hasher := md5.New() - hasher.Write([]byte(textData)) metadata := make(map[string]string) - metadata["md5Sum"] = hex.EncodeToString(hasher.Sum(nil)) + metadata["md5Sum"] = getMD5Hash(textData) // insert the object. var objInfo ObjectInfo objInfo, err = obj.PutObject(bucket, "object"+strconv.Itoa(i), int64(len(textData)), bytes.NewBuffer(textData), metadata, sha256sum) @@ -349,10 +339,8 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) { textData := generateBytesData(objSize) // generate md5sum for the generated data. // md5sum of the data to written is required as input for PutObject. 
- hasher := md5.New() - hasher.Write([]byte(textData)) metadata := make(map[string]string) - metadata["md5Sum"] = hex.EncodeToString(hasher.Sum(nil)) + metadata["md5Sum"] = getMD5Hash([]byte(textData)) sha256sum := "" // benchmark utility which helps obtain number of allocations and bytes allocated per ops. b.ReportAllocs() @@ -401,10 +389,8 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) { // generate md5sum for the generated data. // md5sum of the data to written is required as input for PutObject. // PutObject is the functions which writes the data onto the FS/XL backend. - hasher := md5.New() - hasher.Write([]byte(textData)) metadata := make(map[string]string) - metadata["md5Sum"] = hex.EncodeToString(hasher.Sum(nil)) + metadata["md5Sum"] = getMD5Hash([]byte(textData)) sha256sum := "" // insert the object. var objInfo ObjectInfo diff --git a/cmd/bucket-handlers-listobjects.go b/cmd/bucket-handlers-listobjects.go index 324f1fd87..2bdbe9698 100644 --- a/cmd/bucket-handlers-listobjects.go +++ b/cmd/bucket-handlers-listobjects.go @@ -29,7 +29,7 @@ import ( // - delimiter if set should be equal to '/', otherwise the request is rejected. // - marker if set should have a common prefix with 'prefix' param, otherwise // the request is rejected. -func listObjectsValidateArgs(prefix, marker, delimiter string, maxKeys int) APIErrorCode { +func validateListObjectsArgs(prefix, marker, delimiter string, maxKeys int) APIErrorCode { // Max keys cannot be negative. if maxKeys < 0 { return ErrInvalidMaxKeys @@ -70,31 +70,11 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http return } - switch getRequestAuthType(r) { - default: - // For all unknown auth types return error. 
- writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) + if s3Error := checkRequestAuthType(r, bucket, "s3:ListBucket", serverConfig.GetRegion()); s3Error != ErrNone { + writeErrorResponse(w, r, s3Error, r.URL.Path) return - case authTypeAnonymous: - // http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html - if s3Error := enforceBucketPolicy(bucket, "s3:ListBucket", r.URL); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresignedV2, authTypeSignedV2: - // Signature V2 validation. - if s3Error := isReqAuthenticatedV2(r); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypeSigned, authTypePresigned: - if s3Error := isReqAuthenticated(r, serverConfig.GetRegion()); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } } + // Extract all the listObjectsV2 query params to their native values. prefix, token, startAfter, delimiter, fetchOwner, maxKeys, _ := getListObjectsV2Args(r.URL.Query()) @@ -107,7 +87,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http } // Validate the query params before beginning to serve the request. // fetch-owner is not validated since it is a boolean - if s3Error := listObjectsValidateArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone { + if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone { writeErrorResponse(w, r, s3Error, r.URL.Path) return } @@ -144,37 +124,16 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http return } - switch getRequestAuthType(r) { - default: - // For all unknown auth types return error. 
- writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) + if s3Error := checkRequestAuthType(r, bucket, "s3:ListBucket", serverConfig.GetRegion()); s3Error != ErrNone { + writeErrorResponse(w, r, s3Error, r.URL.Path) return - case authTypeAnonymous: - // http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html - if s3Error := enforceBucketPolicy(bucket, "s3:ListBucket", r.URL); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresignedV2, authTypeSignedV2: - // Signature V2 validation. - if s3Error := isReqAuthenticatedV2(r); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypeSigned, authTypePresigned: - if s3Error := isReqAuthenticated(r, serverConfig.GetRegion()); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } } // Extract all the litsObjectsV1 query params to their native values. prefix, marker, delimiter, maxKeys, _ := getListObjectsV1Args(r.URL.Query()) // Validate all the query params before beginning to serve the request. - if s3Error := listObjectsValidateArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone { + if s3Error := validateListObjectsArgs(prefix, marker, delimiter, maxKeys); s3Error != ErrNone { writeErrorResponse(w, r, s3Error, r.URL.Path) return } diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index 80b515cb0..0ead0f93a 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -83,30 +83,9 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r * return } - switch getRequestAuthType(r) { - default: - // For all unknown auth types return error. 
- writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) + if s3Error := checkRequestAuthType(r, bucket, "s3:GetBucketLocation", "us-east-1"); s3Error != ErrNone { + writeErrorResponse(w, r, s3Error, r.URL.Path) return - case authTypeAnonymous: - // http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html - if s3Error := enforceBucketPolicy(bucket, "s3:GetBucketLocation", r.URL); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresignedV2, authTypeSignedV2: - // Signature V2 validation. - if s3Error := isReqAuthenticatedV2(r); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypeSigned, authTypePresigned: - if s3Error := isReqAuthenticated(r, "us-east-1"); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } } if _, err := objectAPI.GetBucketInfo(bucket); err != nil { @@ -146,30 +125,9 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, return } - switch getRequestAuthType(r) { - default: - // For all unknown auth types return error. - writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) + if s3Error := checkRequestAuthType(r, bucket, "s3:ListBucketMultipartUploads", serverConfig.GetRegion()); s3Error != ErrNone { + writeErrorResponse(w, r, s3Error, r.URL.Path) return - case authTypeAnonymous: - // http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html - if s3Error := enforceBucketPolicy(bucket, "s3:ListBucketMultipartUploads", r.URL); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresignedV2, authTypeSignedV2: - // Signature V2 validation. 
- if s3Error := isReqAuthenticatedV2(r); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresigned, authTypeSigned: - if s3Error := isReqAuthenticated(r, serverConfig.GetRegion()); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } } prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, _ := getBucketMultipartResources(r.URL.Query()) @@ -211,11 +169,8 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R return } - // List buckets does not support bucket policies, no need to enforce it. - // Proceed to validate signature. - // Validates the request for both Presigned and Signed. - if s3Error := checkAuthWithRegion(r, ""); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) + // ListBuckets does not have any bucket action. + if s3Error := checkRequestAuthType(r, "", "", "us-east-1"); s3Error != ErrNone { writeErrorResponse(w, r, s3Error, r.URL.Path) return } @@ -248,30 +203,9 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, return } - switch getRequestAuthType(r) { - default: - // For all unknown auth types return error. - writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) + if s3Error := checkRequestAuthType(r, bucket, "s3:DeleteObject", serverConfig.GetRegion()); s3Error != ErrNone { + writeErrorResponse(w, r, s3Error, r.URL.Path) return - case authTypeAnonymous: - // http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html - if s3Error := enforceBucketPolicy(bucket, "s3:DeleteObject", r.URL); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresignedV2, authTypeSignedV2: - // Signature V2 validation. 
- if s3Error := isReqAuthenticatedV2(r); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresigned, authTypeSigned: - if s3Error := isReqAuthenticated(r, serverConfig.GetRegion()); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } } // Content-Length is required and should be non-zero @@ -380,9 +314,8 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req return } - // PutBucket does not support policies, use checkAuth to validate signature. - if s3Error := checkAuthWithRegion(r, "us-east-1"); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) + // PutBucket does not have any bucket action. + if s3Error := checkRequestAuthType(r, "", "", "us-east-1"); s3Error != ErrNone { writeErrorResponse(w, r, s3Error, r.URL.Path) return } @@ -535,30 +468,10 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re writeErrorResponse(w, r, ErrServerNotInitialized, r.URL.Path) return } - switch getRequestAuthType(r) { - default: - // For all unknown auth types return error. - writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) + + if s3Error := checkRequestAuthType(r, bucket, "s3:ListBucket", serverConfig.GetRegion()); s3Error != ErrNone { + writeErrorResponse(w, r, s3Error, r.URL.Path) return - case authTypeAnonymous: - // http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html - if s3Error := enforceBucketPolicy(bucket, "s3:ListBucket", r.URL); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresignedV2, authTypeSignedV2: - // Signature V2 validation. 
- if s3Error := isReqAuthenticatedV2(r); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresigned, authTypeSigned: - if s3Error := isReqAuthenticated(r, serverConfig.GetRegion()); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } } if _, err := objectAPI.GetBucketInfo(bucket); err != nil { @@ -577,9 +490,8 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http. return } - // DeleteBucket does not support bucket policies, use checkAuth to validate signature. - if s3Error := checkAuth(r); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) + // DeleteBucket does not have any bucket action. + if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone { writeErrorResponse(w, r, s3Error, r.URL.Path) return } diff --git a/cmd/bucket-metadata.go b/cmd/bucket-metadata.go index 7f299e00a..25dba4090 100644 --- a/cmd/bucket-metadata.go +++ b/cmd/bucket-metadata.go @@ -22,26 +22,26 @@ import "encoding/json" // state. type BucketMetaState interface { // Updates bucket notification - UpdateBucketNotification(args *SetBNPArgs) error + UpdateBucketNotification(args *SetBucketNotificationPeerArgs) error // Updates bucket listener - UpdateBucketListener(args *SetBLPArgs) error + UpdateBucketListener(args *SetBucketListenerPeerArgs) error // Updates bucket policy - UpdateBucketPolicy(args *SetBPPArgs) error + UpdateBucketPolicy(args *SetBucketPolicyPeerArgs) error // Sends event SendEvent(args *EventArgs) error } // Type that implements BucketMetaState for local node. -type localBMS struct { +type localBucketMetaState struct { ObjectAPI func() ObjectLayer } -// localBMS.UpdateBucketNotification - updates in-memory global bucket +// localBucketMetaState.UpdateBucketNotification - updates in-memory global bucket // notification info. 
-func (lc *localBMS) UpdateBucketNotification(args *SetBNPArgs) error { +func (lc *localBucketMetaState) UpdateBucketNotification(args *SetBucketNotificationPeerArgs) error { // check if object layer is available. objAPI := lc.ObjectAPI() if objAPI == nil { @@ -53,9 +53,9 @@ func (lc *localBMS) UpdateBucketNotification(args *SetBNPArgs) error { return nil } -// localBMS.UpdateBucketListener - updates in-memory global bucket +// localBucketMetaState.UpdateBucketListener - updates in-memory global bucket // listeners info. -func (lc *localBMS) UpdateBucketListener(args *SetBLPArgs) error { +func (lc *localBucketMetaState) UpdateBucketListener(args *SetBucketListenerPeerArgs) error { // check if object layer is available. objAPI := lc.ObjectAPI() if objAPI == nil { @@ -66,9 +66,9 @@ func (lc *localBMS) UpdateBucketListener(args *SetBLPArgs) error { return globalEventNotifier.SetBucketListenerConfig(args.Bucket, args.LCfg) } -// localBMS.UpdateBucketPolicy - updates in-memory global bucket +// localBucketMetaState.UpdateBucketPolicy - updates in-memory global bucket // policy info. -func (lc *localBMS) UpdateBucketPolicy(args *SetBPPArgs) error { +func (lc *localBucketMetaState) UpdateBucketPolicy(args *SetBucketPolicyPeerArgs) error { // check if object layer is available. objAPI := lc.ObjectAPI() if objAPI == nil { @@ -83,9 +83,9 @@ func (lc *localBMS) UpdateBucketPolicy(args *SetBPPArgs) error { return globalBucketPolicies.SetBucketPolicy(args.Bucket, pCh) } -// localBMS.SendEvent - sends event to local event notifier via +// localBucketMetaState.SendEvent - sends event to local event notifier via // `globalEventNotifier` -func (lc *localBMS) SendEvent(args *EventArgs) error { +func (lc *localBucketMetaState) SendEvent(args *EventArgs) error { // check if object layer is available. objAPI := lc.ObjectAPI() if objAPI == nil { @@ -96,34 +96,34 @@ func (lc *localBMS) SendEvent(args *EventArgs) error { } // Type that implements BucketMetaState for remote node. 
-type remoteBMS struct { +type remoteBucketMetaState struct { *AuthRPCClient } -// remoteBMS.UpdateBucketNotification - sends bucket notification +// remoteBucketMetaState.UpdateBucketNotification - sends bucket notification // change to remote peer via RPC call. -func (rc *remoteBMS) UpdateBucketNotification(args *SetBNPArgs) error { +func (rc *remoteBucketMetaState) UpdateBucketNotification(args *SetBucketNotificationPeerArgs) error { reply := GenericReply{} return rc.Call("S3.SetBucketNotificationPeer", args, &reply) } -// remoteBMS.UpdateBucketListener - sends bucket listener change to +// remoteBucketMetaState.UpdateBucketListener - sends bucket listener change to // remote peer via RPC call. -func (rc *remoteBMS) UpdateBucketListener(args *SetBLPArgs) error { +func (rc *remoteBucketMetaState) UpdateBucketListener(args *SetBucketListenerPeerArgs) error { reply := GenericReply{} return rc.Call("S3.SetBucketListenerPeer", args, &reply) } -// remoteBMS.UpdateBucketPolicy - sends bucket policy change to remote +// remoteBucketMetaState.UpdateBucketPolicy - sends bucket policy change to remote // peer via RPC call. -func (rc *remoteBMS) UpdateBucketPolicy(args *SetBPPArgs) error { +func (rc *remoteBucketMetaState) UpdateBucketPolicy(args *SetBucketPolicyPeerArgs) error { reply := GenericReply{} return rc.Call("S3.SetBucketPolicyPeer", args, &reply) } -// remoteBMS.SendEvent - sends event for bucket listener to remote +// remoteBucketMetaState.SendEvent - sends event for bucket listener to remote // peer via RPC call. 
-func (rc *remoteBMS) SendEvent(args *EventArgs) error { +func (rc *remoteBucketMetaState) SendEvent(args *EventArgs) error { reply := GenericReply{} return rc.Call("S3.Event", args, &reply) } diff --git a/cmd/bucket-notification-handlers.go b/cmd/bucket-notification-handlers.go index e7a8dce5f..ba4ed2c71 100644 --- a/cmd/bucket-notification-handlers.go +++ b/cmd/bucket-notification-handlers.go @@ -47,11 +47,11 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, return } - // Validate request authorization. - if s3Error := checkAuth(r); s3Error != ErrNone { + if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone { writeErrorResponse(w, r, s3Error, r.URL.Path) return } + vars := mux.Vars(r) bucket := vars["bucket"] @@ -100,11 +100,11 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, return } - // Validate request authorization. - if s3Error := checkAuth(r); s3Error != ErrNone { + if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone { writeErrorResponse(w, r, s3Error, r.URL.Path) return } + vars := mux.Vars(r) bucket := vars["bucket"] @@ -254,11 +254,11 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit return } - // Validate request authorization. - if s3Error := checkAuth(r); s3Error != ErrNone { + if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone { writeErrorResponse(w, r, s3Error, r.URL.Path) return } + vars := mux.Vars(r) bucket := vars["bucket"] diff --git a/cmd/bucket-notification-handlers_test.go b/cmd/bucket-notification-handlers_test.go index 639a0c156..e4ef7b35b 100644 --- a/cmd/bucket-notification-handlers_test.go +++ b/cmd/bucket-notification-handlers_test.go @@ -1,3 +1,19 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package cmd import ( diff --git a/cmd/bucket-policy-handlers.go b/cmd/bucket-policy-handlers.go index c73c6eadd..79804ceb3 100644 --- a/cmd/bucket-policy-handlers.go +++ b/cmd/bucket-policy-handlers.go @@ -50,17 +50,10 @@ func bucketPolicyEvalStatements(action string, resource string, conditions map[s // Verify if action, resource and conditions match input policy statement. func bucketPolicyMatchStatement(action string, resource string, conditions map[string]set.StringSet, statement policyStatement) bool { - // Verify if action matches. - if bucketPolicyActionMatch(action, statement) { - // Verify if resource matches. - if bucketPolicyResourceMatch(resource, statement) { - // Verify if condition matches. - if bucketPolicyConditionMatch(conditions, statement) { - return true - } - } - } - return false + // Verify if action, resource and condition match in given statement. + return (bucketPolicyActionMatch(action, statement) && + bucketPolicyResourceMatch(resource, statement) && + bucketPolicyConditionMatch(conditions, statement)) } // Verify if given action matches with policy statement. @@ -132,9 +125,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht return } - // PutBucketPolicy does not support bucket policies, use checkAuth to validate signature. 
- if s3Error := checkAuth(r); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) + if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone { writeErrorResponse(w, r, s3Error, r.URL.Path) return } @@ -244,9 +235,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r return } - // DeleteBucketPolicy does not support bucket policies, use checkAuth to validate signature. - if s3Error := checkAuth(r); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) + if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone { writeErrorResponse(w, r, s3Error, r.URL.Path) return } @@ -289,9 +278,7 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht return } - // GetBucketPolicy does not support bucket policies, use checkAuth to validate signature. - if s3Error := checkAuth(r); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) + if s3Error := checkRequestAuthType(r, "", "", serverConfig.GetRegion()); s3Error != ErrNone { writeErrorResponse(w, r, s3Error, r.URL.Path) return } diff --git a/cmd/bucket-policy-parser.go b/cmd/bucket-policy-parser.go index 36c179973..da47c3d8e 100644 --- a/cmd/bucket-policy-parser.go +++ b/cmd/bucket-policy-parser.go @@ -35,7 +35,7 @@ const ( ) // supportedActionMap - lists all the actions supported by minio. -var supportedActionMap = set.CreateStringSet("*", "*", "s3:*", "s3:GetObject", +var supportedActionMap = set.CreateStringSet("*", "s3:*", "s3:GetObject", "s3:ListBucket", "s3:PutObject", "s3:GetBucketLocation", "s3:DeleteObject", "s3:AbortMultipartUpload", "s3:ListBucketMultipartUploads", "s3:ListMultipartUploadParts") diff --git a/cmd/certs.go b/cmd/certs.go index 4683ea494..31896afd1 100644 --- a/cmd/certs.go +++ b/cmd/certs.go @@ -114,14 +114,7 @@ func isSSL() bool { // Reads certificated file and returns a list of parsed certificates. 
func readCertificateChain() ([]*x509.Certificate, error) { - file, err := os.Open(mustGetCertFile()) - if err != nil { - return nil, err - } - defer file.Close() - - // Read the cert successfully. - bytes, err := ioutil.ReadAll(file) + bytes, err := ioutil.ReadFile(mustGetCertFile()) if err != nil { return nil, err } diff --git a/cmd/crossdomain-xml-handler.go b/cmd/crossdomain-xml-handler.go index 2b1b79dc1..44e04ab64 100644 --- a/cmd/crossdomain-xml-handler.go +++ b/cmd/crossdomain-xml-handler.go @@ -19,7 +19,7 @@ package cmd import "net/http" // Standard cross domain policy information located at https://s3.amazonaws.com/crossdomain.xml -var crossDomainXML = `` +const crossDomainXML = `` // Cross domain policy implements http.Handler interface, implementing a custom ServerHTTP. type crossDomainPolicy struct { diff --git a/cmd/event-notifier.go b/cmd/event-notifier.go index b8801c853..97646a1f3 100644 --- a/cmd/event-notifier.go +++ b/cmd/event-notifier.go @@ -18,7 +18,6 @@ package cmd import ( "bytes" - "encoding/hex" "encoding/json" "encoding/xml" "fmt" @@ -401,7 +400,7 @@ func persistNotificationConfig(bucket string, ncfg *notificationConfig, obj Obje // build path ncPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig) // write object to path - sha256Sum := hex.EncodeToString(sum256(buf)) + sha256Sum := getSHA256Hash(buf) _, err = obj.PutObject(minioMetaBucket, ncPath, int64(len(buf)), bytes.NewReader(buf), nil, sha256Sum) if err != nil { errorIf(err, "Unable to write bucket notification configuration.") @@ -421,7 +420,7 @@ func persistListenerConfig(bucket string, lcfg []listenerConfig, obj ObjectLayer // build path lcPath := path.Join(bucketConfigPrefix, bucket, bucketListenerConfig) // write object to path - sha256Sum := hex.EncodeToString(sum256(buf)) + sha256Sum := getSHA256Hash(buf) _, err = obj.PutObject(minioMetaBucket, lcPath, int64(len(buf)), bytes.NewReader(buf), nil, sha256Sum) if err != nil { errorIf(err, "Unable to write bucket 
listener configuration to object layer.") diff --git a/cmd/fs-v1-multipart_test.go b/cmd/fs-v1-multipart_test.go index ab4431999..44de40196 100644 --- a/cmd/fs-v1-multipart_test.go +++ b/cmd/fs-v1-multipart_test.go @@ -18,8 +18,6 @@ package cmd import ( "bytes" - "crypto/md5" - "encoding/hex" "os" "path/filepath" "reflect" @@ -80,9 +78,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) { t.Fatal("Unexpected error ", err) } - md5Writer := md5.New() - md5Writer.Write(data) - md5Hex := hex.EncodeToString(md5Writer.Sum(nil)) + md5Hex := getMD5Hash(data) sha256sum := "" // Test with faulty disk @@ -133,9 +129,7 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) { t.Fatal("Unexpected error ", err) } - md5Writer := md5.New() - md5Writer.Write(data) - md5Hex := hex.EncodeToString(md5Writer.Sum(nil)) + md5Hex := getMD5Hash(data) sha256sum := "" if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, 5, bytes.NewReader(data), md5Hex, sha256sum); err != nil { @@ -185,9 +179,7 @@ func TestListMultipartUploadsFaultyDisk(t *testing.T) { t.Fatal("Unexpected error ", err) } - md5Writer := md5.New() - md5Writer.Write(data) - md5Hex := hex.EncodeToString(md5Writer.Sum(nil)) + md5Hex := getMD5Hash(data) sha256sum := "" if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, 5, bytes.NewReader(data), md5Hex, sha256sum); err != nil { diff --git a/cmd/hasher.go b/cmd/hasher.go new file mode 100644 index 000000000..83e2c474a --- /dev/null +++ b/cmd/hasher.go @@ -0,0 +1,48 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/hex" +) + +// getSHA256Hash returns SHA-256 hash of given data. +func getSHA256Hash(data []byte) string { + hash := sha256.New() + hash.Write(data) + return hex.EncodeToString(hash.Sum(nil)) +} + +// getMD5Sum returns MD5 sum of given data. +func getMD5Sum(data []byte) []byte { + hash := md5.New() + hash.Write(data) + return hash.Sum(nil) +} + +// getMD5Hash returns MD5 hash of given data. +func getMD5Hash(data []byte) string { + return hex.EncodeToString(getMD5Sum(data)) +} + +// getMD5HashBase64 returns MD5 hash in base64 encoding of given data. +func getMD5HashBase64(data []byte) string { + return base64.StdEncoding.EncodeToString(getMD5Sum(data)) +} diff --git a/cmd/lock-instrument.go b/cmd/lock-instrument.go index 66d9ea6fd..014c65039 100644 --- a/cmd/lock-instrument.go +++ b/cmd/lock-instrument.go @@ -268,5 +268,5 @@ func (n *nsLockMap) deleteLockInfoEntryForOps(param nsParam, opsID string) error // Return randomly generated string ID func getOpsID() string { - return string(generateRequestID()) + return newRequestID() } diff --git a/cmd/object-api-multipart_test.go b/cmd/object-api-multipart_test.go index ae11cf5cf..907ae42c3 100644 --- a/cmd/object-api-multipart_test.go +++ b/cmd/object-api-multipart_test.go @@ -18,8 +18,6 @@ package cmd import ( "bytes" - "crypto/md5" - "encoding/hex" "fmt" "strings" "testing" @@ -1759,12 +1757,6 @@ func TestObjectCompleteMultipartUpload(t *testing.T) { // Tests validate CompleteMultipart functionality. 
func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Calculates MD5 sum of the given byte array. - findMD5 := func(toBeHashed []byte) string { - hasher := md5.New() - hasher.Write(toBeHashed) - return hex.EncodeToString(hasher.Sum(nil)) - } var err error var uploadID string bucketNames := []string{"minio-bucket", "minio-2-bucket"} @@ -1791,7 +1783,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T // Parts with size greater than 5 MB. // Generating a 6MB byte array. validPart := bytes.Repeat([]byte("abcdef"), 1024*1024) - validPartMD5 := findMD5(validPart) + validPartMD5 := getMD5Hash(validPart) // Create multipart parts. // Need parts to be uploaded before CompleteMultiPartUpload can be called tested. parts := []struct { diff --git a/cmd/object-api-putobject_test.go b/cmd/object-api-putobject_test.go index 39c18b11e..683da570d 100644 --- a/cmd/object-api-putobject_test.go +++ b/cmd/object-api-putobject_test.go @@ -26,16 +26,8 @@ import ( "testing" ) -// md5Hex ignores error from Write method since it never returns one. Check -// crypto/md5 doc for more details. -func md5Hex(b []byte) string { - md5Writer := md5.New() - md5Writer.Write(b) - return hex.EncodeToString(md5Writer.Sum(nil)) -} - func md5Header(data []byte) map[string]string { - return map[string]string{"md5Sum": md5Hex([]byte(data))} + return map[string]string{"md5Sum": getMD5Hash([]byte(data))} } // Wrapper for calling PutObject tests for both XL multiple disks and single node setup. 
@@ -68,7 +60,7 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl data = []byte("hello") fiveMBBytes = bytes.Repeat([]byte("a"), 5*1024*124) ) - invalidMD5 := md5Hex([]byte("meh")) + invalidMD5 := getMD5Hash([]byte("meh")) invalidMD5Header := md5Header([]byte("meh")) testCases := []struct { @@ -126,37 +118,37 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl // Test case 15-17. // With no metadata - {bucket, object, data, nil, "", int64(len(data)), md5Hex(data), nil}, - {bucket, object, nilBytes, nil, "", int64(len(nilBytes)), md5Hex(nilBytes), nil}, - {bucket, object, fiveMBBytes, nil, "", int64(len(fiveMBBytes)), md5Hex(fiveMBBytes), nil}, + {bucket, object, data, nil, "", int64(len(data)), getMD5Hash(data), nil}, + {bucket, object, nilBytes, nil, "", int64(len(nilBytes)), getMD5Hash(nilBytes), nil}, + {bucket, object, fiveMBBytes, nil, "", int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes), nil}, // Test case 18-20. // With arbitrary metadata - {bucket, object, data, map[string]string{"answer": "42"}, "", int64(len(data)), md5Hex(data), nil}, - {bucket, object, nilBytes, map[string]string{"answer": "42"}, "", int64(len(nilBytes)), md5Hex(nilBytes), nil}, - {bucket, object, fiveMBBytes, map[string]string{"answer": "42"}, "", int64(len(fiveMBBytes)), md5Hex(fiveMBBytes), nil}, + {bucket, object, data, map[string]string{"answer": "42"}, "", int64(len(data)), getMD5Hash(data), nil}, + {bucket, object, nilBytes, map[string]string{"answer": "42"}, "", int64(len(nilBytes)), getMD5Hash(nilBytes), nil}, + {bucket, object, fiveMBBytes, map[string]string{"answer": "42"}, "", int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes), nil}, // Test case 21-23. // With valid md5sum and sha256. 
- {bucket, object, data, md5Header(data), hex.EncodeToString(sum256(data)), int64(len(data)), md5Hex(data), nil}, - {bucket, object, nilBytes, md5Header(nilBytes), hex.EncodeToString(sum256(nilBytes)), int64(len(nilBytes)), md5Hex(nilBytes), nil}, - {bucket, object, fiveMBBytes, md5Header(fiveMBBytes), hex.EncodeToString(sum256(fiveMBBytes)), int64(len(fiveMBBytes)), md5Hex(fiveMBBytes), nil}, + {bucket, object, data, md5Header(data), getSHA256Hash(data), int64(len(data)), getMD5Hash(data), nil}, + {bucket, object, nilBytes, md5Header(nilBytes), getSHA256Hash(nilBytes), int64(len(nilBytes)), getMD5Hash(nilBytes), nil}, + {bucket, object, fiveMBBytes, md5Header(fiveMBBytes), getSHA256Hash(fiveMBBytes), int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes), nil}, // Test case 24-26. // data with invalid md5sum in header - {bucket, object, data, invalidMD5Header, "", int64(len(data)), md5Hex(data), BadDigest{invalidMD5, md5Hex(data)}}, - {bucket, object, nilBytes, invalidMD5Header, "", int64(len(nilBytes)), md5Hex(nilBytes), BadDigest{invalidMD5, md5Hex(nilBytes)}}, - {bucket, object, fiveMBBytes, invalidMD5Header, "", int64(len(fiveMBBytes)), md5Hex(fiveMBBytes), BadDigest{invalidMD5, md5Hex(fiveMBBytes)}}, + {bucket, object, data, invalidMD5Header, "", int64(len(data)), getMD5Hash(data), BadDigest{invalidMD5, getMD5Hash(data)}}, + {bucket, object, nilBytes, invalidMD5Header, "", int64(len(nilBytes)), getMD5Hash(nilBytes), BadDigest{invalidMD5, getMD5Hash(nilBytes)}}, + {bucket, object, fiveMBBytes, invalidMD5Header, "", int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes), BadDigest{invalidMD5, getMD5Hash(fiveMBBytes)}}, // Test case 27-29. 
// data with size different from the actual number of bytes available in the reader - {bucket, object, data, nil, "", int64(len(data) - 1), md5Hex(data[:len(data)-1]), nil}, - {bucket, object, nilBytes, nil, "", int64(len(nilBytes) + 1), md5Hex(nilBytes), IncompleteBody{}}, - {bucket, object, fiveMBBytes, nil, "", int64(0), md5Hex(fiveMBBytes), nil}, + {bucket, object, data, nil, "", int64(len(data) - 1), getMD5Hash(data[:len(data)-1]), nil}, + {bucket, object, nilBytes, nil, "", int64(len(nilBytes) + 1), getMD5Hash(nilBytes), IncompleteBody{}}, + {bucket, object, fiveMBBytes, nil, "", int64(0), getMD5Hash(fiveMBBytes), nil}, // Test case 30 // valid data with X-Amz-Meta- meta - {bucket, object, data, map[string]string{"X-Amz-Meta-AppID": "a42"}, "", int64(len(data)), md5Hex(data), nil}, + {bucket, object, data, map[string]string{"X-Amz-Meta-AppID": "a42"}, "", int64(len(data)), getMD5Hash(data), nil}, } for i, testCase := range testCases { diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index 4e29b5739..61a5d6f69 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -91,31 +91,11 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req return } - switch getRequestAuthType(r) { - default: - // For all unknown auth types return error. - writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) + if s3Error := checkRequestAuthType(r, bucket, "s3:GetObject", serverConfig.GetRegion()); s3Error != ErrNone { + writeErrorResponse(w, r, s3Error, r.URL.Path) return - case authTypeAnonymous: - // http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html - if s3Error := enforceBucketPolicy(bucket, "s3:GetObject", r.URL); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresignedV2, authTypeSignedV2: - // Signature V2 validation. 
- if s3Error := isReqAuthenticatedV2(r); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresigned, authTypeSigned: - if s3Error := isReqAuthenticated(r, serverConfig.GetRegion()); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } } + objInfo, err := objectAPI.GetObjectInfo(bucket, object) if err != nil { errorIf(err, "Unable to fetch object info.") @@ -210,30 +190,9 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re return } - switch getRequestAuthType(r) { - default: - // For all unknown auth types return error. - writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) + if s3Error := checkRequestAuthType(r, bucket, "s3:GetObject", serverConfig.GetRegion()); s3Error != ErrNone { + writeErrorResponse(w, r, s3Error, r.URL.Path) return - case authTypeAnonymous: - // http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html - if s3Error := enforceBucketPolicy(bucket, "s3:GetObject", r.URL); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresignedV2, authTypeSignedV2: - // Signature V2 validation. - if s3Error := isReqAuthenticatedV2(r); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresigned, authTypeSigned: - if s3Error := isReqAuthenticated(r, serverConfig.GetRegion()); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } } objInfo, err := objectAPI.GetObjectInfo(bucket, object) @@ -274,30 +233,9 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re return } - switch getRequestAuthType(r) { - default: - // For all unknown auth types return error. 
- writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) + if s3Error := checkRequestAuthType(r, bucket, "s3:PutObject", serverConfig.GetRegion()); s3Error != ErrNone { + writeErrorResponse(w, r, s3Error, r.URL.Path) return - case authTypeAnonymous: - // http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html - if s3Error := enforceBucketPolicy(bucket, "s3:PutObject", r.URL); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresignedV2, authTypeSignedV2: - // Signature V2 validation. - if s3Error := isReqAuthenticatedV2(r); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresigned, authTypeSigned: - if s3Error := isReqAuthenticated(r, serverConfig.GetRegion()); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } } // TODO: Reject requests where body/payload is present, for now we don't even read it. @@ -539,30 +477,9 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r return } - switch getRequestAuthType(r) { - default: - // For all unknown auth types return error. - writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) + if s3Error := checkRequestAuthType(r, bucket, "s3:PutObject", serverConfig.GetRegion()); s3Error != ErrNone { + writeErrorResponse(w, r, s3Error, r.URL.Path) return - case authTypeAnonymous: - // http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html - if s3Error := enforceBucketPolicy(bucket, "s3:PutObject", r.URL); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresignedV2, authTypeSignedV2: - // Signature V2 validation. 
- if s3Error := isReqAuthenticatedV2(r); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresigned, authTypeSigned: - if s3Error := isReqAuthenticated(r, serverConfig.GetRegion()); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } } // Extract metadata that needs to be saved. @@ -711,30 +628,9 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter, return } - switch getRequestAuthType(r) { - default: - // For all unknown auth types return error. - writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) + if s3Error := checkRequestAuthType(r, bucket, "s3:AbortMultipartUpload", serverConfig.GetRegion()); s3Error != ErrNone { + writeErrorResponse(w, r, s3Error, r.URL.Path) return - case authTypeAnonymous: - // http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html - if s3Error := enforceBucketPolicy(bucket, "s3:AbortMultipartUpload", r.URL); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresignedV2, authTypeSignedV2: - // Signature V2 validation. - if s3Error := isReqAuthenticatedV2(r); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresigned, authTypeSigned: - if s3Error := isReqAuthenticated(r, serverConfig.GetRegion()); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } } uploadID, _, _, _ := getObjectResources(r.URL.Query()) @@ -758,30 +654,9 @@ func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *ht return } - switch getRequestAuthType(r) { - default: - // For all unknown auth types return error. 
- writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) + if s3Error := checkRequestAuthType(r, bucket, "s3:ListMultipartUploadParts", serverConfig.GetRegion()); s3Error != ErrNone { + writeErrorResponse(w, r, s3Error, r.URL.Path) return - case authTypeAnonymous: - // http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html - if s3Error := enforceBucketPolicy(bucket, "s3:ListMultipartUploadParts", r.URL); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresignedV2, authTypeSignedV2: - // Signature V2 validation. - if s3Error := isReqAuthenticatedV2(r); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresigned, authTypeSigned: - if s3Error := isReqAuthenticated(r, serverConfig.GetRegion()); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } } uploadID, partNumberMarker, maxParts, _ := getObjectResources(r.URL.Query()) @@ -819,35 +694,15 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite return } + if s3Error := checkRequestAuthType(r, bucket, "s3:PutObject", serverConfig.GetRegion()); s3Error != ErrNone { + writeErrorResponse(w, r, s3Error, r.URL.Path) + return + } + // Get upload id. uploadID, _, _, _ := getObjectResources(r.URL.Query()) var md5Sum string - switch getRequestAuthType(r) { - default: - // For all unknown auth types return error. - writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) - return - case authTypeAnonymous: - // http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html - if s3Error := enforceBucketPolicy(bucket, "s3:PutObject", r.URL); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresignedV2, authTypeSignedV2: - // Signature V2 validation. 
- if s3Error := isReqAuthenticatedV2(r); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresigned, authTypeSigned: - if s3Error := isReqAuthenticated(r, serverConfig.GetRegion()); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - } completeMultipartBytes, err := ioutil.ReadAll(r.Body) if err != nil { errorIf(err, "Unable to complete multipart upload.") @@ -941,31 +796,11 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http. return } - switch getRequestAuthType(r) { - default: - // For all unknown auth types return error. - writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) + if s3Error := checkRequestAuthType(r, bucket, "s3:DeleteObject", serverConfig.GetRegion()); s3Error != ErrNone { + writeErrorResponse(w, r, s3Error, r.URL.Path) return - case authTypeAnonymous: - // http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html - if s3Error := enforceBucketPolicy(bucket, "s3:DeleteObject", r.URL); s3Error != ErrNone { - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypePresignedV2, authTypeSignedV2: - // Signature V2 validation. - if s3Error := isReqAuthenticatedV2(r); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } - case authTypeSigned, authTypePresigned: - if s3Error := isReqAuthenticated(r, serverConfig.GetRegion()); s3Error != ErrNone { - errorIf(errSignatureMismatch, dumpRequest(r)) - writeErrorResponse(w, r, s3Error, r.URL.Path) - return - } } + /// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html /// Ignore delete object errors, since we are suppposed to reply /// only 204. 
diff --git a/cmd/object-handlers_test.go b/cmd/object-handlers_test.go index 47fdab671..390b19db6 100644 --- a/cmd/object-handlers_test.go +++ b/cmd/object-handlers_test.go @@ -18,8 +18,6 @@ package cmd import ( "bytes" - "crypto/md5" - "encoding/hex" "encoding/xml" "fmt" "io" @@ -1385,13 +1383,6 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s t.Fatal("Notifier initialization failed.") } - // Calculates MD5 sum of the given byte array. - findMD5 := func(toBeHashed []byte) string { - hasher := md5.New() - hasher.Write(toBeHashed) - return hex.EncodeToString(hasher.Sum(nil)) - } - // object used for the test. objectName := "test-object-new-multipart" @@ -1414,7 +1405,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s // Parts with size greater than 5 MB. // Generating a 6MB byte array. validPart := bytes.Repeat([]byte("abcdef"), 1024*1024) - validPartMD5 := findMD5(validPart) + validPartMD5 := getMD5Hash(validPart) // Create multipart parts. // Need parts to be uploaded before CompleteMultiPartUpload can be called tested. parts := []struct { @@ -1747,13 +1738,6 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri t.Fatal("Notifier initialization failed.") } - // Calculates MD5 sum of the given byte array. - findMD5 := func(toBeHashed []byte) string { - hasher := md5.New() - hasher.Write(toBeHashed) - return hex.EncodeToString(hasher.Sum(nil)) - } - // object used for the test. objectName := "test-object-new-multipart" @@ -1776,7 +1760,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri // Parts with size greater than 5 MB. // Generating a 6MB byte array. validPart := bytes.Repeat([]byte("abcdef"), 1024*1024) - validPartMD5 := findMD5(validPart) + validPartMD5 := getMD5Hash(validPart) // Create multipart parts. // Need parts to be uploaded before AbortMultiPartUpload can be called tested. 
parts := []struct { diff --git a/cmd/object-utils.go b/cmd/object-utils.go index d6b68df41..b008b6b79 100644 --- a/cmd/object-utils.go +++ b/cmd/object-utils.go @@ -17,7 +17,6 @@ package cmd import ( - "crypto/md5" "encoding/hex" "fmt" "io" @@ -150,9 +149,7 @@ func getCompleteMultipartMD5(parts []completePart) (string, error) { } finalMD5Bytes = append(finalMD5Bytes, md5Bytes...) } - md5Hasher := md5.New() - md5Hasher.Write(finalMD5Bytes) - s3MD5 := fmt.Sprintf("%s-%d", hex.EncodeToString(md5Hasher.Sum(nil)), len(parts)) + s3MD5 := fmt.Sprintf("%s-%d", getMD5Hash(finalMD5Bytes), len(parts)) return s3MD5, nil } diff --git a/cmd/object_api_suite_test.go b/cmd/object_api_suite_test.go index 85c5dc7c9..252780afe 100644 --- a/cmd/object_api_suite_test.go +++ b/cmd/object_api_suite_test.go @@ -18,8 +18,6 @@ package cmd import ( "bytes" - "crypto/md5" - "encoding/hex" "io" "math/rand" "strconv" @@ -104,9 +102,7 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, c TestErr data := bytes.Repeat([]byte("0123456789abcdef"), 5*1024*1024/16) completedParts := completeMultipartUpload{} for i := 1; i <= 10; i++ { - hasher := md5.New() - hasher.Write(data) - expectedMD5Sumhex := hex.EncodeToString(hasher.Sum(nil)) + expectedMD5Sumhex := getMD5Hash(data) var calculatedMD5sum string calculatedMD5sum, err = obj.PutObjectPart("bucket", "key", uploadID, i, int64(len(data)), bytes.NewBuffer(data), expectedMD5Sumhex, "") @@ -152,9 +148,7 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, c TestErrHan randomString = randomString + strconv.Itoa(num) } - hasher := md5.New() - hasher.Write([]byte(randomString)) - expectedMD5Sumhex := hex.EncodeToString(hasher.Sum(nil)) + expectedMD5Sumhex := getMD5Hash([]byte(randomString)) metadata["md5"] = expectedMD5Sumhex var calculatedMD5sum string @@ -192,9 +186,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, c TestErrH randomString = randomString + strconv.Itoa(num) } - hasher := 
md5.New() - hasher.Write([]byte(randomString)) - expectedMD5Sumhex := hex.EncodeToString(hasher.Sum(nil)) + expectedMD5Sumhex := getMD5Hash([]byte(randomString)) key := "obj" + strconv.Itoa(i) objects[key] = []byte(randomString) diff --git a/cmd/s3-peer-client.go b/cmd/s3-peer-client.go index 2bdd809ca..0a218ec31 100644 --- a/cmd/s3-peer-client.go +++ b/cmd/s3-peer-client.go @@ -48,7 +48,7 @@ func makeS3Peers(eps []*url.URL) s3Peers { // add local (self) as peer in the array ret = append(ret, s3Peer{ globalMinioAddr, - &localBMS{ObjectAPI: newObjectLayerFn}, + &localBucketMetaState{ObjectAPI: newObjectLayerFn}, }) seenAddr[globalMinioAddr] = true @@ -72,7 +72,7 @@ func makeS3Peers(eps []*url.URL) s3Peers { ret = append(ret, s3Peer{ ep.Host, - &remoteBMS{newAuthClient(&cfg)}, + &remoteBucketMetaState{newAuthClient(&cfg)}, }) seenAddr[ep.Host] = true } @@ -127,13 +127,13 @@ func (s3p s3Peers) SendUpdate(peerIndex []int, args interface{}) []error { // Make the appropriate bucket metadata update // according to the argument type switch v := args.(type) { - case *SetBNPArgs: + case *SetBucketNotificationPeerArgs: err = client.UpdateBucketNotification(v) - case *SetBLPArgs: + case *SetBucketListenerPeerArgs: err = client.UpdateBucketListener(v) - case *SetBPPArgs: + case *SetBucketPolicyPeerArgs: err = client.UpdateBucketPolicy(v) default: @@ -173,7 +173,7 @@ func (s3p s3Peers) SendUpdate(peerIndex []int, args interface{}) []error { // S3PeersUpdateBucketNotification - Sends Update Bucket notification // request to all peers. Currently we log an error and continue. 
func S3PeersUpdateBucketNotification(bucket string, ncfg *notificationConfig) { - setBNPArgs := &SetBNPArgs{Bucket: bucket, NCfg: ncfg} + setBNPArgs := &SetBucketNotificationPeerArgs{Bucket: bucket, NCfg: ncfg} errs := globalS3Peers.SendUpdate(nil, setBNPArgs) for idx, err := range errs { errorIf( @@ -187,7 +187,7 @@ func S3PeersUpdateBucketNotification(bucket string, ncfg *notificationConfig) { // S3PeersUpdateBucketListener - Sends Update Bucket listeners request // to all peers. Currently we log an error and continue. func S3PeersUpdateBucketListener(bucket string, lcfg []listenerConfig) { - setBLPArgs := &SetBLPArgs{Bucket: bucket, LCfg: lcfg} + setBLPArgs := &SetBucketListenerPeerArgs{Bucket: bucket, LCfg: lcfg} errs := globalS3Peers.SendUpdate(nil, setBLPArgs) for idx, err := range errs { errorIf( @@ -206,7 +206,7 @@ func S3PeersUpdateBucketPolicy(bucket string, pCh policyChange) { errorIf(err, "Failed to marshal policyChange - this is a BUG!") return } - setBPPArgs := &SetBPPArgs{Bucket: bucket, PChBytes: byts} + setBPPArgs := &SetBucketPolicyPeerArgs{Bucket: bucket, PChBytes: byts} errs := globalS3Peers.SendUpdate(nil, setBPPArgs) for idx, err := range errs { errorIf( diff --git a/cmd/s3-peer-router.go b/cmd/s3-peer-router.go index f86a76d7a..b1c0e6681 100644 --- a/cmd/s3-peer-router.go +++ b/cmd/s3-peer-router.go @@ -32,7 +32,7 @@ type s3PeerAPIHandlers struct { func registerS3PeerRPCRouter(mux *router.Router) error { s3PeerHandlers := &s3PeerAPIHandlers{ - &localBMS{ + &localBucketMetaState{ ObjectAPI: newObjectLayerFn, }, } diff --git a/cmd/s3-peer-rpc-handlers.go b/cmd/s3-peer-rpc-handlers.go index 9985d9971..c97e3c8a7 100644 --- a/cmd/s3-peer-rpc-handlers.go +++ b/cmd/s3-peer-rpc-handlers.go @@ -36,9 +36,9 @@ func (s3 *s3PeerAPIHandlers) LoginHandler(args *RPCLoginArgs, reply *RPCLoginRep return nil } -// SetBNPArgs - Arguments collection to SetBucketNotificationPeer RPC +// SetBucketNotificationPeerArgs - Arguments collection to 
SetBucketNotificationPeer RPC // call -type SetBNPArgs struct { +type SetBucketNotificationPeerArgs struct { // For Auth GenericArgs @@ -48,7 +48,7 @@ type SetBNPArgs struct { NCfg *notificationConfig } -func (s3 *s3PeerAPIHandlers) SetBucketNotificationPeer(args *SetBNPArgs, reply *GenericReply) error { +func (s3 *s3PeerAPIHandlers) SetBucketNotificationPeer(args *SetBucketNotificationPeerArgs, reply *GenericReply) error { // check auth if !isRPCTokenValid(args.Token) { return errInvalidToken @@ -57,8 +57,8 @@ func (s3 *s3PeerAPIHandlers) SetBucketNotificationPeer(args *SetBNPArgs, reply * return s3.bms.UpdateBucketNotification(args) } -// SetBLPArgs - Arguments collection to SetBucketListenerPeer RPC call -type SetBLPArgs struct { +// SetBucketListenerPeerArgs - Arguments collection to SetBucketListenerPeer RPC call +type SetBucketListenerPeerArgs struct { // For Auth GenericArgs @@ -68,7 +68,7 @@ type SetBLPArgs struct { LCfg []listenerConfig } -func (s3 *s3PeerAPIHandlers) SetBucketListenerPeer(args *SetBLPArgs, reply *GenericReply) error { +func (s3 *s3PeerAPIHandlers) SetBucketListenerPeer(args *SetBucketListenerPeerArgs, reply *GenericReply) error { // check auth if !isRPCTokenValid(args.Token) { return errInvalidToken @@ -99,8 +99,8 @@ func (s3 *s3PeerAPIHandlers) Event(args *EventArgs, reply *GenericReply) error { return s3.bms.SendEvent(args) } -// SetBPPArgs - Arguments collection for SetBucketPolicyPeer RPC call -type SetBPPArgs struct { +// SetBucketPolicyPeerArgs - Arguments collection for SetBucketPolicyPeer RPC call +type SetBucketPolicyPeerArgs struct { // For Auth GenericArgs @@ -111,7 +111,7 @@ type SetBPPArgs struct { } // tell receiving server to update a bucket policy -func (s3 *s3PeerAPIHandlers) SetBucketPolicyPeer(args *SetBPPArgs, reply *GenericReply) error { +func (s3 *s3PeerAPIHandlers) SetBucketPolicyPeer(args *SetBucketPolicyPeerArgs, reply *GenericReply) error { // check auth if !isRPCTokenValid(args.Token) { return errInvalidToken 
diff --git a/cmd/s3-peer-rpc-handlers_test.go b/cmd/s3-peer-rpc-handlers_test.go index 21d4e95f4..c310cfbfe 100644 --- a/cmd/s3-peer-rpc-handlers_test.go +++ b/cmd/s3-peer-rpc-handlers_test.go @@ -73,7 +73,7 @@ func (s *TestRPCS3PeerSuite) testS3PeerRPC(t *testing.T) { } // Check bucket notification call works. - BNPArgs := SetBNPArgs{Bucket: "bucket", NCfg: &notificationConfig{}} + BNPArgs := SetBucketNotificationPeerArgs{Bucket: "bucket", NCfg: &notificationConfig{}} client := newAuthClient(s.testAuthConf) defer client.Close() err = client.Call("S3.SetBucketNotificationPeer", &BNPArgs, &GenericReply{}) @@ -82,7 +82,7 @@ func (s *TestRPCS3PeerSuite) testS3PeerRPC(t *testing.T) { } // Check bucket listener update call works. - BLPArgs := SetBLPArgs{Bucket: "bucket", LCfg: nil} + BLPArgs := SetBucketListenerPeerArgs{Bucket: "bucket", LCfg: nil} err = client.Call("S3.SetBucketListenerPeer", &BLPArgs, &GenericReply{}) if err != nil { t.Fatal(err) @@ -94,7 +94,7 @@ func (s *TestRPCS3PeerSuite) testS3PeerRPC(t *testing.T) { if err != nil { t.Fatal(err) } - BPPArgs := SetBPPArgs{Bucket: "bucket", PChBytes: pChBytes} + BPPArgs := SetBucketPolicyPeerArgs{Bucket: "bucket", PChBytes: pChBytes} err = client.Call("S3.SetBucketPolicyPeer", &BPPArgs, &GenericReply{}) if err != nil { t.Fatal(err) diff --git a/cmd/server_test.go b/cmd/server_test.go index e2ca25774..dfdad437c 100644 --- a/cmd/server_test.go +++ b/cmd/server_test.go @@ -18,11 +18,8 @@ package cmd import ( "bytes" - "crypto/md5" "crypto/tls" "crypto/x509" - "encoding/base64" - "encoding/hex" "encoding/xml" "fmt" "io" @@ -1676,7 +1673,7 @@ func (s *TestSuiteCommon) TestGetObjectLarge11MiB(c *C) { for i := 0; i < 11*1024; i++ { buffer.WriteString(fmt.Sprintf("[%05d] %s\n", i, line)) } - putMD5 := sumMD5(buffer.Bytes()) + putMD5 := getMD5Hash(buffer.Bytes()) objectName := "test-11Mb-object" // Put object @@ -1707,10 +1704,10 @@ func (s *TestSuiteCommon) TestGetObjectLarge11MiB(c *C) { c.Assert(err, IsNil) // Get md5Sum of
the response content. - getMD5 := sumMD5(getContent) + getMD5 := getMD5Hash(getContent) // Compare putContent and getContent. - c.Assert(hex.EncodeToString(putMD5), Equals, hex.EncodeToString(getMD5)) + c.Assert(putMD5, Equals, getMD5) } // TestGetPartialObjectMisAligned - tests get object partially mis-aligned. @@ -2372,9 +2369,7 @@ func (s *TestSuiteCommon) TestObjectValidMD5(c *C) { // content for the object to be uploaded. data := bytes.Repeat([]byte("0123456789abcdef"), 5*1024*1024/16) // calculate md5Sum of the data. - hasher := md5.New() - hasher.Write(data) - md5Sum := hasher.Sum(nil) + md5SumBase64 := getMD5HashBase64(data) buffer1 := bytes.NewReader(data) objectName := "test-1-object" @@ -2383,7 +2378,7 @@ func (s *TestSuiteCommon) TestObjectValidMD5(c *C) { int64(buffer1.Len()), buffer1, s.accessKey, s.secretKey, s.signer) c.Assert(err, IsNil) // set the Content-Md5 to be the hash to content. - request.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(md5Sum)) + request.Header.Set("Content-Md5", md5SumBase64) client = http.Client{Transport: s.transport} response, err = client.Do(request) c.Assert(err, IsNil) @@ -2447,16 +2442,14 @@ func (s *TestSuiteCommon) TestObjectMultipart(c *C) { // Create a byte array of 5MB. data := bytes.Repeat([]byte("0123456789abcdef"), 5*1024*1024/16) // calculate md5Sum of the data. - hasher := md5.New() - hasher.Write(data) - md5Sum := hasher.Sum(nil) + md5SumBase64 := getMD5HashBase64(data) buffer1 := bytes.NewReader(data) // HTTP request for the part to be uploaded. request, err = newTestSignedRequest("PUT", getPartUploadURL(s.endPoint, bucketName, objectName, uploadID, "1"), int64(buffer1.Len()), buffer1, s.accessKey, s.secretKey, s.signer) // set the Content-Md5 header to the base64 encoding the md5Sum of the content. 
- request.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(md5Sum)) + request.Header.Set("Content-Md5", md5SumBase64) c.Assert(err, IsNil) client = http.Client{Transport: s.transport} @@ -2469,17 +2462,15 @@ func (s *TestSuiteCommon) TestObjectMultipart(c *C) { // Create a byte array of 1 byte. data = []byte("0") - hasher = md5.New() - hasher.Write(data) // calculate md5Sum of the data. - md5Sum = hasher.Sum(nil) + md5SumBase64 = getMD5HashBase64(data) buffer2 := bytes.NewReader(data) // HTTP request for the second part to be uploaded. request, err = newTestSignedRequest("PUT", getPartUploadURL(s.endPoint, bucketName, objectName, uploadID, "2"), int64(buffer2.Len()), buffer2, s.accessKey, s.secretKey, s.signer) // set the Content-Md5 header to the base64 encoding the md5Sum of the content. - request.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(md5Sum)) + request.Header.Set("Content-Md5", md5SumBase64) c.Assert(err, IsNil) client = http.Client{Transport: s.transport} diff --git a/cmd/signature-v4-utils_test.go b/cmd/signature-v4-utils_test.go index 9b36ee9ca..9a1928841 100644 --- a/cmd/signature-v4-utils_test.go +++ b/cmd/signature-v4-utils_test.go @@ -40,7 +40,7 @@ func TestSkipContentSha256Cksum(t *testing.T) { // Test case - 2. // Test case with "X-Amz-Content-Sha256" header set to "UNSIGNED-PAYLOAD" // When "X-Amz-Content-Sha256" header is set to "UNSIGNED-PAYLOAD", validation of content sha256 has to be skipped. - {"X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD", "", "", true}, + {"X-Amz-Content-Sha256", unsignedPayload, "", "", true}, // Test case - 3. // Enabling PreSigned Signature v4. 
{"", "", "X-Amz-Credential", "", true}, diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index b334a12bd..b0b8964cb 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -624,7 +624,7 @@ func signStreamingRequest(req *http.Request, accessKey, secretKey string, currTi stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n" stringToSign = stringToSign + scope + "\n" - stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest))) + stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest)) date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd))) region := sumHMAC(date, []byte("us-east-1")) @@ -707,7 +707,7 @@ func assembleStreamingChunks(req *http.Request, body io.ReadSeeker, chunkSize in stringToSign = stringToSign + scope + "\n" stringToSign = stringToSign + signature + "\n" stringToSign = stringToSign + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + "\n" // hex(sum256("")) - stringToSign = stringToSign + hex.EncodeToString(sum256(buffer[:n])) + stringToSign = stringToSign + getSHA256Hash(buffer[:n]) date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd))) region := sumHMAC(date, []byte(regionStr)) @@ -1017,7 +1017,7 @@ func signRequestV4(req *http.Request, accessKey, secretKey string) error { stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n" stringToSign = stringToSign + scope + "\n" - stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest))) + stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest)) date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd))) regionHMAC := sumHMAC(date, []byte(region)) @@ -1061,14 +1061,14 @@ func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeek var hashedPayload string switch { case body == nil: - hashedPayload = hex.EncodeToString(sum256([]byte{})) + hashedPayload = 
getSHA256Hash([]byte{}) default: payloadBytes, err := ioutil.ReadAll(body) if err != nil { return nil, err } - hashedPayload = hex.EncodeToString(sum256(payloadBytes)) - md5Base64 := base64.StdEncoding.EncodeToString(sumMD5(payloadBytes)) + hashedPayload = getSHA256Hash(payloadBytes) + md5Base64 := getMD5HashBase64(payloadBytes) req.Header.Set("Content-Md5", md5Base64) } req.Header.Set("x-amz-content-sha256", hashedPayload) diff --git a/cmd/xl-v1-object_test.go b/cmd/xl-v1-object_test.go index cb61bf288..f48d06a8c 100644 --- a/cmd/xl-v1-object_test.go +++ b/cmd/xl-v1-object_test.go @@ -18,8 +18,6 @@ package cmd import ( "bytes" - "crypto/md5" - "encoding/hex" "io/ioutil" "math/rand" "os" @@ -52,9 +50,7 @@ func TestRepeatPutObjectPart(t *testing.T) { t.Fatal(err) } fiveMBBytes := bytes.Repeat([]byte("a"), 5*1024*1024) - md5Writer := md5.New() - md5Writer.Write(fiveMBBytes) - md5Hex := hex.EncodeToString(md5Writer.Sum(nil)) + md5Hex := getMD5Hash(fiveMBBytes) _, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, 5*1024*1024, bytes.NewReader(fiveMBBytes), md5Hex, "") if err != nil { t.Fatal(err)