Merge pull request #1228 from harshavardhana/signature-cleanup

signature: Move signature outside, use a layered approach for signature verification
This commit is contained in:
Anand Babu (AB) Periasamy 2016-03-26 15:46:52 -07:00
commit 90a46faf31
39 changed files with 572 additions and 739 deletions

View File

@ -17,17 +17,16 @@
package main package main
import ( import (
"bytes"
"crypto/md5"
"crypto/sha256" "crypto/sha256"
"encoding/base64"
"encoding/hex" "encoding/hex"
"io/ioutil"
"net/http" "net/http"
"strings" "strings"
"github.com/minio/minio/pkg/s3/signature4" "github.com/minio/minio/pkg/probe"
)
const (
signV4Algorithm = "AWS4-HMAC-SHA256"
jwtAlgorithm = "Bearer"
) )
// Verify if request has JWT. // Verify if request has JWT.
@ -97,12 +96,41 @@ func getRequestAuthType(r *http.Request) authType {
return authTypeUnknown return authTypeUnknown
} }
// sum256 calculate sha256 sum for an input byte array
func sum256(data []byte) []byte {
hash := sha256.New()
hash.Write(data)
return hash.Sum(nil)
}
// sumMD5 calculate md5 sum for an input byte array
func sumMD5(data []byte) []byte {
hash := md5.New()
hash.Write(data)
return hash.Sum(nil)
}
// Verify if request has valid AWS Signature Version '4'. // Verify if request has valid AWS Signature Version '4'.
func isReqAuthenticated(sign *signature4.Sign, r *http.Request) (s3Error APIErrorCode) { func isReqAuthenticated(r *http.Request) (s3Error APIErrorCode) {
auth := sign.SetHTTPRequestToVerify(r) if r == nil {
errorIf(probe.NewError(errInvalidArgument), "HTTP request cannot be empty.", nil)
return ErrInternalError
}
payload, e := ioutil.ReadAll(r.Body)
if e != nil {
errorIf(probe.NewError(e), "Unable to read HTTP body.", nil)
return ErrInternalError
}
// Verify Content-Md5, if payload is set.
if r.Header.Get("Content-Md5") != "" {
if r.Header.Get("Content-Md5") != base64.StdEncoding.EncodeToString(sumMD5(payload)) {
return ErrBadDigest
}
}
// Populate back the payload.
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
if isRequestSignatureV4(r) { if isRequestSignatureV4(r) {
dummyPayload := sha256.Sum256([]byte("")) ok, err := doesSignatureMatch(hex.EncodeToString(sum256(payload)), r)
ok, err := auth.DoesSignatureMatch(hex.EncodeToString(dummyPayload[:]))
if err != nil { if err != nil {
errorIf(err.Trace(), "Signature verification failed.", nil) errorIf(err.Trace(), "Signature verification failed.", nil)
return ErrInternalError return ErrInternalError
@ -112,7 +140,7 @@ func isReqAuthenticated(sign *signature4.Sign, r *http.Request) (s3Error APIErro
} }
return ErrNone return ErrNone
} else if isRequestPresignedSignatureV4(r) { } else if isRequestPresignedSignatureV4(r) {
ok, err := auth.DoesPresignedSignatureMatch() ok, err := doesPresignedSignatureMatch(r)
if err != nil { if err != nil {
errorIf(err.Trace(), "Presigned signature verification failed.", nil) errorIf(err.Trace(), "Presigned signature verification failed.", nil)
return ErrInternalError return ErrInternalError

View File

@ -18,9 +18,6 @@ package main
import ( import (
"bytes" "bytes"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/xml" "encoding/xml"
"io" "io"
"io/ioutil" "io/ioutil"
@ -30,10 +27,8 @@ import (
"strings" "strings"
mux "github.com/gorilla/mux" mux "github.com/gorilla/mux"
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/fs" "github.com/minio/minio/pkg/fs"
"github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/s3/signature4"
) )
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html // http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
@ -94,7 +89,7 @@ func (api storageAPI) GetBucketLocationHandler(w http.ResponseWriter, r *http.Re
return return
} }
case authTypeSigned, authTypePresigned: case authTypeSigned, authTypePresigned:
if s3Error := isReqAuthenticated(api.Signature, r); s3Error != ErrNone { if s3Error := isReqAuthenticated(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path) writeErrorResponse(w, r, s3Error, r.URL.Path)
return return
} }
@ -151,7 +146,7 @@ func (api storageAPI) ListMultipartUploadsHandler(w http.ResponseWriter, r *http
return return
} }
case authTypePresigned, authTypeSigned: case authTypePresigned, authTypeSigned:
if s3Error := isReqAuthenticated(api.Signature, r); s3Error != ErrNone { if s3Error := isReqAuthenticated(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path) writeErrorResponse(w, r, s3Error, r.URL.Path)
return return
} }
@ -208,7 +203,7 @@ func (api storageAPI) ListObjectsHandler(w http.ResponseWriter, r *http.Request)
return return
} }
case authTypeSigned, authTypePresigned: case authTypeSigned, authTypePresigned:
if s3Error := isReqAuthenticated(api.Signature, r); s3Error != ErrNone { if s3Error := isReqAuthenticated(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path) writeErrorResponse(w, r, s3Error, r.URL.Path)
return return
} }
@ -262,7 +257,7 @@ func (api storageAPI) ListBucketsHandler(w http.ResponseWriter, r *http.Request)
writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path)
return return
case authTypeSigned, authTypePresigned: case authTypeSigned, authTypePresigned:
if s3Error := isReqAuthenticated(api.Signature, r); s3Error != ErrNone { if s3Error := isReqAuthenticated(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path) writeErrorResponse(w, r, s3Error, r.URL.Path)
return return
} }
@ -288,6 +283,24 @@ func (api storageAPI) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *htt
vars := mux.Vars(r) vars := mux.Vars(r)
bucket := vars["bucket"] bucket := vars["bucket"]
switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path)
return
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if s3Error := enforceBucketPolicy("s3:DeleteObject", bucket, r.URL); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
case authTypePresigned, authTypeSigned:
if s3Error := isReqAuthenticated(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
}
// Content-Length is required and should be non-zero // Content-Length is required and should be non-zero
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html // http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if r.ContentLength <= 0 { if r.ContentLength <= 0 {
@ -302,9 +315,6 @@ func (api storageAPI) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *htt
return return
} }
// Set http request for signature.
auth := api.Signature.SetHTTPRequestToVerify(r)
// Allocate incoming content length bytes. // Allocate incoming content length bytes.
deleteXMLBytes := make([]byte, r.ContentLength) deleteXMLBytes := make([]byte, r.ContentLength)
@ -316,52 +326,6 @@ func (api storageAPI) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *htt
return return
} }
switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path)
return
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if s3Error := enforceBucketPolicy("s3:DeleteObject", bucket, r.URL); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
case authTypePresigned:
// Check if request is presigned.
ok, err := auth.DoesPresignedSignatureMatch()
if err != nil {
errorIf(err.Trace(r.URL.String()), "Presigned signature verification failed.", nil)
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
return
}
if !ok {
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
return
}
case authTypeSigned:
// Check if request is signed.
sha := sha256.New()
mdSh := md5.New()
sha.Write(deleteXMLBytes)
mdSh.Write(deleteXMLBytes)
ok, err := auth.DoesSignatureMatch(hex.EncodeToString(sha.Sum(nil)))
if err != nil {
errorIf(err.Trace(), "DeleteMultipleObjects failed.", nil)
writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
return
}
if !ok {
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
return
}
// Verify content md5.
if r.Header.Get("Content-Md5") != base64.StdEncoding.EncodeToString(mdSh.Sum(nil)) {
writeErrorResponse(w, r, ErrBadDigest, r.URL.Path)
return
}
}
// Unmarshal list of keys to be deleted. // Unmarshal list of keys to be deleted.
deleteObjects := &DeleteObjectsRequest{} deleteObjects := &DeleteObjectsRequest{}
if e := xml.Unmarshal(deleteXMLBytes, deleteObjects); e != nil { if e := xml.Unmarshal(deleteXMLBytes, deleteObjects); e != nil {
@ -431,41 +395,14 @@ func (api storageAPI) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
bucket := vars["bucket"] bucket := vars["bucket"]
// Set http request for signature. // Set http request for signature.
auth := api.Signature.SetHTTPRequestToVerify(r)
switch getRequestAuthType(r) { switch getRequestAuthType(r) {
default: default:
// For all unknown auth types return error. // For all unknown auth types return error.
writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path)
return return
case authTypePresigned: case authTypePresigned, authTypeSigned:
ok, err := auth.DoesPresignedSignatureMatch() if s3Error := isReqAuthenticated(r); s3Error != ErrNone {
if err != nil { writeErrorResponse(w, r, s3Error, r.URL.Path)
errorIf(err.Trace(r.URL.String()), "Presigned signature verification failed.", nil)
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
return
}
if !ok {
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
return
}
case authTypeSigned:
// Verify signature for the incoming body if any.
locationBytes, e := ioutil.ReadAll(r.Body)
if e != nil {
errorIf(probe.NewError(e), "MakeBucket failed.", nil)
writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
return
}
sh := sha256.New()
sh.Write(locationBytes)
ok, err := auth.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
if err != nil {
errorIf(err.Trace(), "MakeBucket failed.", nil)
writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
return
}
if !ok {
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
return return
} }
} }
@ -539,11 +476,8 @@ func (api storageAPI) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Req
object := formValues["Key"] object := formValues["Key"]
var ok bool var ok bool
// Set http request for signature.
auth := api.Signature.SetHTTPRequestToVerify(r)
// Verify policy signature. // Verify policy signature.
ok, err = auth.DoesPolicySignatureMatch(formValues) ok, err = doesPolicySignatureMatch(formValues)
if err != nil { if err != nil {
errorIf(err.Trace(), "Unable to verify signature.", nil) errorIf(err.Trace(), "Unable to verify signature.", nil)
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path) writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
@ -553,12 +487,12 @@ func (api storageAPI) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Req
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path) writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
return return
} }
if err = signature4.ApplyPolicyCond(formValues); err != nil { if err = checkPostPolicy(formValues); err != nil {
errorIf(err.Trace(), "Invalid request, policy doesn't match with the endpoint.", nil) errorIf(err.Trace(), "Invalid request, policy doesn't match.", nil)
writeErrorResponse(w, r, ErrMalformedPOSTRequest, r.URL.Path) writeErrorResponse(w, r, ErrMalformedPOSTRequest, r.URL.Path)
return return
} }
objectInfo, err := api.Filesystem.CreateObject(bucket, object, "", -1, fileBody, nil) objectInfo, err := api.Filesystem.CreateObject(bucket, object, -1, fileBody, nil)
if err != nil { if err != nil {
errorIf(err.Trace(), "CreateObject failed.", nil) errorIf(err.Trace(), "CreateObject failed.", nil)
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
@ -572,8 +506,6 @@ func (api storageAPI) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Req
writeErrorResponse(w, r, ErrBadDigest, r.URL.Path) writeErrorResponse(w, r, ErrBadDigest, r.URL.Path)
case fs.IncompleteBody: case fs.IncompleteBody:
writeErrorResponse(w, r, ErrIncompleteBody, r.URL.Path) writeErrorResponse(w, r, ErrIncompleteBody, r.URL.Path)
case fs.InvalidDigest:
writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path)
default: default:
writeErrorResponse(w, r, ErrInternalError, r.URL.Path) writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
} }
@ -601,7 +533,7 @@ func (api storageAPI) HeadBucketHandler(w http.ResponseWriter, r *http.Request)
writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path)
return return
case authTypePresigned, authTypeSigned: case authTypePresigned, authTypeSigned:
if s3Error := isReqAuthenticated(api.Signature, r); s3Error != ErrNone { if s3Error := isReqAuthenticated(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path) writeErrorResponse(w, r, s3Error, r.URL.Path)
return return
} }
@ -634,7 +566,7 @@ func (api storageAPI) DeleteBucketHandler(w http.ResponseWriter, r *http.Request
writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path) writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path)
return return
case authTypePresigned, authTypeSigned: case authTypePresigned, authTypeSigned:
if s3Error := isReqAuthenticated(api.Signature, r); s3Error != ErrNone { if s3Error := isReqAuthenticated(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path) writeErrorResponse(w, r, s3Error, r.URL.Path)
return return
} }

View File

@ -18,8 +18,6 @@ package main
import ( import (
"bytes" "bytes"
"crypto/sha256"
"encoding/hex"
"io" "io"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
@ -134,6 +132,18 @@ func (api storageAPI) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Requ
vars := mux.Vars(r) vars := mux.Vars(r)
bucket := vars["bucket"] bucket := vars["bucket"]
switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path)
return
case authTypePresigned, authTypeSigned:
if s3Error := isReqAuthenticated(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
}
// If Content-Length is unknown or zero, deny the // If Content-Length is unknown or zero, deny the
// request. PutBucketPolicy always needs a Content-Length if // request. PutBucketPolicy always needs a Content-Length if
// incoming request is not chunked. // incoming request is not chunked.
@ -173,34 +183,6 @@ func (api storageAPI) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Requ
return return
} }
// Set http request for signature verification.
auth := api.Signature.SetHTTPRequestToVerify(r)
if isRequestPresignedSignatureV4(r) {
ok, err := auth.DoesPresignedSignatureMatch()
if err != nil {
errorIf(err.Trace(r.URL.String()), "Presigned signature verification failed.", nil)
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
return
}
if !ok {
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
return
}
} else if isRequestSignatureV4(r) {
sh := sha256.New()
sh.Write(bucketPolicyBuf)
ok, err := api.Signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
if err != nil {
errorIf(err.Trace(string(bucketPolicyBuf)), "SaveBucketPolicy failed.", nil)
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
return
}
if !ok {
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
return
}
}
// Save bucket policy. // Save bucket policy.
err := writeBucketPolicy(bucket, bucketPolicyBuf) err := writeBucketPolicy(bucket, bucketPolicyBuf)
if err != nil { if err != nil {
@ -224,10 +206,16 @@ func (api storageAPI) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.R
vars := mux.Vars(r) vars := mux.Vars(r)
bucket := vars["bucket"] bucket := vars["bucket"]
// Validate incoming signature. switch getRequestAuthType(r) {
if s3Error := isReqAuthenticated(api.Signature, r); s3Error != ErrNone { default:
writeErrorResponse(w, r, s3Error, r.URL.Path) // For all unknown auth types return error.
writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path)
return return
case authTypePresigned, authTypeSigned:
if s3Error := isReqAuthenticated(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
} }
// Delete bucket access policy. // Delete bucket access policy.
@ -255,10 +243,16 @@ func (api storageAPI) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Requ
vars := mux.Vars(r) vars := mux.Vars(r)
bucket := vars["bucket"] bucket := vars["bucket"]
// Validate incoming signature. switch getRequestAuthType(r) {
if s3Error := isReqAuthenticated(api.Signature, r); s3Error != ErrNone { default:
writeErrorResponse(w, r, s3Error, r.URL.Path) // For all unknown auth types return error.
writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path)
return return
case authTypePresigned, authTypeSigned:
if s3Error := isReqAuthenticated(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
} }
// Read bucket access policy. // Read bucket access policy.

View File

@ -28,11 +28,6 @@ import (
"github.com/rs/cors" "github.com/rs/cors"
) )
const (
iso8601Format = "20060102T150405Z"
privateBucket = "/minio"
)
// HandlerFunc - useful to chain different middleware http.Handler // HandlerFunc - useful to chain different middleware http.Handler
type HandlerFunc func(http.Handler) http.Handler type HandlerFunc func(http.Handler) http.Handler
@ -51,6 +46,11 @@ type redirectHandler struct {
locationPrefix string locationPrefix string
} }
// Private bucket.
const (
privateBucket = "/minio"
)
func setBrowserRedirectHandler(h http.Handler) http.Handler { func setBrowserRedirectHandler(h http.Handler) http.Handler {
return redirectHandler{handler: h, locationPrefix: privateBucket} return redirectHandler{handler: h, locationPrefix: privateBucket}
} }

View File

@ -17,7 +17,10 @@
package main package main
import ( import (
"crypto/sha256"
"encoding/hex"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"strconv" "strconv"
@ -73,7 +76,7 @@ func (api storageAPI) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
return return
} }
case authTypePresigned, authTypeSigned: case authTypePresigned, authTypeSigned:
if s3Error := isReqAuthenticated(api.Signature, r); s3Error != ErrNone { if s3Error := isReqAuthenticated(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path) writeErrorResponse(w, r, s3Error, r.URL.Path)
return return
} }
@ -230,9 +233,16 @@ func (api storageAPI) HeadObjectHandler(w http.ResponseWriter, r *http.Request)
bucket = vars["bucket"] bucket = vars["bucket"]
object = vars["object"] object = vars["object"]
if s3Error := isReqAuthenticated(api.Signature, r); s3Error != ErrNone { switch getRequestAuthType(r) {
writeErrorResponse(w, r, s3Error, r.URL.Path) default:
// For all unknown auth types return error.
writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path)
return return
case authTypePresigned, authTypeSigned:
if s3Error := isReqAuthenticated(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
} }
objectInfo, err := api.Filesystem.GetObjectInfo(bucket, object) objectInfo, err := api.Filesystem.GetObjectInfo(bucket, object)
@ -292,7 +302,7 @@ func (api storageAPI) CopyObjectHandler(w http.ResponseWriter, r *http.Request)
return return
} }
case authTypePresigned, authTypeSigned: case authTypePresigned, authTypeSigned:
if s3Error := isReqAuthenticated(api.Signature, r); s3Error != ErrNone { if s3Error := isReqAuthenticated(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path) writeErrorResponse(w, r, s3Error, r.URL.Path)
return return
} }
@ -366,6 +376,17 @@ func (api storageAPI) CopyObjectHandler(w http.ResponseWriter, r *http.Request)
return return
} }
var md5Bytes []byte
if objectInfo.MD5Sum != "" {
var e error
md5Bytes, e = hex.DecodeString(objectInfo.MD5Sum)
if e != nil {
errorIf(probe.NewError(e), "Decoding md5 failed.", nil)
writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path)
return
}
}
// Initialize a pipe for data pipe line. // Initialize a pipe for data pipe line.
reader, writer := io.Pipe() reader, writer := io.Pipe()
@ -378,13 +399,11 @@ func (api storageAPI) CopyObjectHandler(w http.ResponseWriter, r *http.Request)
} }
}() }()
// Verify md5sum.
expectedMD5Sum := objectInfo.MD5Sum
// Size of object. // Size of object.
size := objectInfo.Size size := objectInfo.Size
// Create the object. // Create the object.
objectInfo, err = api.Filesystem.CreateObject(bucket, object, expectedMD5Sum, size, reader, nil) objectInfo, err = api.Filesystem.CreateObject(bucket, object, size, reader, md5Bytes)
if err != nil { if err != nil {
errorIf(err.Trace(), "CreateObject failed.", nil) errorIf(err.Trace(), "CreateObject failed.", nil)
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
@ -398,8 +417,6 @@ func (api storageAPI) CopyObjectHandler(w http.ResponseWriter, r *http.Request)
writeErrorResponse(w, r, ErrBadDigest, r.URL.Path) writeErrorResponse(w, r, ErrBadDigest, r.URL.Path)
case fs.IncompleteBody: case fs.IncompleteBody:
writeErrorResponse(w, r, ErrIncompleteBody, r.URL.Path) writeErrorResponse(w, r, ErrIncompleteBody, r.URL.Path)
case fs.InvalidDigest:
writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path)
case fs.ObjectExistsAsPrefix: case fs.ObjectExistsAsPrefix:
writeErrorResponse(w, r, ErrObjectExistsAsPrefix, r.URL.Path) writeErrorResponse(w, r, ErrObjectExistsAsPrefix, r.URL.Path)
default: default:
@ -522,8 +539,9 @@ func (api storageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
object := vars["object"] object := vars["object"]
// Get Content-Md5 sent by client and verify if valid // Get Content-Md5 sent by client and verify if valid
md5 := r.Header.Get("Content-Md5") md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5"))
if !isValidMD5(md5) { if err != nil {
errorIf(err.Trace(r.Header.Get("Content-Md5")), "Decoding md5 failed.", nil)
writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path) writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path)
return return
} }
@ -539,10 +557,7 @@ func (api storageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
return return
} }
// Set http request for signature.
auth := api.Signature.SetHTTPRequestToVerify(r)
var objectInfo fs.ObjectInfo var objectInfo fs.ObjectInfo
var err *probe.Error
switch getRequestAuthType(r) { switch getRequestAuthType(r) {
default: default:
// For all unknown auth types return error. // For all unknown auth types return error.
@ -555,11 +570,11 @@ func (api storageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
return return
} }
// Create anonymous object. // Create anonymous object.
objectInfo, err = api.Filesystem.CreateObject(bucket, object, md5, size, r.Body, nil) objectInfo, err = api.Filesystem.CreateObject(bucket, object, size, r.Body, nil)
case authTypePresigned: case authTypePresigned:
// For presigned requests verify them right here. // For presigned requests verify them right here.
var ok bool var ok bool
ok, err = auth.DoesPresignedSignatureMatch() ok, err = doesPresignedSignatureMatch(r)
if err != nil { if err != nil {
errorIf(err.Trace(r.URL.String()), "Presigned signature verification failed.", nil) errorIf(err.Trace(r.URL.String()), "Presigned signature verification failed.", nil)
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path) writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
@ -570,14 +585,46 @@ func (api storageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
return return
} }
// Create presigned object. // Create presigned object.
objectInfo, err = api.Filesystem.CreateObject(bucket, object, md5, size, r.Body, nil) objectInfo, err = api.Filesystem.CreateObject(bucket, object, size, r.Body, nil)
case authTypeSigned: case authTypeSigned:
// Initialize a pipe for data pipe line.
reader, writer := io.Pipe()
// Start writing in a routine.
go func() {
shaWriter := sha256.New()
multiWriter := io.MultiWriter(shaWriter, writer)
if _, e := io.CopyN(multiWriter, r.Body, size); e != nil {
errorIf(probe.NewError(e), "Unable to read HTTP body.", nil)
writer.CloseWithError(e)
return
}
shaPayload := shaWriter.Sum(nil)
ok, serr := doesSignatureMatch(hex.EncodeToString(shaPayload), r)
if serr != nil {
errorIf(serr.Trace(), "Signature verification failed.", nil)
writer.CloseWithError(probe.WrapError(serr))
return
}
if !ok {
writer.CloseWithError(errSignatureMismatch)
return
}
writer.Close()
}()
// Create object. // Create object.
objectInfo, err = api.Filesystem.CreateObject(bucket, object, md5, size, r.Body, &auth) objectInfo, err = api.Filesystem.CreateObject(bucket, object, size, reader, md5Bytes)
} }
if err != nil { if err != nil {
errorIf(err.Trace(), "CreateObject failed.", nil) errorIf(err.Trace(), "CreateObject failed.", nil)
switch err.ToGoError().(type) { e := err.ToGoError()
// Verify if the underlying error is signature mismatch.
if e == errSignatureMismatch {
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
return
}
switch e.(type) {
case fs.RootPathFull: case fs.RootPathFull:
writeErrorResponse(w, r, ErrRootPathFull, r.URL.Path) writeErrorResponse(w, r, ErrRootPathFull, r.URL.Path)
case fs.BucketNotFound: case fs.BucketNotFound:
@ -586,12 +633,8 @@ func (api storageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path) writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
case fs.BadDigest: case fs.BadDigest:
writeErrorResponse(w, r, ErrBadDigest, r.URL.Path) writeErrorResponse(w, r, ErrBadDigest, r.URL.Path)
case fs.SignDoesNotMatch:
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
case fs.IncompleteBody: case fs.IncompleteBody:
writeErrorResponse(w, r, ErrIncompleteBody, r.URL.Path) writeErrorResponse(w, r, ErrIncompleteBody, r.URL.Path)
case fs.InvalidDigest:
writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path)
case fs.ObjectExistsAsPrefix: case fs.ObjectExistsAsPrefix:
writeErrorResponse(w, r, ErrObjectExistsAsPrefix, r.URL.Path) writeErrorResponse(w, r, ErrObjectExistsAsPrefix, r.URL.Path)
default: default:
@ -615,14 +658,18 @@ func (api storageAPI) NewMultipartUploadHandler(w http.ResponseWriter, r *http.R
object = vars["object"] object = vars["object"]
switch getRequestAuthType(r) { switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path)
return
case authTypeAnonymous: case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html // http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if s3Error := enforceBucketPolicy("s3:PutObject", bucket, r.URL); s3Error != ErrNone { if s3Error := enforceBucketPolicy("s3:PutObject", bucket, r.URL); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path) writeErrorResponse(w, r, s3Error, r.URL.Path)
return return
} }
default: case authTypePresigned, authTypeSigned:
if s3Error := isReqAuthenticated(api.Signature, r); s3Error != ErrNone { if s3Error := isReqAuthenticated(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path) writeErrorResponse(w, r, s3Error, r.URL.Path)
return return
} }
@ -663,8 +710,9 @@ func (api storageAPI) PutObjectPartHandler(w http.ResponseWriter, r *http.Reques
object := vars["object"] object := vars["object"]
// get Content-Md5 sent by client and verify if valid // get Content-Md5 sent by client and verify if valid
md5 := r.Header.Get("Content-Md5") md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5"))
if !isValidMD5(md5) { if err != nil {
errorIf(err.Trace(r.Header.Get("Content-Md5")), "Decoding md5 failed.", nil)
writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path) writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path)
return return
} }
@ -691,11 +739,12 @@ func (api storageAPI) PutObjectPartHandler(w http.ResponseWriter, r *http.Reques
return return
} }
// Set http request for signature.
auth := api.Signature.SetHTTPRequestToVerify(r)
var partMD5 string var partMD5 string
var err *probe.Error
switch getRequestAuthType(r) { switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path)
return
case authTypeAnonymous: case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html // http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if s3Error := enforceBucketPolicy("s3:PutObject", bucket, r.URL); s3Error != ErrNone { if s3Error := enforceBucketPolicy("s3:PutObject", bucket, r.URL); s3Error != ErrNone {
@ -704,11 +753,11 @@ func (api storageAPI) PutObjectPartHandler(w http.ResponseWriter, r *http.Reques
} }
// No need to verify signature, anonymous request access is // No need to verify signature, anonymous request access is
// already allowed. // already allowed.
partMD5, err = api.Filesystem.CreateObjectPart(bucket, object, uploadID, md5, partID, size, r.Body, nil) partMD5, err = api.Filesystem.CreateObjectPart(bucket, object, uploadID, partID, size, r.Body, nil)
case authTypePresigned: case authTypePresigned:
// For presigned requests verify right here. // For presigned requests verify right here.
var ok bool var ok bool
ok, err = auth.DoesPresignedSignatureMatch() ok, err = doesPresignedSignatureMatch(r)
if err != nil { if err != nil {
errorIf(err.Trace(r.URL.String()), "Presigned signature verification failed.", nil) errorIf(err.Trace(r.URL.String()), "Presigned signature verification failed.", nil)
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path) writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
@ -718,25 +767,52 @@ func (api storageAPI) PutObjectPartHandler(w http.ResponseWriter, r *http.Reques
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path) writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
return return
} }
partMD5, err = api.Filesystem.CreateObjectPart(bucket, object, uploadID, md5, partID, size, r.Body, nil) partMD5, err = api.Filesystem.CreateObjectPart(bucket, object, uploadID, partID, size, r.Body, nil)
default: case authTypeSigned:
partMD5, err = api.Filesystem.CreateObjectPart(bucket, object, uploadID, md5, partID, size, r.Body, &auth) // Initialize a pipe for data pipe line.
reader, writer := io.Pipe()
// Start writing in a routine.
go func() {
shaWriter := sha256.New()
multiWriter := io.MultiWriter(shaWriter, writer)
if _, e := io.CopyN(multiWriter, r.Body, size); e != nil {
errorIf(probe.NewError(e), "Unable to read HTTP body.", nil)
writer.CloseWithError(e)
return
}
shaPayload := shaWriter.Sum(nil)
ok, serr := doesSignatureMatch(hex.EncodeToString(shaPayload), r)
if serr != nil {
errorIf(serr.Trace(), "Signature verification failed.", nil)
writer.CloseWithError(probe.WrapError(serr))
return
}
if !ok {
writer.CloseWithError(errSignatureMismatch)
return
}
writer.Close()
}()
partMD5, err = api.Filesystem.CreateObjectPart(bucket, object, uploadID, partID, size, reader, md5Bytes)
} }
if err != nil { if err != nil {
errorIf(err.Trace(), "CreateObjectPart failed.", nil) errorIf(err.Trace(), "CreateObjectPart failed.", nil)
switch err.ToGoError().(type) { e := err.ToGoError()
// Verify if the underlying error is signature mismatch.
if e == errSignatureMismatch {
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
return
}
switch e.(type) {
case fs.RootPathFull: case fs.RootPathFull:
writeErrorResponse(w, r, ErrRootPathFull, r.URL.Path) writeErrorResponse(w, r, ErrRootPathFull, r.URL.Path)
case fs.InvalidUploadID: case fs.InvalidUploadID:
writeErrorResponse(w, r, ErrNoSuchUpload, r.URL.Path) writeErrorResponse(w, r, ErrNoSuchUpload, r.URL.Path)
case fs.BadDigest: case fs.BadDigest:
writeErrorResponse(w, r, ErrBadDigest, r.URL.Path) writeErrorResponse(w, r, ErrBadDigest, r.URL.Path)
case fs.SignDoesNotMatch:
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
case fs.IncompleteBody: case fs.IncompleteBody:
writeErrorResponse(w, r, ErrIncompleteBody, r.URL.Path) writeErrorResponse(w, r, ErrIncompleteBody, r.URL.Path)
case fs.InvalidDigest:
writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path)
default: default:
writeErrorResponse(w, r, ErrInternalError, r.URL.Path) writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
} }
@ -755,14 +831,18 @@ func (api storageAPI) AbortMultipartUploadHandler(w http.ResponseWriter, r *http
object := vars["object"] object := vars["object"]
switch getRequestAuthType(r) { switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path)
return
case authTypeAnonymous: case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html // http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if s3Error := enforceBucketPolicy("s3:AbortMultipartUpload", bucket, r.URL); s3Error != ErrNone { if s3Error := enforceBucketPolicy("s3:AbortMultipartUpload", bucket, r.URL); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path) writeErrorResponse(w, r, s3Error, r.URL.Path)
return return
} }
default: case authTypePresigned, authTypeSigned:
if s3Error := isReqAuthenticated(api.Signature, r); s3Error != ErrNone { if s3Error := isReqAuthenticated(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path) writeErrorResponse(w, r, s3Error, r.URL.Path)
return return
} }
@ -798,14 +878,18 @@ func (api storageAPI) ListObjectPartsHandler(w http.ResponseWriter, r *http.Requ
object := vars["object"] object := vars["object"]
switch getRequestAuthType(r) { switch getRequestAuthType(r) {
default:
// For all unknown auth types return error.
writeErrorResponse(w, r, ErrAccessDenied, r.URL.Path)
return
case authTypeAnonymous: case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html // http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if s3Error := enforceBucketPolicy("s3:ListMultipartUploadParts", bucket, r.URL); s3Error != ErrNone { if s3Error := enforceBucketPolicy("s3:ListMultipartUploadParts", bucket, r.URL); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path) writeErrorResponse(w, r, s3Error, r.URL.Path)
return return
} }
default: case authTypePresigned, authTypeSigned:
if s3Error := isReqAuthenticated(api.Signature, r); s3Error != ErrNone { if s3Error := isReqAuthenticated(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path) writeErrorResponse(w, r, s3Error, r.URL.Path)
return return
} }
@ -860,9 +944,6 @@ func (api storageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, r *h
// Extract object resources. // Extract object resources.
objectResourcesMetadata := getObjectResources(r.URL.Query()) objectResourcesMetadata := getObjectResources(r.URL.Query())
// Set http request for signature.
auth := api.Signature.SetHTTPRequestToVerify(r)
var objectInfo fs.ObjectInfo var objectInfo fs.ObjectInfo
var err *probe.Error var err *probe.Error
switch getRequestAuthType(r) { switch getRequestAuthType(r) {
@ -876,26 +957,27 @@ func (api storageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, r *h
writeErrorResponse(w, r, s3Error, r.URL.Path) writeErrorResponse(w, r, s3Error, r.URL.Path)
return return
} }
// Complete multipart upload anonymous. completePartBytes, e := ioutil.ReadAll(r.Body)
objectInfo, err = api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, r.Body, nil) if e != nil {
case authTypePresigned: errorIf(probe.NewError(e), "CompleteMultipartUpload failed.", nil)
// For presigned requests verify right here. writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
var ok bool
ok, err = auth.DoesPresignedSignatureMatch()
if err != nil {
errorIf(err.Trace(r.URL.String()), "Presigned signature verification failed.", nil)
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
return return
} }
if !ok { // Complete multipart upload anonymous.
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path) objectInfo, err = api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, completePartBytes)
case authTypePresigned, authTypeSigned:
if s3Error := isReqAuthenticated(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path)
return
}
completePartBytes, e := ioutil.ReadAll(r.Body)
if e != nil {
errorIf(probe.NewError(e), "CompleteMultipartUpload failed.", nil)
writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
return return
} }
// Complete multipart upload presigned. // Complete multipart upload presigned.
objectInfo, err = api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, r.Body, nil) objectInfo, err = api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, completePartBytes)
case authTypeSigned:
// Complete multipart upload.
objectInfo, err = api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, r.Body, &auth)
} }
if err != nil { if err != nil {
errorIf(err.Trace(), "CompleteMultipartUpload failed.", nil) errorIf(err.Trace(), "CompleteMultipartUpload failed.", nil)
@ -914,8 +996,6 @@ func (api storageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, r *h
writeErrorResponse(w, r, ErrInvalidPart, r.URL.Path) writeErrorResponse(w, r, ErrInvalidPart, r.URL.Path)
case fs.InvalidPartOrder: case fs.InvalidPartOrder:
writeErrorResponse(w, r, ErrInvalidPartOrder, r.URL.Path) writeErrorResponse(w, r, ErrInvalidPartOrder, r.URL.Path)
case fs.SignDoesNotMatch:
writeErrorResponse(w, r, ErrSignatureDoesNotMatch, r.URL.Path)
case fs.IncompleteBody: case fs.IncompleteBody:
writeErrorResponse(w, r, ErrIncompleteBody, r.URL.Path) writeErrorResponse(w, r, ErrIncompleteBody, r.URL.Path)
case fs.MalformedXML: case fs.MalformedXML:
@ -925,12 +1005,12 @@ func (api storageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, r *h
} }
return return
} }
// get object location. // Get object location.
location := getLocation(r) location := getLocation(r)
// Generate complete multipart response. // Generate complete multipart response.
response := generateCompleteMultpartUploadResponse(bucket, object, location, objectInfo.MD5Sum) response := generateCompleteMultpartUploadResponse(bucket, object, location, objectInfo.MD5Sum)
encodedSuccessResponse := encodeResponse(response) encodedSuccessResponse := encodeResponse(response)
// write headers // Write headers.
setCommonHeaders(w) setCommonHeaders(w)
// write success response. // write success response.
writeSuccessResponse(w, encodedSuccessResponse) writeSuccessResponse(w, encodedSuccessResponse)
@ -956,7 +1036,7 @@ func (api storageAPI) DeleteObjectHandler(w http.ResponseWriter, r *http.Request
return return
} }
case authTypeSigned, authTypePresigned: case authTypeSigned, authTypePresigned:
if s3Error := isReqAuthenticated(api.Signature, r); s3Error != ErrNone { if s3Error := isReqAuthenticated(r); s3Error != ErrNone {
writeErrorResponse(w, r, s3Error, r.URL.Path) writeErrorResponse(w, r, s3Error, r.URL.Path)
return return
} }
@ -976,6 +1056,7 @@ func (api storageAPI) DeleteObjectHandler(w http.ResponseWriter, r *http.Request
default: default:
writeErrorResponse(w, r, ErrInternalError, r.URL.Path) writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
} }
return
} }
writeSuccessNoContent(w) writeSuccessNoContent(w)
} }

View File

@ -19,7 +19,6 @@ package fs
import ( import (
"bytes" "bytes"
"crypto/md5" "crypto/md5"
"encoding/base64"
"encoding/hex" "encoding/hex"
"encoding/xml" "encoding/xml"
"math/rand" "math/rand"
@ -61,7 +60,7 @@ func testMultipartObjectCreation(c *check.C, create func() Filesystem) {
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
completedParts := CompleteMultipartUpload{} completedParts := CompleteMultipartUpload{}
completedParts.Part = make([]CompletePart, 0) //completedParts.Part = make([]CompletePart, 10)
for i := 1; i <= 10; i++ { for i := 1; i <= 10; i++ {
randomPerm := rand.Perm(10) randomPerm := rand.Perm(10)
randomString := "" randomString := ""
@ -71,19 +70,17 @@ func testMultipartObjectCreation(c *check.C, create func() Filesystem) {
hasher := md5.New() hasher := md5.New()
hasher.Write([]byte(randomString)) hasher.Write([]byte(randomString))
expectedmd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil)) expectedMD5Sumhex := hex.EncodeToString(hasher.Sum(nil))
expectedmd5Sumhex := hex.EncodeToString(hasher.Sum(nil))
var calculatedmd5sum string var calculatedMD5sum string
calculatedmd5sum, err = fs.CreateObjectPart("bucket", "key", uploadID, expectedmd5Sum, i, int64(len(randomString)), calculatedMD5sum, err = fs.CreateObjectPart("bucket", "key", uploadID, i, int64(len(randomString)), bytes.NewBufferString(randomString), hasher.Sum(nil))
bytes.NewBufferString(randomString), nil)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
c.Assert(calculatedmd5sum, check.Equals, expectedmd5Sumhex) c.Assert(calculatedMD5sum, check.Equals, expectedMD5Sumhex)
completedParts.Part = append(completedParts.Part, CompletePart{PartNumber: i, ETag: calculatedmd5sum}) completedParts.Part = append(completedParts.Part, CompletePart{PartNumber: i, ETag: calculatedMD5sum})
} }
completedPartsBytes, e := xml.Marshal(completedParts) completedPartsBytes, e := xml.Marshal(completedParts)
c.Assert(e, check.IsNil) c.Assert(e, check.IsNil)
objectInfo, err := fs.CompleteMultipartUpload("bucket", "key", uploadID, bytes.NewReader(completedPartsBytes), nil) objectInfo, err := fs.CompleteMultipartUpload("bucket", "key", uploadID, completedPartsBytes)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
c.Assert(objectInfo.MD5Sum, check.Equals, "9b7d6f13ba00e24d0b02de92e814891b-10") c.Assert(objectInfo.MD5Sum, check.Equals, "9b7d6f13ba00e24d0b02de92e814891b-10")
} }
@ -105,15 +102,13 @@ func testMultipartObjectAbort(c *check.C, create func() Filesystem) {
hasher := md5.New() hasher := md5.New()
hasher.Write([]byte(randomString)) hasher.Write([]byte(randomString))
expectedmd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil)) expectedMD5Sumhex := hex.EncodeToString(hasher.Sum(nil))
expectedmd5Sumhex := hex.EncodeToString(hasher.Sum(nil))
var calculatedmd5sum string var calculatedMD5sum string
calculatedmd5sum, err = fs.CreateObjectPart("bucket", "key", uploadID, expectedmd5Sum, i, int64(len(randomString)), calculatedMD5sum, err = fs.CreateObjectPart("bucket", "key", uploadID, i, int64(len(randomString)), bytes.NewBufferString(randomString), hasher.Sum(nil))
bytes.NewBufferString(randomString), nil)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
c.Assert(calculatedmd5sum, check.Equals, expectedmd5Sumhex) c.Assert(calculatedMD5sum, check.Equals, expectedMD5Sumhex)
parts[i] = calculatedmd5sum parts[i] = expectedMD5Sumhex
} }
err = fs.AbortMultipartUpload("bucket", "key", uploadID) err = fs.AbortMultipartUpload("bucket", "key", uploadID)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -133,14 +128,14 @@ func testMultipleObjectCreation(c *check.C, create func() Filesystem) {
hasher := md5.New() hasher := md5.New()
hasher.Write([]byte(randomString)) hasher.Write([]byte(randomString))
expectedmd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil)) expectedMD5Sumhex := hex.EncodeToString(hasher.Sum(nil))
expectedmd5Sumhex := hex.EncodeToString(hasher.Sum(nil))
key := "obj" + strconv.Itoa(i) key := "obj" + strconv.Itoa(i)
objects[key] = []byte(randomString) objects[key] = []byte(randomString)
objectInfo, err := fs.CreateObject("bucket", key, expectedmd5Sum, int64(len(randomString)), bytes.NewBufferString(randomString), nil) var objectInfo ObjectInfo
objectInfo, err = fs.CreateObject("bucket", key, int64(len(randomString)), bytes.NewBufferString(randomString), hasher.Sum(nil))
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
c.Assert(objectInfo.MD5Sum, check.Equals, expectedmd5Sumhex) c.Assert(objectInfo.MD5Sum, check.Equals, expectedMD5Sumhex)
} }
for key, value := range objects { for key, value := range objects {
@ -165,7 +160,7 @@ func testPaging(c *check.C, create func() Filesystem) {
// check before paging occurs // check before paging occurs
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
key := "obj" + strconv.Itoa(i) key := "obj" + strconv.Itoa(i)
_, err = fs.CreateObject("bucket", key, "", int64(len(key)), bytes.NewBufferString(key), nil) _, err = fs.CreateObject("bucket", key, int64(len(key)), bytes.NewBufferString(key), nil)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
result, err = fs.ListObjects("bucket", "", "", "", 5) result, err = fs.ListObjects("bucket", "", "", "", 5)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -175,7 +170,7 @@ func testPaging(c *check.C, create func() Filesystem) {
// check after paging occurs pages work // check after paging occurs pages work
for i := 6; i <= 10; i++ { for i := 6; i <= 10; i++ {
key := "obj" + strconv.Itoa(i) key := "obj" + strconv.Itoa(i)
_, err = fs.CreateObject("bucket", key, "", int64(len(key)), bytes.NewBufferString(key), nil) _, err = fs.CreateObject("bucket", key, int64(len(key)), bytes.NewBufferString(key), nil)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
result, err = fs.ListObjects("bucket", "obj", "", "", 5) result, err = fs.ListObjects("bucket", "obj", "", "", 5)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -184,9 +179,9 @@ func testPaging(c *check.C, create func() Filesystem) {
} }
// check paging with prefix at end returns less objects // check paging with prefix at end returns less objects
{ {
_, err = fs.CreateObject("bucket", "newPrefix", "", int64(len("prefix1")), bytes.NewBufferString("prefix1"), nil) _, err = fs.CreateObject("bucket", "newPrefix", int64(len("prefix1")), bytes.NewBufferString("prefix1"), nil)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
_, err = fs.CreateObject("bucket", "newPrefix2", "", int64(len("prefix2")), bytes.NewBufferString("prefix2"), nil) _, err = fs.CreateObject("bucket", "newPrefix2", int64(len("prefix2")), bytes.NewBufferString("prefix2"), nil)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
result, err = fs.ListObjects("bucket", "new", "", "", 5) result, err = fs.ListObjects("bucket", "new", "", "", 5)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -206,9 +201,9 @@ func testPaging(c *check.C, create func() Filesystem) {
// check delimited results with delimiter and prefix // check delimited results with delimiter and prefix
{ {
_, err = fs.CreateObject("bucket", "this/is/delimited", "", int64(len("prefix1")), bytes.NewBufferString("prefix1"), nil) _, err = fs.CreateObject("bucket", "this/is/delimited", int64(len("prefix1")), bytes.NewBufferString("prefix1"), nil)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
_, err = fs.CreateObject("bucket", "this/is/also/a/delimited/file", "", int64(len("prefix2")), bytes.NewBufferString("prefix2"), nil) _, err = fs.CreateObject("bucket", "this/is/also/a/delimited/file", int64(len("prefix2")), bytes.NewBufferString("prefix2"), nil)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
result, err = fs.ListObjects("bucket", "this/is/", "", "/", 10) result, err = fs.ListObjects("bucket", "this/is/", "", "/", 10)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -260,18 +255,11 @@ func testObjectOverwriteWorks(c *check.C, create func() Filesystem) {
err := fs.MakeBucket("bucket") err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
hasher1 := md5.New() _, err = fs.CreateObject("bucket", "object", int64(len("one")), bytes.NewBufferString("one"), nil)
hasher1.Write([]byte("one"))
md5Sum1 := base64.StdEncoding.EncodeToString(hasher1.Sum(nil))
md5Sum1hex := hex.EncodeToString(hasher1.Sum(nil))
objectInfo, err := fs.CreateObject("bucket", "object", md5Sum1, int64(len("one")), bytes.NewBufferString("one"), nil)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
c.Assert(md5Sum1hex, check.Equals, objectInfo.MD5Sum) // c.Assert(md5Sum1hex, check.Equals, objectInfo.MD5Sum)
hasher2 := md5.New() _, err = fs.CreateObject("bucket", "object", int64(len("three")), bytes.NewBufferString("three"), nil)
hasher2.Write([]byte("three"))
md5Sum2 := base64.StdEncoding.EncodeToString(hasher2.Sum(nil))
_, err = fs.CreateObject("bucket", "object", md5Sum2, int64(len("three")), bytes.NewBufferString("three"), nil)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
var bytesBuffer bytes.Buffer var bytesBuffer bytes.Buffer
@ -283,7 +271,7 @@ func testObjectOverwriteWorks(c *check.C, create func() Filesystem) {
func testNonExistantBucketOperations(c *check.C, create func() Filesystem) { func testNonExistantBucketOperations(c *check.C, create func() Filesystem) {
fs := create() fs := create()
_, err := fs.CreateObject("bucket", "object", "", int64(len("one")), bytes.NewBufferString("one"), nil) _, err := fs.CreateObject("bucket", "object", int64(len("one")), bytes.NewBufferString("one"), nil)
c.Assert(err, check.Not(check.IsNil)) c.Assert(err, check.Not(check.IsNil))
} }
@ -300,13 +288,8 @@ func testPutObjectInSubdir(c *check.C, create func() Filesystem) {
err := fs.MakeBucket("bucket") err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
hasher := md5.New() _, err = fs.CreateObject("bucket", "dir1/dir2/object", int64(len("hello world")), bytes.NewBufferString("hello world"), nil)
hasher.Write([]byte("hello world"))
md5Sum1 := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
md5Sum1hex := hex.EncodeToString(hasher.Sum(nil))
objectInfo, err := fs.CreateObject("bucket", "dir1/dir2/object", md5Sum1, int64(len("hello world")), bytes.NewBufferString("hello world"), nil)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
c.Assert(objectInfo.MD5Sum, check.Equals, md5Sum1hex)
var bytesBuffer bytes.Buffer var bytesBuffer bytes.Buffer
length, err := fs.GetObject(&bytesBuffer, "bucket", "dir1/dir2/object", 0, 0) length, err := fs.GetObject(&bytesBuffer, "bucket", "dir1/dir2/object", 0, 0)
@ -396,7 +379,7 @@ func testGetDirectoryReturnsObjectNotFound(c *check.C, create func() Filesystem)
err := fs.MakeBucket("bucket") err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
_, err = fs.CreateObject("bucket", "dir1/dir2/object", "", int64(len("hello world")), bytes.NewBufferString("hello world"), nil) _, err = fs.CreateObject("bucket", "dir1/dir2/object", int64(len("hello world")), bytes.NewBufferString("hello world"), nil)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
var byteBuffer bytes.Buffer var byteBuffer bytes.Buffer
@ -431,26 +414,9 @@ func testDefaultContentType(c *check.C, create func() Filesystem) {
err := fs.MakeBucket("bucket") err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
// test empty // Test empty
_, err = fs.CreateObject("bucket", "one", "", int64(len("one")), bytes.NewBufferString("one"), nil) _, err = fs.CreateObject("bucket", "one", int64(len("one")), bytes.NewBufferString("one"), nil)
metadata, err := fs.GetObjectInfo("bucket", "one") metadata, err := fs.GetObjectInfo("bucket", "one")
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
c.Assert(metadata.ContentType, check.Equals, "application/octet-stream") c.Assert(metadata.ContentType, check.Equals, "application/octet-stream")
} }
func testContentMD5Set(c *check.C, create func() Filesystem) {
fs := create()
err := fs.MakeBucket("bucket")
c.Assert(err, check.IsNil)
// test md5 invalid
badmd5Sum := "NWJiZjVhNTIzMjhlNzQzOWFlNmU3MTlkZmU3MTIyMDA"
calculatedmd5sum, err := fs.CreateObject("bucket", "one", badmd5Sum, int64(len("one")), bytes.NewBufferString("one"), nil)
c.Assert(err, check.Not(check.IsNil))
c.Assert(calculatedmd5sum, check.Not(check.Equals), badmd5Sum)
goodmd5sum := "NWJiZjVhNTIzMjhlNzQzOWFlNmU3MTlkZmU3MTIyMDA="
calculatedmd5sum, err = fs.CreateObject("bucket", "two", goodmd5sum, int64(len("one")), bytes.NewBufferString("one"), nil)
c.Assert(err, check.IsNil)
c.Assert(calculatedmd5sum, check.Equals, goodmd5sum)
}

View File

@ -27,7 +27,6 @@ import (
) )
func TestListObjects(t *testing.T) { func TestListObjects(t *testing.T) {
// Make a temporary directory to use as the filesystem. // Make a temporary directory to use as the filesystem.
directory, e := ioutil.TempDir("", "minio-list-object-test") directory, e := ioutil.TempDir("", "minio-list-object-test")
if e != nil { if e != nil {
@ -58,36 +57,36 @@ func TestListObjects(t *testing.T) {
} }
defer os.Remove(tmpfile.Name()) // clean up defer os.Remove(tmpfile.Name()) // clean up
_, err = fs.CreateObject("test-bucket-list-object", "Asia-maps", "", int64(len("asia-maps")), bytes.NewBufferString("asia-maps"), nil) _, err = fs.CreateObject("test-bucket-list-object", "Asia-maps", int64(len("asia-maps")), bytes.NewBufferString("asia-maps"), nil)
if err != nil { if err != nil {
t.Fatal(e) t.Fatal(e)
} }
_, err = fs.CreateObject("test-bucket-list-object", "Asia/India/India-summer-photos-1", "", int64(len("contentstring")), bytes.NewBufferString("contentstring"), nil) _, err = fs.CreateObject("test-bucket-list-object", "Asia/India/India-summer-photos-1", int64(len("contentstring")), bytes.NewBufferString("contentstring"), nil)
if err != nil { if err != nil {
t.Fatal(e) t.Fatal(e)
} }
_, err = fs.CreateObject("test-bucket-list-object", "Asia/India/Karnataka/Bangalore/Koramangala/pics", "", int64(len("contentstring")), bytes.NewBufferString("contentstring"), nil) _, err = fs.CreateObject("test-bucket-list-object", "Asia/India/Karnataka/Bangalore/Koramangala/pics", int64(len("contentstring")), bytes.NewBufferString("contentstring"), nil)
if err != nil { if err != nil {
t.Fatal(e) t.Fatal(e)
} }
for i := 0; i < 2; i++ { for i := 0; i < 2; i++ {
key := "newPrefix" + strconv.Itoa(i) key := "newPrefix" + strconv.Itoa(i)
_, err = fs.CreateObject("test-bucket-list-object", key, "", int64(len(key)), bytes.NewBufferString(key), nil) _, err = fs.CreateObject("test-bucket-list-object", key, int64(len(key)), bytes.NewBufferString(key), nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
_, err = fs.CreateObject("test-bucket-list-object", "newzen/zen/recurse/again/again/again/pics", "", int64(len("recurse")), bytes.NewBufferString("recurse"), nil) _, err = fs.CreateObject("test-bucket-list-object", "newzen/zen/recurse/again/again/again/pics", int64(len("recurse")), bytes.NewBufferString("recurse"), nil)
if err != nil { if err != nil {
t.Fatal(e) t.Fatal(e)
} }
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
key := "obj" + strconv.Itoa(i) key := "obj" + strconv.Itoa(i)
_, err = fs.CreateObject("test-bucket-list-object", key, "", int64(len(key)), bytes.NewBufferString(key), nil) _, err = fs.CreateObject("test-bucket-list-object", key, int64(len(key)), bytes.NewBufferString(key), nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -592,7 +591,7 @@ func BenchmarkListObjects(b *testing.B) {
for i := 0; i < 20000; i++ { for i := 0; i < 20000; i++ {
key := "obj" + strconv.Itoa(i) key := "obj" + strconv.Itoa(i)
_, err = filesystem.CreateObject("ls-benchmark-bucket", key, "", int64(len(key)), bytes.NewBufferString(key), nil) _, err = filesystem.CreateObject("ls-benchmark-bucket", key, int64(len(key)), bytes.NewBufferString(key), nil)
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }

View File

@ -18,13 +18,6 @@ package fs
import "fmt" import "fmt"
// SignDoesNotMatch - signature does not match.
type SignDoesNotMatch struct{}
func (e SignDoesNotMatch) Error() string {
return "Signature does not match."
}
// InvalidArgument invalid argument // InvalidArgument invalid argument
type InvalidArgument struct{} type InvalidArgument struct{}
@ -131,15 +124,14 @@ func (e InvalidDisksArgument) Error() string {
return "Invalid number of disks per node" return "Invalid number of disks per node"
} }
// BadDigest bad md5sum // BadDigest - Content-MD5 you specified did not match what we received.
type BadDigest struct { type BadDigest struct {
MD5 string ExpectedMD5 string
Bucket string CalculatedMD5 string
Object string
} }
func (e BadDigest) Error() string { func (e BadDigest) Error() string {
return "Bad digest" return "Bad digest expected " + e.ExpectedMD5 + " is not valid with what we calculated " + e.CalculatedMD5
} }
// InternalError - generic internal error // InternalError - generic internal error
@ -183,13 +175,6 @@ type ImplementationError struct {
Err error Err error
} }
// DigestError - Generic MD5 error
type DigestError struct {
Bucket string
Key string
MD5 string
}
/// Bucket related errors /// Bucket related errors
// BucketNameInvalid - bucketname provided is invalid // BucketNameInvalid - bucketname provided is invalid
@ -200,9 +185,6 @@ type BucketNameInvalid GenericBucketError
// ObjectNameInvalid - object name provided is invalid // ObjectNameInvalid - object name provided is invalid
type ObjectNameInvalid GenericObjectError type ObjectNameInvalid GenericObjectError
// InvalidDigest - md5 in request header invalid
type InvalidDigest DigestError
// Return string an error formatted as the given text // Return string an error formatted as the given text
func (e ImplementationError) Error() string { func (e ImplementationError) Error() string {
error := "" error := ""
@ -258,11 +240,6 @@ func (e BackendCorrupted) Error() string {
return "Backend corrupted: " + e.Path return "Backend corrupted: " + e.Path
} }
// Return string an error formatted as the given text
func (e InvalidDigest) Error() string {
return "MD5 provided " + e.MD5 + " is invalid"
}
// OperationNotPermitted - operation not permitted // OperationNotPermitted - operation not permitted
type OperationNotPermitted struct { type OperationNotPermitted struct {
Op string Op string

View File

@ -24,7 +24,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"math/rand" "math/rand"
"os" "os"
"path/filepath" "path/filepath"
@ -34,12 +33,10 @@ import (
"time" "time"
"github.com/minio/minio/pkg/atomic" "github.com/minio/minio/pkg/atomic"
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/crypto/sha512" "github.com/minio/minio/pkg/crypto/sha512"
"github.com/minio/minio/pkg/disk" "github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/mimedb" "github.com/minio/minio/pkg/mimedb"
"github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/s3/signature4"
) )
// isValidUploadID - is upload id. // isValidUploadID - is upload id.
@ -322,7 +319,7 @@ func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
// CreateObjectPart - create a part in a multipart session // CreateObjectPart - create a part in a multipart session
func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum string, partID int, size int64, data io.Reader, signature *signature4.Sign) (string, *probe.Error) { func (fs Filesystem) CreateObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Bytes []byte) (string, *probe.Error) {
di, err := disk.GetInfo(fs.path) di, err := disk.GetInfo(fs.path)
if err != nil { if err != nil {
return "", probe.NewError(err) return "", probe.NewError(err)
@ -355,71 +352,58 @@ func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum s
return "", probe.NewError(InvalidUploadID{UploadID: uploadID}) return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
} }
if expectedMD5Sum != "" {
var expectedMD5SumBytes []byte
expectedMD5SumBytes, err = base64.StdEncoding.DecodeString(expectedMD5Sum)
if err != nil {
// Pro-actively close the connection
return "", probe.NewError(InvalidDigest{MD5: expectedMD5Sum})
}
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
}
bucket = fs.denormalizeBucket(bucket) bucket = fs.denormalizeBucket(bucket)
bucketPath := filepath.Join(fs.path, bucket) bucketPath := filepath.Join(fs.path, bucket)
if _, err = os.Stat(bucketPath); err != nil { if _, e := os.Stat(bucketPath); e != nil {
// Check bucket exists. // Check bucket exists.
if os.IsNotExist(err) { if os.IsNotExist(e) {
return "", probe.NewError(BucketNotFound{Bucket: bucket}) return "", probe.NewError(BucketNotFound{Bucket: bucket})
} }
return "", probe.NewError(err) return "", probe.NewError(e)
}
// md5Hex representation.
var md5Hex string
if len(md5Bytes) != 0 {
md5Hex = hex.EncodeToString(md5Bytes)
} }
objectPath := filepath.Join(bucketPath, object) objectPath := filepath.Join(bucketPath, object)
partPathPrefix := objectPath + uploadID partPathPrefix := objectPath + uploadID
partPath := partPathPrefix + expectedMD5Sum + fmt.Sprintf("$%d-$multiparts", partID) partPath := partPathPrefix + md5Hex + fmt.Sprintf("$%d-$multiparts", partID)
partFile, e := atomic.FileCreateWithPrefix(partPath, "$multiparts") partFile, e := atomic.FileCreateWithPrefix(partPath, "$multiparts")
if e != nil { if e != nil {
return "", probe.NewError(e) return "", probe.NewError(e)
} }
defer partFile.Close()
md5Hasher := md5.New() // Initialize md5 writer.
sha256Hasher := sha256.New() md5Writer := md5.New()
partWriter := io.MultiWriter(partFile, md5Hasher, sha256Hasher)
if _, e = io.CopyN(partWriter, data, size); e != nil { // Create a multiwriter.
multiWriter := io.MultiWriter(md5Writer, partFile)
if _, e = io.CopyN(multiWriter, data, size); e != nil {
partFile.CloseAndPurge() partFile.CloseAndPurge()
return "", probe.NewError(e) return "", probe.NewError(e)
} }
md5sum := hex.EncodeToString(md5Hasher.Sum(nil)) // Finalize new md5.
// Verify if the written object is equal to what is expected, only newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
// if it is requested as such. if len(md5Bytes) != 0 {
if expectedMD5Sum != "" { if newMD5Hex != md5Hex {
if !isMD5SumEqual(expectedMD5Sum, md5sum) { return "", probe.NewError(BadDigest{md5Hex, newMD5Hex})
partFile.CloseAndPurge()
return "", probe.NewError(BadDigest{MD5: expectedMD5Sum, Bucket: bucket, Object: object})
} }
} }
if signature != nil {
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256Hasher.Sum(nil)))
if err != nil {
partFile.CloseAndPurge()
return "", err.Trace()
}
if !ok {
partFile.CloseAndPurge()
return "", probe.NewError(SignDoesNotMatch{})
}
}
partFile.Close()
fi, e := os.Stat(partPath) // Stat the file to get the latest information.
fi, e := os.Stat(partFile.Name())
if e != nil { if e != nil {
return "", probe.NewError(e) return "", probe.NewError(e)
} }
partMetadata := PartMetadata{} partMetadata := PartMetadata{}
partMetadata.ETag = md5sum
partMetadata.PartNumber = partID partMetadata.PartNumber = partID
partMetadata.ETag = newMD5Hex
partMetadata.Size = fi.Size() partMetadata.Size = fi.Size()
partMetadata.LastModified = fi.ModTime() partMetadata.LastModified = fi.ModTime()
@ -450,13 +434,11 @@ func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum s
if err := saveMultipartsSession(*fs.multiparts); err != nil { if err := saveMultipartsSession(*fs.multiparts); err != nil {
return "", err.Trace(partPathPrefix) return "", err.Trace(partPathPrefix)
} }
return newMD5Hex, nil
// Return etag.
return partMetadata.ETag, nil
} }
// CompleteMultipartUpload - complete a multipart upload and persist the data // CompleteMultipartUpload - complete a multipart upload and persist the data
func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *signature4.Sign) (ObjectInfo, *probe.Error) { func (fs Filesystem) CompleteMultipartUpload(bucket string, object string, uploadID string, completeMultipartBytes []byte) (ObjectInfo, *probe.Error) {
// Check bucket name is valid. // Check bucket name is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return ObjectInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) return ObjectInfo{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
@ -488,26 +470,8 @@ func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, da
return ObjectInfo{}, probe.NewError(e) return ObjectInfo{}, probe.NewError(e)
} }
partBytes, e := ioutil.ReadAll(data)
if e != nil {
objectWriter.CloseAndPurge()
return ObjectInfo{}, probe.NewError(e)
}
if signature != nil {
sh := sha256.New()
sh.Write(partBytes)
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
if err != nil {
objectWriter.CloseAndPurge()
return ObjectInfo{}, err.Trace()
}
if !ok {
objectWriter.CloseAndPurge()
return ObjectInfo{}, probe.NewError(SignDoesNotMatch{})
}
}
completeMultipartUpload := &CompleteMultipartUpload{} completeMultipartUpload := &CompleteMultipartUpload{}
if e = xml.Unmarshal(partBytes, completeMultipartUpload); e != nil { if e = xml.Unmarshal(completeMultipartBytes, completeMultipartUpload); e != nil {
objectWriter.CloseAndPurge() objectWriter.CloseAndPurge()
return ObjectInfo{}, probe.NewError(MalformedXML{}) return ObjectInfo{}, probe.NewError(MalformedXML{})
} }

View File

@ -18,22 +18,19 @@ package fs
import ( import (
"bytes" "bytes"
"crypto/md5"
"io" "io"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
"crypto/md5"
"encoding/base64"
"encoding/hex" "encoding/hex"
"runtime" "runtime"
"github.com/minio/minio/pkg/atomic" "github.com/minio/minio/pkg/atomic"
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/disk" "github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/mimedb" "github.com/minio/minio/pkg/mimedb"
"github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/s3/signature4"
) )
/// Object Operations /// Object Operations
@ -200,7 +197,7 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) bool {
} }
// CreateObject - create an object. // CreateObject - create an object.
func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size int64, data io.Reader, sig *signature4.Sign) (ObjectInfo, *probe.Error) { func (fs Filesystem) CreateObject(bucket string, object string, size int64, data io.Reader, md5Bytes []byte) (ObjectInfo, *probe.Error) {
di, e := disk.GetInfo(fs.path) di, e := disk.GetInfo(fs.path)
if e != nil { if e != nil {
return ObjectInfo{}, probe.NewError(e) return ObjectInfo{}, probe.NewError(e)
@ -234,18 +231,15 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
// Get object path. // Get object path.
objectPath := filepath.Join(bucketPath, object) objectPath := filepath.Join(bucketPath, object)
if expectedMD5Sum != "" {
var expectedMD5SumBytes []byte // md5Hex representation.
expectedMD5SumBytes, e = base64.StdEncoding.DecodeString(expectedMD5Sum) var md5Hex string
if e != nil { if len(md5Bytes) != 0 {
// Pro-actively close the connection. md5Hex = hex.EncodeToString(md5Bytes)
return ObjectInfo{}, probe.NewError(InvalidDigest{MD5: expectedMD5Sum})
}
expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
} }
// Write object. // Write object.
file, e := atomic.FileCreateWithPrefix(objectPath, expectedMD5Sum+"$tmpobject") file, e := atomic.FileCreateWithPrefix(objectPath, md5Hex+"$tmpobject")
if e != nil { if e != nil {
switch e := e.(type) { switch e := e.(type) {
case *os.PathError: case *os.PathError:
@ -259,52 +253,40 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
return ObjectInfo{}, probe.NewError(e) return ObjectInfo{}, probe.NewError(e)
} }
} }
defer file.Close()
// Initialize md5 writer.
md5Writer := md5.New()
// Instantiate a new multi writer.
multiWriter := io.MultiWriter(md5Writer, file)
// Instantiate checksum hashers and create a multiwriter. // Instantiate checksum hashers and create a multiwriter.
md5Hasher := md5.New()
sha256Hasher := sha256.New()
objectWriter := io.MultiWriter(file, md5Hasher, sha256Hasher)
if size > 0 { if size > 0 {
if _, e = io.CopyN(objectWriter, data, size); e != nil { if _, e = io.CopyN(multiWriter, data, size); e != nil {
file.CloseAndPurge() file.CloseAndPurge()
return ObjectInfo{}, probe.NewError(e) return ObjectInfo{}, probe.NewError(e)
} }
} else { } else {
if _, e = io.Copy(objectWriter, data); e != nil { if _, e = io.Copy(multiWriter, data); e != nil {
file.CloseAndPurge() file.CloseAndPurge()
return ObjectInfo{}, probe.NewError(e) return ObjectInfo{}, probe.NewError(e)
} }
} }
md5Sum := hex.EncodeToString(md5Hasher.Sum(nil)) newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
// Verify if the written object is equal to what is expected, only if len(md5Bytes) != 0 {
// if it is requested as such. if newMD5Hex != md5Hex {
if expectedMD5Sum != "" { return ObjectInfo{}, probe.NewError(BadDigest{md5Hex, newMD5Hex})
if !isMD5SumEqual(expectedMD5Sum, md5Sum) {
file.CloseAndPurge()
return ObjectInfo{}, probe.NewError(BadDigest{MD5: expectedMD5Sum, Bucket: bucket, Object: object})
} }
} }
sha256Sum := hex.EncodeToString(sha256Hasher.Sum(nil))
if sig != nil {
ok, err := sig.DoesSignatureMatch(sha256Sum)
if err != nil {
file.CloseAndPurge()
return ObjectInfo{}, err.Trace()
}
if !ok {
file.CloseAndPurge()
return ObjectInfo{}, probe.NewError(SignDoesNotMatch{})
}
}
file.Close()
// Set stat again to get the latest metadata. // Set stat again to get the latest metadata.
st, e := os.Stat(objectPath) st, e := os.Stat(file.Name())
if e != nil { if e != nil {
return ObjectInfo{}, probe.NewError(e) return ObjectInfo{}, probe.NewError(e)
} }
contentType := "application/octet-stream" contentType := "application/octet-stream"
if objectExt := filepath.Ext(objectPath); objectExt != "" { if objectExt := filepath.Ext(objectPath); objectExt != "" {
content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))] content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]
@ -317,8 +299,8 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
Name: object, Name: object,
ModifiedTime: st.ModTime(), ModifiedTime: st.ModTime(),
Size: st.Size(), Size: st.Size(),
MD5Sum: newMD5Hex,
ContentType: contentType, ContentType: contentType,
MD5Sum: md5Sum,
} }
return newObject, nil return newObject, nil
} }

View File

@ -19,7 +19,6 @@ package fs
import ( import (
"bytes" "bytes"
"crypto/md5" "crypto/md5"
"encoding/base64"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os" "os"
@ -47,7 +46,7 @@ func TestGetObjectInfo(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
_, err = fs.CreateObject("test-getobjectinfo", "Asia/asiapics.jpg", "", int64(len("asiapics")), bytes.NewBufferString("asiapics"), nil) _, err = fs.CreateObject("test-getobjectinfo", "Asia/asiapics.jpg", int64(len("asiapics")), bytes.NewBufferString("asiapics"), nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -138,7 +137,7 @@ func TestGetObjectInfoCore(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
_, err = fs.CreateObject("test-getobjinfo", "Asia/asiapics.jpg", "", int64(len("asiapics")), bytes.NewBufferString("asiapics"), nil) _, err = fs.CreateObject("test-getobjinfo", "Asia/asiapics.jpg", int64(len("asiapics")), bytes.NewBufferString("asiapics"), nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -228,16 +227,14 @@ func BenchmarkGetObject(b *testing.B) {
text := "Jack and Jill went up the hill / To fetch a pail of water." text := "Jack and Jill went up the hill / To fetch a pail of water."
hasher := md5.New() hasher := md5.New()
hasher.Write([]byte(text)) hasher.Write([]byte(text))
sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
_, err = filesystem.CreateObject("bucket", "object"+strconv.Itoa(i), sum, int64(len(text)), bytes.NewBufferString(text), nil) _, err = filesystem.CreateObject("bucket", "object"+strconv.Itoa(i), int64(len(text)), bytes.NewBufferString(text), hasher.Sum(nil))
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }
} }
var w bytes.Buffer var w bytes.Buffer
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {

View File

@ -1,3 +1,5 @@
// +build ignore
package xl package xl
// BucketACL - bucket level access control // BucketACL - bucket level access control

View File

@ -1,3 +1,5 @@
// +build ignore
/* /*
* Minio Cloud Storage, (C) 2015 Minio, Inc. * Minio Cloud Storage, (C) 2015 Minio, Inc.
* *

View File

@ -1,3 +1,5 @@
// +build ignore
/* /*
* Minio Cloud Storage, (C) 2015 Minio, Inc. * Minio Cloud Storage, (C) 2015 Minio, Inc.
* *

View File

@ -1,3 +1,5 @@
// +build ignore
/* /*
* Minio Cloud Storage, (C) 2015 Minio, Inc. * Minio Cloud Storage, (C) 2015 Minio, Inc.
* *

View File

@ -1,3 +1,5 @@
// +build ignore
/* /*
* Minio Cloud Storage, (C) 2015 Minio, Inc. * Minio Cloud Storage, (C) 2015 Minio, Inc.
* *

3
pkg/xl/doc.go Normal file
View File

@ -0,0 +1,3 @@
// +build ignore
package xl

View File

@ -1,3 +1,5 @@
// +build ignore
/* /*
* Minio Cloud Storage, (C) 2015 Minio, Inc. * Minio Cloud Storage, (C) 2015 Minio, Inc.
* *

View File

@ -1,3 +1,5 @@
// +build ignore
/* /*
* Minio Cloud Storage, (C) 2015 Minio, Inc. * Minio Cloud Storage, (C) 2015 Minio, Inc.
* *

View File

@ -1,3 +1,5 @@
// +build ignore
/* /*
* Minio Cloud Storage, (C) 2015 Minio, Inc. * Minio Cloud Storage, (C) 2015 Minio, Inc.
* *

View File

@ -1,3 +1,5 @@
// +build ignore
/* /*
* Minio Cloud Storage, (C) 2015 Minio, Inc. * Minio Cloud Storage, (C) 2015 Minio, Inc.
* *

View File

@ -1,3 +1,5 @@
// +build ignore
/* /*
* Minio Cloud Storage, (C) 2015 Minio, Inc. * Minio Cloud Storage, (C) 2015 Minio, Inc.
* *

View File

@ -1,3 +1,5 @@
// +build ignore
/* /*
* Minio Cloud Storage, (C) 2015 Minio, Inc. * Minio Cloud Storage, (C) 2015 Minio, Inc.
* *

View File

@ -1,3 +1,5 @@
// +build ignore
/* /*
* Minio Cloud Storage, (C) 2015 Minio, Inc. * Minio Cloud Storage, (C) 2015 Minio, Inc.
* *

View File

@ -1,3 +1,5 @@
// +build ignore
/* /*
* Minio Cloud Storage, (C) 2015 Minio, Inc. * Minio Cloud Storage, (C) 2015 Minio, Inc.
* *

View File

@ -1,3 +1,5 @@
// +build ignore
/* /*
* Minio Cloud Storage, (C) 2015 Minio, Inc. * Minio Cloud Storage, (C) 2015 Minio, Inc.
* *

View File

@ -1,3 +1,5 @@
// +build ignore
/* /*
* Minio Cloud Storage, (C) 2015 Minio, Inc. * Minio Cloud Storage, (C) 2015 Minio, Inc.
* *

View File

@ -1,3 +1,5 @@
// +build ignore
/* /*
* Minio Cloud Storage, (C) 2015 Minio, Inc. * Minio Cloud Storage, (C) 2015 Minio, Inc.
* *

View File

@ -29,7 +29,6 @@ import (
"github.com/minio/minio-go" "github.com/minio/minio-go"
"github.com/minio/minio/pkg/fs" "github.com/minio/minio/pkg/fs"
"github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/s3/signature4"
"github.com/minio/miniobrowser" "github.com/minio/miniobrowser"
) )
@ -37,8 +36,6 @@ import (
type storageAPI struct { type storageAPI struct {
// Filesystem instance. // Filesystem instance.
Filesystem fs.Filesystem Filesystem fs.Filesystem
// Signature instance.
Signature *signature4.Sign
} }
// webAPI container for Web API. // webAPI container for Web API.
@ -163,19 +160,12 @@ func configureServerHandler(filesystem fs.Filesystem) http.Handler {
// Access credentials. // Access credentials.
cred := serverConfig.GetCredential() cred := serverConfig.GetCredential()
// Server region.
region := serverConfig.GetRegion()
// Server addr. // Server addr.
addr := serverConfig.GetAddr() addr := serverConfig.GetAddr()
sign, err := signature4.New(cred.AccessKeyID, cred.SecretAccessKey, region)
fatalIf(err.Trace(cred.AccessKeyID, cred.SecretAccessKey, region), "Initializing signature version '4' failed.", nil)
// Initialize API. // Initialize API.
api := storageAPI{ api := storageAPI{
Filesystem: filesystem, Filesystem: filesystem,
Signature: sign,
} }
// Split host port. // Split host port.

View File

@ -38,10 +38,6 @@ import (
. "gopkg.in/check.v1" . "gopkg.in/check.v1"
) )
const (
yyyymmdd = "20060102"
)
type MyAPIFSCacheSuite struct { type MyAPIFSCacheSuite struct {
root string root string
req *http.Request req *http.Request
@ -154,30 +150,32 @@ func (s *MyAPIFSCacheSuite) newRequest(method, urlStr string, contentLength int6
req.Header.Set("x-amz-date", t.Format(iso8601Format)) req.Header.Set("x-amz-date", t.Format(iso8601Format))
// add Content-Length // Add Content-Length
req.ContentLength = contentLength req.ContentLength = contentLength
// add body // Save for subsequent use
var hashedPayload string
switch { switch {
case body == nil: case body == nil:
req.Body = nil hashedPayload = hex.EncodeToString(sum256([]byte{}))
default: default:
payloadBytes, e := ioutil.ReadAll(body)
if e != nil {
return nil, e
}
hashedPayload = hex.EncodeToString(sum256(payloadBytes))
md5base64 := base64.StdEncoding.EncodeToString(sumMD5(payloadBytes))
req.Header.Set("Content-Md5", md5base64)
}
req.Header.Set("x-amz-content-sha256", hashedPayload)
// Seek back to beginning.
if body != nil {
body.Seek(0, 0)
// Add body
req.Body = ioutil.NopCloser(body) req.Body = ioutil.NopCloser(body)
} }
// save for subsequent use
hash := func() string {
switch {
case body == nil:
return hex.EncodeToString(sum256([]byte{}))
default:
sum256Bytes, _ := sum256Reader(body)
return hex.EncodeToString(sum256Bytes)
}
}
hashedPayload := hash()
req.Header.Set("x-amz-content-sha256", hashedPayload)
var headers []string var headers []string
vals := make(map[string][]string) vals := make(map[string][]string)
for k, vv := range req.Header { for k, vv := range req.Header {

View File

@ -25,6 +25,8 @@ import (
"golang.org/x/crypto/bcrypt" "golang.org/x/crypto/bcrypt"
) )
const jwtAlgorithm = "Bearer"
// JWT - jwt auth backend // JWT - jwt auth backend
type JWT struct { type JWT struct {
credential credential

View File

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package signature4 package main
import ( import (
"fmt" "fmt"
@ -48,10 +48,7 @@ var (
ErrMalformedExpires = errFactory() ErrMalformedExpires = errFactory()
ErrAuthHeaderEmpty = errFactory() ErrAuthHeaderEmpty = errFactory()
ErrUnsuppSignAlgo = errFactory() ErrUnsuppSignAlgo = errFactory()
ErrMissingExpiresQuery = errFactory()
ErrExpiredPresignRequest = errFactory() ErrExpiredPresignRequest = errFactory()
ErrSignDoesNotMath = errFactory()
ErrInvalidAccessKeyID = errFactory()
ErrInvalidSecretKey = errFactory()
ErrRegionISEmpty = errFactory() ErrRegionISEmpty = errFactory()
ErrInvalidAccessKey = errFactory()
) )

View File

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package signature4 package main
import ( import (
"net/url" "net/url"
@ -24,11 +24,11 @@ import (
"github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/probe"
) )
// credential data type represents structured form of Credential // credentialHeader data type represents structured form of Credential
// string from authorization header. // string from authorization header.
type credential struct { type credentialHeader struct {
accessKeyID string accessKey string
scope struct { scope struct {
date time.Time date time.Time
region string region string
service string service string
@ -36,40 +36,41 @@ type credential struct {
} }
} }
// parse credential string into its structured form. // parse credentialHeader string into its structured form.
func parseCredential(credElement string) (credential, *probe.Error) { func parseCredentialHeader(credElement string) (credentialHeader, *probe.Error) {
creds := strings.Split(strings.TrimSpace(credElement), "=") creds := strings.Split(strings.TrimSpace(credElement), "=")
if len(creds) != 2 { if len(creds) != 2 {
return credential{}, ErrMissingFields("Credential tag has missing fields.", credElement).Trace(credElement) return credentialHeader{}, ErrMissingFields("Credential tag has missing fields.", credElement).Trace(credElement)
} }
if creds[0] != "Credential" { if creds[0] != "Credential" {
return credential{}, ErrMissingCredTag("Missing credentials tag.", credElement).Trace(credElement) return credentialHeader{}, ErrMissingCredTag("Missing credentials tag.", credElement).Trace(credElement)
} }
credElements := strings.Split(strings.TrimSpace(creds[1]), "/") credElements := strings.Split(strings.TrimSpace(creds[1]), "/")
if len(credElements) != 5 { if len(credElements) != 5 {
return credential{}, ErrCredMalformed("Credential values malformed.", credElement).Trace(credElement) return credentialHeader{}, ErrCredMalformed("Credential values malformed.", credElement).Trace(credElement)
} }
if !isValidAccessKey.MatchString(credElements[0]) { if !isValidAccessKey.MatchString(credElements[0]) {
return credential{}, ErrInvalidAccessKeyID("Invalid access key id.", credElement).Trace(credElement) return credentialHeader{}, ErrInvalidAccessKey("Invalid access key id.", credElement).Trace(credElement)
} }
cred := credential{ // Save access key id.
accessKeyID: credElements[0], cred := credentialHeader{
accessKey: credElements[0],
} }
var e error var e error
cred.scope.date, e = time.Parse(yyyymmdd, credElements[1]) cred.scope.date, e = time.Parse(yyyymmdd, credElements[1])
if e != nil { if e != nil {
return credential{}, ErrInvalidDateFormat("Invalid date format.", credElement).Trace(credElement) return credentialHeader{}, ErrInvalidDateFormat("Invalid date format.", credElement).Trace(credElement)
} }
if credElements[2] == "" { if credElements[2] == "" {
return credential{}, ErrRegionISEmpty("Region is empty.", credElement).Trace(credElement) return credentialHeader{}, ErrRegionISEmpty("Region is empty.", credElement).Trace(credElement)
} }
cred.scope.region = credElements[2] cred.scope.region = credElements[2]
if credElements[3] != "s3" { if credElements[3] != "s3" {
return credential{}, ErrInvalidService("Invalid service detected.", credElement).Trace(credElement) return credentialHeader{}, ErrInvalidService("Invalid service detected.", credElement).Trace(credElement)
} }
cred.scope.service = credElements[3] cred.scope.service = credElements[3]
if credElements[4] != "aws4_request" { if credElements[4] != "aws4_request" {
return credential{}, ErrInvalidRequestVersion("Invalid request version detected.", credElement).Trace(credElement) return credentialHeader{}, ErrInvalidRequestVersion("Invalid request version detected.", credElement).Trace(credElement)
} }
cred.scope.request = credElements[4] cred.scope.request = credElements[4]
return cred, nil return cred, nil
@ -103,7 +104,7 @@ func parseSignedHeaders(signedHdrElement string) ([]string, *probe.Error) {
// signValues data type represents structured form of AWS Signature V4 header. // signValues data type represents structured form of AWS Signature V4 header.
type signValues struct { type signValues struct {
Credential credential Credential credentialHeader
SignedHeaders []string SignedHeaders []string
Signature string Signature string
} }
@ -118,7 +119,7 @@ type preSignValues struct {
// Parses signature version '4' query string of the following form. // Parses signature version '4' query string of the following form.
// //
// querystring = X-Amz-Algorithm=algorithm // querystring = X-Amz-Algorithm=algorithm
// querystring += &X-Amz-Credential= urlencode(access_key_ID + '/' + credential_scope) // querystring += &X-Amz-Credential= urlencode(accessKey + '/' + credential_scope)
// querystring += &X-Amz-Date=date // querystring += &X-Amz-Date=date
// querystring += &X-Amz-Expires=timeout interval // querystring += &X-Amz-Expires=timeout interval
// querystring += &X-Amz-SignedHeaders=signed_headers // querystring += &X-Amz-SignedHeaders=signed_headers
@ -135,7 +136,7 @@ func parsePreSignV4(query url.Values) (preSignValues, *probe.Error) {
var err *probe.Error var err *probe.Error
// Save credential. // Save credential.
preSignV4Values.Credential, err = parseCredential("Credential=" + query.Get("X-Amz-Credential")) preSignV4Values.Credential, err = parseCredentialHeader("Credential=" + query.Get("X-Amz-Credential"))
if err != nil { if err != nil {
return preSignValues{}, err.Trace(query.Get("X-Amz-Credential")) return preSignValues{}, err.Trace(query.Get("X-Amz-Credential"))
} }
@ -171,8 +172,8 @@ func parsePreSignV4(query url.Values) (preSignValues, *probe.Error) {
// Parses signature version '4' header of the following form. // Parses signature version '4' header of the following form.
// //
// Authorization: algorithm Credential=access key ID/credential scope, \ // Authorization: algorithm Credential=accessKeyID/credScope, \
// SignedHeaders=SignedHeaders, Signature=signature // SignedHeaders=signedHeaders, Signature=signature
// //
func parseSignV4(v4Auth string) (signValues, *probe.Error) { func parseSignV4(v4Auth string) (signValues, *probe.Error) {
// Replace all spaced strings, some clients can send spaced // Replace all spaced strings, some clients can send spaced
@ -200,7 +201,7 @@ func parseSignV4(v4Auth string) (signValues, *probe.Error) {
var err *probe.Error var err *probe.Error
// Save credentail values. // Save credentail values.
signV4Values.Credential, err = parseCredential(authFields[0]) signV4Values.Credential, err = parseCredentialHeader(authFields[0])
if err != nil { if err != nil {
return signValues{}, err.Trace(v4Auth) return signValues{}, err.Trace(v4Auth)
} }

View File

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package signature4 package main
import ( import (
"encoding/base64" "encoding/base64"
@ -158,8 +158,8 @@ func parsePostPolicyFormV4(policy string) (PostPolicyForm, *probe.Error) {
return parsedPolicy, nil return parsedPolicy, nil
} }
// ApplyPolicyCond - apply policy conditions and validate input values. // checkPostPolicy - apply policy conditions and validate input values.
func ApplyPolicyCond(formValues map[string]string) *probe.Error { func checkPostPolicy(formValues map[string]string) *probe.Error {
if formValues["X-Amz-Algorithm"] != signV4Algorithm { if formValues["X-Amz-Algorithm"] != signV4Algorithm {
return ErrUnsuppSignAlgo("Unsupported signature algorithm in policy form data.", formValues["X-Amz-Algorithm"]).Trace(formValues["X-Amz-Algorithm"]) return ErrUnsuppSignAlgo("Unsupported signature algorithm in policy form data.", formValues["X-Amz-Algorithm"]).Trace(formValues["X-Amz-Algorithm"])
} }

View File

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package signature4 package main
import ( import (
"crypto/hmac" "crypto/hmac"
@ -27,14 +27,6 @@ import (
"github.com/minio/minio/pkg/crypto/sha256" "github.com/minio/minio/pkg/crypto/sha256"
) )
/// helpers
// isValidSecretKey - validate secret key.
var isValidSecretKey = regexp.MustCompile("^.{40}$")
// isValidAccessKey - validate access key.
var isValidAccessKey = regexp.MustCompile("^[A-Z0-9\\-\\.\\_\\~]{20}$")
// isValidRegion - verify if incoming region value is valid with configured Region. // isValidRegion - verify if incoming region value is valid with configured Region.
func isValidRegion(reqRegion string, confRegion string) bool { func isValidRegion(reqRegion string, confRegion string) bool {
if confRegion == "" || confRegion == "US" { if confRegion == "" || confRegion == "US" {

View File

@ -1,5 +1,5 @@
/* /*
* Minio Cloud Storage, (C) 2015 Minio, Inc. * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
// Package signature4 implements helper functions to validate AWS // This file implements helper functions to validate AWS
// Signature Version '4' authorization header. // Signature Version '4' authorization header.
// //
// This package provides comprehensive helpers for following signature // This package provides comprehensive helpers for following signature
@ -22,7 +22,7 @@
// - Based on Authorization header. // - Based on Authorization header.
// - Based on Query parameters. // - Based on Query parameters.
// - Based on Form POST policy. // - Based on Form POST policy.
package signature4 package main
import ( import (
"bytes" "bytes"
@ -38,15 +38,6 @@ import (
"github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/probe"
) )
// Sign - local variables
type Sign struct {
accessKeyID string
secretAccessKey string
region string
httpRequest *http.Request
extractedSignedHeaders http.Header
}
// AWS Signature Version '4' constants. // AWS Signature Version '4' constants.
const ( const (
signV4Algorithm = "AWS4-HMAC-SHA256" signV4Algorithm = "AWS4-HMAC-SHA256"
@ -54,37 +45,8 @@ const (
yyyymmdd = "20060102" yyyymmdd = "20060102"
) )
// New - initialize a new authorization checkes.
func New(accessKeyID, secretAccessKey, region string) (*Sign, *probe.Error) {
if !isValidAccessKey.MatchString(accessKeyID) {
return nil, ErrInvalidAccessKeyID("Invalid access key id.", accessKeyID).Trace(accessKeyID)
}
if !isValidSecretKey.MatchString(secretAccessKey) {
return nil, ErrInvalidAccessKeyID("Invalid secret key.", secretAccessKey).Trace(secretAccessKey)
}
if region == "" {
return nil, ErrRegionISEmpty("Region is empty.").Trace()
}
signature := &Sign{
accessKeyID: accessKeyID,
secretAccessKey: secretAccessKey,
region: region,
}
return signature, nil
}
// SetHTTPRequestToVerify - sets the http request which needs to be verified.
func (s *Sign) SetHTTPRequestToVerify(r *http.Request) Sign {
// Do not set http request if its 'nil'.
if r == nil {
return *s
}
s.httpRequest = r
return *s
}
// getCanonicalHeaders generate a list of request headers with their values // getCanonicalHeaders generate a list of request headers with their values
func (s Sign) getCanonicalHeaders(signedHeaders http.Header) string { func getCanonicalHeaders(signedHeaders http.Header, host string) string {
var headers []string var headers []string
vals := make(http.Header) vals := make(http.Header)
for k, vv := range signedHeaders { for k, vv := range signedHeaders {
@ -100,7 +62,7 @@ func (s Sign) getCanonicalHeaders(signedHeaders http.Header) string {
buf.WriteByte(':') buf.WriteByte(':')
switch { switch {
case k == "host": case k == "host":
buf.WriteString(s.httpRequest.Host) buf.WriteString(host)
fallthrough fallthrough
default: default:
for idx, v := range vals[k] { for idx, v := range vals[k] {
@ -116,7 +78,7 @@ func (s Sign) getCanonicalHeaders(signedHeaders http.Header) string {
} }
// getSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names // getSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names
func (s Sign) getSignedHeaders(signedHeaders http.Header) string { func getSignedHeaders(signedHeaders http.Header) string {
var headers []string var headers []string
for k := range signedHeaders { for k := range signedHeaders {
headers = append(headers, strings.ToLower(k)) headers = append(headers, strings.ToLower(k))
@ -136,18 +98,17 @@ func (s Sign) getSignedHeaders(signedHeaders http.Header) string {
// <SignedHeaders>\n // <SignedHeaders>\n
// <HashedPayload> // <HashedPayload>
// //
func (s *Sign) getCanonicalRequest() string { func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method, host string) string {
payload := s.httpRequest.Header.Get(http.CanonicalHeaderKey("x-amz-content-sha256")) rawQuery := strings.Replace(queryStr, "+", "%20", -1)
s.httpRequest.URL.RawQuery = strings.Replace(s.httpRequest.URL.Query().Encode(), "+", "%20", -1) encodedPath := getURLEncodedName(urlPath)
encodedPath := getURLEncodedName(s.httpRequest.URL.Path)
// Convert any space strings back to "+". // Convert any space strings back to "+".
encodedPath = strings.Replace(encodedPath, "+", "%20", -1) encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
canonicalRequest := strings.Join([]string{ canonicalRequest := strings.Join([]string{
s.httpRequest.Method, method,
encodedPath, encodedPath,
s.httpRequest.URL.RawQuery, rawQuery,
s.getCanonicalHeaders(s.extractedSignedHeaders), getCanonicalHeaders(extractedSignedHeaders, host),
s.getSignedHeaders(s.extractedSignedHeaders), getSignedHeaders(extractedSignedHeaders),
payload, payload,
}, "\n") }, "\n")
return canonicalRequest return canonicalRequest
@ -163,27 +124,27 @@ func (s *Sign) getCanonicalRequest() string {
// <SignedHeaders>\n // <SignedHeaders>\n
// <HashedPayload> // <HashedPayload>
// //
func (s Sign) getPresignedCanonicalRequest(presignedQuery string) string { func getPresignCanonicalRequest(extractedSignedHeaders http.Header, presignedQuery, urlPath, method, host string) string {
rawQuery := strings.Replace(presignedQuery, "+", "%20", -1) rawQuery := strings.Replace(presignedQuery, "+", "%20", -1)
encodedPath := getURLEncodedName(s.httpRequest.URL.Path) encodedPath := getURLEncodedName(urlPath)
// Convert any space strings back to "+". // Convert any space strings back to "+".
encodedPath = strings.Replace(encodedPath, "+", "%20", -1) encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
canonicalRequest := strings.Join([]string{ canonicalRequest := strings.Join([]string{
s.httpRequest.Method, method,
encodedPath, encodedPath,
rawQuery, rawQuery,
s.getCanonicalHeaders(s.extractedSignedHeaders), getCanonicalHeaders(extractedSignedHeaders, host),
s.getSignedHeaders(s.extractedSignedHeaders), getSignedHeaders(extractedSignedHeaders),
"UNSIGNED-PAYLOAD", "UNSIGNED-PAYLOAD",
}, "\n") }, "\n")
return canonicalRequest return canonicalRequest
} }
// getScope generate a string of a specific date, an AWS region, and a service. // getScope generate a string of a specific date, an AWS region, and a service.
func (s Sign) getScope(t time.Time) string { func getScope(t time.Time, region string) string {
scope := strings.Join([]string{ scope := strings.Join([]string{
t.Format(yyyymmdd), t.Format(yyyymmdd),
s.region, region,
"s3", "s3",
"aws4_request", "aws4_request",
}, "/") }, "/")
@ -191,92 +152,105 @@ func (s Sign) getScope(t time.Time) string {
} }
// getStringToSign a string based on selected query values. // getStringToSign a string based on selected query values.
func (s Sign) getStringToSign(canonicalRequest string, t time.Time) string { func getStringToSign(canonicalRequest string, t time.Time, region string) string {
stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n" stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n"
stringToSign = stringToSign + s.getScope(t) + "\n" stringToSign = stringToSign + getScope(t, region) + "\n"
canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest)) canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest))
stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:]) stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:])
return stringToSign return stringToSign
} }
// getSigningKey hmac seed to calculate final signature. // getSigningKey hmac seed to calculate final signature.
func (s Sign) getSigningKey(t time.Time) []byte { func getSigningKey(secretKey string, t time.Time, region string) []byte {
secret := s.secretAccessKey date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format(yyyymmdd)))
date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) regionBytes := sumHMAC(date, []byte(region))
region := sumHMAC(date, []byte(s.region)) service := sumHMAC(regionBytes, []byte("s3"))
service := sumHMAC(region, []byte("s3"))
signingKey := sumHMAC(service, []byte("aws4_request")) signingKey := sumHMAC(service, []byte("aws4_request"))
return signingKey return signingKey
} }
// getSignature final signature in hexadecimal form. // getSignature final signature in hexadecimal form.
func (s Sign) getSignature(signingKey []byte, stringToSign string) string { func getSignature(signingKey []byte, stringToSign string) string {
return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
} }
// DoesPolicySignatureMatch - Verify query headers with post policy // doesPolicySignatureMatch - Verify query headers with post policy
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
// returns true if matches, false otherwise. if error is not nil then it is always false // returns true if matches, false otherwise. if error is not nil then it is always false
func (s *Sign) DoesPolicySignatureMatch(formValues map[string]string) (bool, *probe.Error) { func doesPolicySignatureMatch(formValues map[string]string) (bool, *probe.Error) {
// Access credentials.
cred := serverConfig.GetCredential()
// Server region.
region := serverConfig.GetRegion()
// Parse credential tag. // Parse credential tag.
credential, err := parseCredential("Credential=" + formValues["X-Amz-Credential"]) credHeader, err := parseCredentialHeader("Credential=" + formValues["X-Amz-Credential"])
if err != nil { if err != nil {
return false, err.Trace(formValues["X-Amz-Credential"]) return false, err.Trace(formValues["X-Amz-Credential"])
} }
// Verify if the access key id matches. // Verify if the access key id matches.
if credential.accessKeyID != s.accessKeyID { if credHeader.accessKey != cred.AccessKeyID {
return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", credential.accessKeyID).Trace(credential.accessKeyID) return false, ErrInvalidAccessKey("Access key id does not match with our records.", credHeader.accessKey).Trace(credHeader.accessKey)
} }
// Verify if the region is valid. // Verify if the region is valid.
reqRegion := credential.scope.region if !isValidRegion(credHeader.scope.region, region) {
if !isValidRegion(reqRegion, s.region) { return false, ErrInvalidRegion("Requested region is not recognized.", credHeader.scope.region).Trace(credHeader.scope.region)
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion)
} }
// Save region.
s.region = reqRegion
// Parse date string. // Parse date string.
t, e := time.Parse(iso8601Format, formValues["X-Amz-Date"]) t, e := time.Parse(iso8601Format, formValues["X-Amz-Date"])
if e != nil { if e != nil {
return false, probe.NewError(e) return false, probe.NewError(e)
} }
signingKey := s.getSigningKey(t)
newSignature := s.getSignature(signingKey, formValues["Policy"]) // Get signing key.
signingKey := getSigningKey(cred.SecretAccessKey, t, region)
// Get signature.
newSignature := getSignature(signingKey, formValues["Policy"])
// Verify signature.
if newSignature != formValues["X-Amz-Signature"] { if newSignature != formValues["X-Amz-Signature"] {
return false, nil return false, nil
} }
return true, nil return true, nil
} }
// DoesPresignedSignatureMatch - Verify query headers with presigned signature // doesPresignedSignatureMatch - Verify query headers with presigned signature
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
// returns true if matches, false otherwise. if error is not nil then it is always false // returns true if matches, false otherwise. if error is not nil then it is always false
func (s *Sign) DoesPresignedSignatureMatch() (bool, *probe.Error) { func doesPresignedSignatureMatch(r *http.Request) (bool, *probe.Error) {
// Access credentials.
cred := serverConfig.GetCredential()
// Server region.
region := serverConfig.GetRegion()
// Copy request
req := *r
// Parse request query string. // Parse request query string.
preSignValues, err := parsePreSignV4(s.httpRequest.URL.Query()) preSignValues, err := parsePreSignV4(req.URL.Query())
if err != nil { if err != nil {
return false, err.Trace(s.httpRequest.URL.String()) return false, err.Trace(req.URL.String())
} }
// Verify if the access key id matches. // Verify if the access key id matches.
if preSignValues.Credential.accessKeyID != s.accessKeyID { if preSignValues.Credential.accessKey != cred.AccessKeyID {
return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", preSignValues.Credential.accessKeyID).Trace(preSignValues.Credential.accessKeyID) return false, ErrInvalidAccessKey("Access key id does not match with our records.", preSignValues.Credential.accessKey).Trace(preSignValues.Credential.accessKey)
} }
// Verify if region is valid. // Verify if region is valid.
reqRegion := preSignValues.Credential.scope.region sRegion := preSignValues.Credential.scope.region
if !isValidRegion(reqRegion, s.region) { if !isValidRegion(sRegion, region) {
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion) return false, ErrInvalidRegion("Requested region is not recognized.", sRegion).Trace(sRegion)
} }
// Save region.
s.region = reqRegion
// Extract all the signed headers along with its values. // Extract all the signed headers along with its values.
s.extractedSignedHeaders = extractSignedHeaders(preSignValues.SignedHeaders, s.httpRequest.Header) extractedSignedHeaders := extractSignedHeaders(preSignValues.SignedHeaders, req.Header)
// Construct new query. // Construct new query.
query := make(url.Values) query := make(url.Values)
@ -293,11 +267,11 @@ func (s *Sign) DoesPresignedSignatureMatch() (bool, *probe.Error) {
// Construct the query. // Construct the query.
query.Set("X-Amz-Date", t.Format(iso8601Format)) query.Set("X-Amz-Date", t.Format(iso8601Format))
query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds)) query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds))
query.Set("X-Amz-SignedHeaders", s.getSignedHeaders(s.extractedSignedHeaders)) query.Set("X-Amz-SignedHeaders", getSignedHeaders(extractedSignedHeaders))
query.Set("X-Amz-Credential", s.accessKeyID+"/"+s.getScope(t)) query.Set("X-Amz-Credential", cred.AccessKeyID+"/"+getScope(t, region))
// Save other headers available in the request parameters. // Save other headers available in the request parameters.
for k, v := range s.httpRequest.URL.Query() { for k, v := range req.URL.Query() {
if strings.HasPrefix(strings.ToLower(k), "x-amz") { if strings.HasPrefix(strings.ToLower(k), "x-amz") {
continue continue
} }
@ -308,35 +282,58 @@ func (s *Sign) DoesPresignedSignatureMatch() (bool, *probe.Error) {
encodedQuery := query.Encode() encodedQuery := query.Encode()
// Verify if date query is same. // Verify if date query is same.
if s.httpRequest.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") { if req.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") {
return false, nil return false, nil
} }
// Verify if expires query is same. // Verify if expires query is same.
if s.httpRequest.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") { if req.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") {
return false, nil return false, nil
} }
// Verify if signed headers query is same. // Verify if signed headers query is same.
if s.httpRequest.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") { if req.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") {
return false, nil return false, nil
} }
// Verify if credential query is same. // Verify if credential query is same.
if s.httpRequest.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") { if req.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") {
return false, nil return false, nil
} }
// Verify finally if signature is same.
newSignature := s.getSignature(s.getSigningKey(t), s.getStringToSign(s.getPresignedCanonicalRequest(encodedQuery), t)) /// Verify finally if signature is same.
if s.httpRequest.URL.Query().Get("X-Amz-Signature") != newSignature {
// Get canonical request.
presignedCanonicalReq := getPresignCanonicalRequest(extractedSignedHeaders, encodedQuery, req.URL.Path, req.Method, req.Host)
// Get string to sign from canonical request.
presignedStringToSign := getStringToSign(presignedCanonicalReq, t, region)
// Get hmac presigned signing key.
presignedSigningKey := getSigningKey(cred.SecretAccessKey, t, region)
// Get new signature.
newSignature := getSignature(presignedSigningKey, presignedStringToSign)
// Verify signature.
if req.URL.Query().Get("X-Amz-Signature") != newSignature {
return false, nil return false, nil
} }
return true, nil return true, nil
} }
// DoesSignatureMatch - Verify authorization header with calculated header in accordance with // doesSignatureMatch - Verify authorization header with calculated header in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html // - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
// returns true if matches, false otherwise. if error is not nil then it is always false // returns true if matches, false otherwise. if error is not nil then it is always false
func (s *Sign) DoesSignatureMatch(hashedPayload string) (bool, *probe.Error) { func doesSignatureMatch(hashedPayload string, r *http.Request) (bool, *probe.Error) {
// Access credentials.
cred := serverConfig.GetCredential()
// Server region.
region := serverConfig.GetRegion()
// Copy request.
req := *r
// Save authorization header. // Save authorization header.
v4Auth := s.httpRequest.Header.Get("Authorization") v4Auth := req.Header.Get("Authorization")
// Parse signature version '4' header. // Parse signature version '4' header.
signV4Values, err := parseSignV4(v4Auth) signV4Values, err := parseSignV4(v4Auth)
@ -345,29 +342,23 @@ func (s *Sign) DoesSignatureMatch(hashedPayload string) (bool, *probe.Error) {
} }
// Extract all the signed headers along with its values. // Extract all the signed headers along with its values.
s.extractedSignedHeaders = extractSignedHeaders(signV4Values.SignedHeaders, s.httpRequest.Header) extractedSignedHeaders := extractSignedHeaders(signV4Values.SignedHeaders, req.Header)
// Verify if the access key id matches. // Verify if the access key id matches.
if signV4Values.Credential.accessKeyID != s.accessKeyID { if signV4Values.Credential.accessKey != cred.AccessKeyID {
return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", signV4Values.Credential.accessKeyID).Trace(signV4Values.Credential.accessKeyID) return false, ErrInvalidAccessKey("Access key id does not match with our records.", signV4Values.Credential.accessKey).Trace(signV4Values.Credential.accessKey)
} }
// Verify if region is valid. // Verify if region is valid.
reqRegion := signV4Values.Credential.scope.region sRegion := signV4Values.Credential.scope.region
if !isValidRegion(reqRegion, s.region) { if !isValidRegion(sRegion, region) {
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion) return false, ErrInvalidRegion("Requested region is not recognized.", sRegion).Trace(sRegion)
} }
// Save region.
s.region = reqRegion
// Set input payload.
s.httpRequest.Header.Set("X-Amz-Content-Sha256", hashedPayload)
// Extract date, if not present throw error. // Extract date, if not present throw error.
var date string var date string
if date = s.httpRequest.Header.Get(http.CanonicalHeaderKey("x-amz-date")); date == "" { if date = req.Header.Get(http.CanonicalHeaderKey("x-amz-date")); date == "" {
if date = s.httpRequest.Header.Get("Date"); date == "" { if date = r.Header.Get("Date"); date == "" {
return false, ErrMissingDateHeader("Date header is missing from the request.").Trace() return false, ErrMissingDateHeader("Date header is missing from the request.").Trace()
} }
} }
@ -377,11 +368,20 @@ func (s *Sign) DoesSignatureMatch(hashedPayload string) (bool, *probe.Error) {
return false, probe.NewError(e) return false, probe.NewError(e)
} }
// Signature version '4'. // Query string.
canonicalRequest := s.getCanonicalRequest() queryStr := req.URL.Query().Encode()
stringToSign := s.getStringToSign(canonicalRequest, t)
signingKey := s.getSigningKey(t) // Get canonical request.
newSignature := s.getSignature(signingKey, stringToSign) canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method, req.Host)
// Get string to sign from canonical request.
stringToSign := getStringToSign(canonicalRequest, t, region)
// Get hmac signing key.
signingKey := getSigningKey(cred.SecretAccessKey, t, region)
// Calculate signature.
newSignature := getSignature(signingKey, stringToSign)
// Verify if signature match. // Verify if signature match.
if newSignature != signV4Values.Signature { if newSignature != signV4Values.Signature {

View File

@ -1,104 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"crypto/hmac"
"encoding/hex"
"io"
"regexp"
"strings"
"unicode/utf8"
"github.com/minio/minio/pkg/crypto/sha256"
)
// sum256Reader calculate sha256 sum for an input read seeker
func sum256Reader(reader io.ReadSeeker) ([]byte, error) {
h := sha256.New()
var err error
start, _ := reader.Seek(0, 1)
defer reader.Seek(start, 0)
for err == nil {
length := 0
byteBuffer := make([]byte, 1024*1024)
length, err = reader.Read(byteBuffer)
byteBuffer = byteBuffer[0:length]
h.Write(byteBuffer)
}
if err != io.EOF {
return nil, err
}
return h.Sum(nil), nil
}
// sum256 calculate sha256 sum for an input byte array
func sum256(data []byte) []byte {
hash := sha256.New()
hash.Write(data)
return hash.Sum(nil)
}
// sumHMAC calculate hmac between two input byte array
func sumHMAC(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
// reservedObjectNames matches names composed entirely of unreserved
// characters (RFC 3986 §2.3 plus '/'), which need no encoding. Compiled once
// at package scope instead of on every call.
var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")

// getURLEncodedName encodes name from its UTF-8 byte representation into
// percent-escaped form.
//
// This is necessary since regular url.Parse() and url.Encode() functions do
// not support UTF-8; non-english characters cannot be parsed due to the
// nature in which url.Encode() is written.
//
// This function on the other hand is a direct replacement for the
// url.Encode() technique to support pretty much every UTF-8 character.
func getURLEncodedName(name string) string {
	// Fast path: nothing to escape.
	if reservedObjectNames.MatchString(name) {
		return name
	}
	// Accumulate into a byte slice to avoid the quadratic cost of repeated
	// string concatenation.
	encoded := make([]byte, 0, len(name))
	for _, r := range name {
		if 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' || '0' <= r && r <= '9' { // §2.3 Unreserved characters (mark)
			encoded = append(encoded, byte(r))
			continue
		}
		switch r {
		case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
			encoded = append(encoded, byte(r))
		default:
			n := utf8.RuneLen(r)
			if n < 0 {
				// Invalid rune; return the input unmodified, matching the
				// original behavior.
				return name
			}
			u := make([]byte, n)
			utf8.EncodeRune(u, r)
			// Percent-escape every byte of the rune's UTF-8 encoding.
			for _, b := range u {
				encoded = append(encoded, '%')
				encoded = append(encoded, strings.ToUpper(hex.EncodeToString([]byte{b}))...)
			}
		}
	}
	return string(encoded)
}

View File

@ -23,3 +23,6 @@ var errSyslogNotSupported = errors.New("Syslog logger not supported on windows")
// errInvalidArgument means that input argument is invalid. // errInvalidArgument means that input argument is invalid.
var errInvalidArgument = errors.New("Invalid arguments specified") var errInvalidArgument = errors.New("Invalid arguments specified")
// errSignatureMismatch means signature did not match.
var errSignatureMismatch = errors.New("Signature does not match")

View File

@ -19,18 +19,17 @@ package main
import ( import (
"encoding/base64" "encoding/base64"
"strings" "strings"
"github.com/minio/minio/pkg/probe"
) )
// isValidMD5 - verify if valid md5 // checkValidMD5 - verify if valid md5, returns md5 in bytes.
func isValidMD5(md5 string) bool { func checkValidMD5(md5 string) ([]byte, *probe.Error) {
if md5 == "" { md5Bytes, e := base64.StdEncoding.DecodeString(strings.TrimSpace(md5))
return true if e != nil {
return nil, probe.NewError(e)
} }
_, err := base64.StdEncoding.DecodeString(strings.TrimSpace(md5)) return md5Bytes, nil
if err != nil {
return false
}
return true
} }
/// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html /// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html