Merge pull request #1132 from harshavardhana/merge-ports

web/rpc: Merge ports with API server.
Harshavardhana 2016-02-17 21:22:48 -08:00
commit 2a6bc604db
30 changed files with 1432 additions and 1310 deletions
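The change consolidates what used to be two listeners — one for the browser web UI / JSON-RPC and one for the S3-compatible API — onto a single port served by one router. Below is a minimal, self-contained sketch of that idea (standard library only; the dispatch logic, handler bodies, and port here are illustrative assumptions, not the commit's actual wiring, which lives in routers.go further down):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// privateBucket is the reserved path prefix for the embedded web UI/RPC,
// mirroring the constant introduced in routers.go.
const privateBucket = "/minio"

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		isBrowser := strings.Contains(r.Header.Get("User-Agent"), "Mozilla")
		if strings.HasPrefix(r.URL.Path, privateBucket) {
			if !isBrowser {
				// Non-browser clients may not touch the reserved web prefix;
				// the real server answers with the new AllAccessDisabled error.
				http.Error(w, "All access to this bucket has been disabled.", http.StatusForbidden)
				return
			}
			fmt.Fprintln(w, "web/rpc handler")
			return
		}
		fmt.Fprintln(w, "S3 API handler")
	})
	// One listener and one port now serve both sets of handlers.
	http.ListenAndServe(":9000", mux)
}

In the actual diff the same dispatch is built from chained wrappers (setBrowserRedirectHandler, setPrivateBucketHandler, setAuthHandler, and so on) around a gorilla/mux router rather than a switch inside one handler.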

View File

@ -75,6 +75,7 @@ const (
BucketNotEmpty BucketNotEmpty
RootPathFull RootPathFull
ObjectExistsAsPrefix ObjectExistsAsPrefix
AllAccessDisabled
) )
// APIError code to Error structure map // APIError code to Error structure map
@ -244,6 +245,11 @@ var errorCodeResponse = map[int]APIError{
Description: "An object already exists as your prefix, choose a different prefix to proceed.", Description: "An object already exists as your prefix, choose a different prefix to proceed.",
HTTPStatusCode: http.StatusConflict, HTTPStatusCode: http.StatusConflict,
}, },
AllAccessDisabled: {
Code: "AllAccessDisabled",
Description: "All access to this bucket has been disabled.",
HTTPStatusCode: http.StatusForbidden,
},
} }
// errorCodeError provides errorCode to Error. It returns empty if the code provided is unknown // errorCodeError provides errorCode to Error. It returns empty if the code provided is unknown

View File

@ -1,304 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"bytes"
"encoding/base64"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"strings"
"time"
"github.com/minio/minio/pkg/probe"
v4 "github.com/minio/minio/pkg/signature"
)
const (
authHeaderPrefix = "AWS4-HMAC-SHA256"
iso8601Format = "20060102T150405Z"
)
// getCredentialsFromAuth parse credentials tag from authorization value
func getCredentialsFromAuth(authValue string) ([]string, *probe.Error) {
if authValue == "" {
return nil, probe.NewError(errMissingAuthHeaderValue)
}
// replace all spaced strings
authValue = strings.Replace(authValue, " ", "", -1)
if !strings.HasPrefix(authValue, authHeaderPrefix) {
return nil, probe.NewError(errMissingFieldsAuthHeader)
}
if !strings.HasPrefix(strings.TrimPrefix(authValue, authHeaderPrefix), "Credential") {
return nil, probe.NewError(errInvalidAuthHeaderPrefix)
}
authValue = strings.TrimPrefix(authValue, authHeaderPrefix)
authFields := strings.Split(strings.TrimSpace(authValue), ",")
if len(authFields) != 3 {
return nil, probe.NewError(errInvalidAuthHeaderValue)
}
credentials := strings.Split(strings.TrimSpace(authFields[0]), "=")
if len(credentials) != 2 {
return nil, probe.NewError(errMissingFieldsCredentialTag)
}
credentialElements := strings.Split(strings.TrimSpace(credentials[1]), "/")
if len(credentialElements) != 5 {
return nil, probe.NewError(errCredentialTagMalformed)
}
return credentialElements, nil
}
func getSignatureFromAuth(authHeaderValue string) (string, *probe.Error) {
authValue := strings.TrimPrefix(authHeaderValue, authHeaderPrefix)
authFields := strings.Split(strings.TrimSpace(authValue), ",")
if len(authFields) != 3 {
return "", probe.NewError(errInvalidAuthHeaderValue)
}
if len(strings.Split(strings.TrimSpace(authFields[2]), "=")) != 2 {
return "", probe.NewError(errMissingFieldsSignatureTag)
}
signature := strings.Split(strings.TrimSpace(authFields[2]), "=")[1]
return signature, nil
}
func getSignedHeadersFromAuth(authHeaderValue string) ([]string, *probe.Error) {
authValue := strings.TrimPrefix(authHeaderValue, authHeaderPrefix)
authFields := strings.Split(strings.TrimSpace(authValue), ",")
if len(authFields) != 3 {
return nil, probe.NewError(errInvalidAuthHeaderValue)
}
if len(strings.Split(strings.TrimSpace(authFields[1]), "=")) != 2 {
return nil, probe.NewError(errMissingFieldsSignedHeadersTag)
}
signedHeaders := strings.Split(strings.Split(strings.TrimSpace(authFields[1]), "=")[1], ";")
return signedHeaders, nil
}
// verify if region value is valid with configured minioRegion.
func isValidRegion(region string, minioRegion string) *probe.Error {
if minioRegion == "" {
minioRegion = "us-east-1"
}
if region != minioRegion && region != "US" {
return probe.NewError(errInvalidRegion)
}
return nil
}
// stripRegion - strip only region from auth header.
func stripRegion(authHeaderValue string) (string, *probe.Error) {
credentialElements, err := getCredentialsFromAuth(authHeaderValue)
if err != nil {
return "", err.Trace(authHeaderValue)
}
region := credentialElements[2]
return region, nil
}
// stripAccessKeyID - strip only access key id from auth header.
func stripAccessKeyID(authHeaderValue string) (string, *probe.Error) {
credentialElements, err := getCredentialsFromAuth(authHeaderValue)
if err != nil {
return "", err.Trace()
}
accessKeyID := credentialElements[0]
if !isValidAccessKey(accessKeyID) {
return "", probe.NewError(errAccessKeyIDInvalid)
}
return accessKeyID, nil
}
// initSignatureV4 initializing signature verification.
func initSignatureV4(req *http.Request) (*v4.Signature, *probe.Error) {
// strip auth from authorization header.
authHeaderValue := req.Header.Get("Authorization")
config, err := loadConfigV2()
if err != nil {
return nil, err.Trace()
}
region, err := stripRegion(authHeaderValue)
if err != nil {
return nil, err.Trace(authHeaderValue)
}
if err = isValidRegion(region, config.Credentials.Region); err != nil {
return nil, err.Trace(authHeaderValue)
}
accessKeyID, err := stripAccessKeyID(authHeaderValue)
if err != nil {
return nil, err.Trace(authHeaderValue)
}
signature, err := getSignatureFromAuth(authHeaderValue)
if err != nil {
return nil, err.Trace(authHeaderValue)
}
signedHeaders, err := getSignedHeadersFromAuth(authHeaderValue)
if err != nil {
return nil, err.Trace(authHeaderValue)
}
if config.Credentials.AccessKeyID == accessKeyID {
signature := &v4.Signature{
AccessKeyID: config.Credentials.AccessKeyID,
SecretAccessKey: config.Credentials.SecretAccessKey,
Region: region,
Signature: signature,
SignedHeaders: signedHeaders,
Request: req,
}
return signature, nil
}
return nil, probe.NewError(errAccessKeyIDInvalid)
}
func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]string, *probe.Error) {
/// HTML Form values
formValues := make(map[string]string)
filePart := new(bytes.Buffer)
var e error
for e == nil {
var part *multipart.Part
part, e = reader.NextPart()
if part != nil {
if part.FileName() == "" {
buffer, e := ioutil.ReadAll(part)
if e != nil {
return nil, nil, probe.NewError(e)
}
formValues[http.CanonicalHeaderKey(part.FormName())] = string(buffer)
} else {
if _, e := io.Copy(filePart, part); e != nil {
return nil, nil, probe.NewError(e)
}
}
}
}
return filePart, formValues, nil
}
func applyPolicy(formValues map[string]string) *probe.Error {
if formValues["X-Amz-Algorithm"] != "AWS4-HMAC-SHA256" {
return probe.NewError(errUnsupportedAlgorithm)
}
/// Decoding policy
policyBytes, e := base64.StdEncoding.DecodeString(formValues["Policy"])
if e != nil {
return probe.NewError(e)
}
postPolicyForm, err := v4.ParsePostPolicyForm(string(policyBytes))
if err != nil {
return err.Trace()
}
if !postPolicyForm.Expiration.After(time.Now().UTC()) {
return probe.NewError(errPolicyAlreadyExpired)
}
if postPolicyForm.Conditions.Policies["$bucket"].Operator == "eq" {
if formValues["Bucket"] != postPolicyForm.Conditions.Policies["$bucket"].Value {
return probe.NewError(errPolicyMissingFields)
}
}
if postPolicyForm.Conditions.Policies["$x-amz-date"].Operator == "eq" {
if formValues["X-Amz-Date"] != postPolicyForm.Conditions.Policies["$x-amz-date"].Value {
return probe.NewError(errPolicyMissingFields)
}
}
if postPolicyForm.Conditions.Policies["$Content-Type"].Operator == "starts-with" {
if !strings.HasPrefix(formValues["Content-Type"], postPolicyForm.Conditions.Policies["$Content-Type"].Value) {
return probe.NewError(errPolicyMissingFields)
}
}
if postPolicyForm.Conditions.Policies["$Content-Type"].Operator == "eq" {
if formValues["Content-Type"] != postPolicyForm.Conditions.Policies["$Content-Type"].Value {
return probe.NewError(errPolicyMissingFields)
}
}
if postPolicyForm.Conditions.Policies["$key"].Operator == "starts-with" {
if !strings.HasPrefix(formValues["Key"], postPolicyForm.Conditions.Policies["$key"].Value) {
return probe.NewError(errPolicyMissingFields)
}
}
if postPolicyForm.Conditions.Policies["$key"].Operator == "eq" {
if formValues["Key"] != postPolicyForm.Conditions.Policies["$key"].Value {
return probe.NewError(errPolicyMissingFields)
}
}
return nil
}
// initPostPresignedPolicyV4 initializing post policy signature verification
func initPostPresignedPolicyV4(formValues map[string]string) (*v4.Signature, *probe.Error) {
credentialElements := strings.Split(strings.TrimSpace(formValues["X-Amz-Credential"]), "/")
if len(credentialElements) != 5 {
return nil, probe.NewError(errCredentialTagMalformed)
}
accessKeyID := credentialElements[0]
if !isValidAccessKey(accessKeyID) {
return nil, probe.NewError(errAccessKeyIDInvalid)
}
config, err := loadConfigV2()
if err != nil {
return nil, err.Trace()
}
region := credentialElements[2]
if config.Credentials.AccessKeyID == accessKeyID {
signature := &v4.Signature{
AccessKeyID: config.Credentials.AccessKeyID,
SecretAccessKey: config.Credentials.SecretAccessKey,
Region: region,
Signature: formValues["X-Amz-Signature"],
PresignedPolicy: formValues["Policy"],
}
return signature, nil
}
return nil, probe.NewError(errAccessKeyIDInvalid)
}
// initPresignedSignatureV4 initializing presigned signature verification
func initPresignedSignatureV4(req *http.Request) (*v4.Signature, *probe.Error) {
credentialElements := strings.Split(strings.TrimSpace(req.URL.Query().Get("X-Amz-Credential")), "/")
if len(credentialElements) != 5 {
return nil, probe.NewError(errCredentialTagMalformed)
}
accessKeyID := credentialElements[0]
if !isValidAccessKey(accessKeyID) {
return nil, probe.NewError(errAccessKeyIDInvalid)
}
config, err := loadConfigV2()
if err != nil {
return nil, err.Trace()
}
region := credentialElements[2]
signedHeaders := strings.Split(strings.TrimSpace(req.URL.Query().Get("X-Amz-SignedHeaders")), ";")
signature := strings.TrimSpace(req.URL.Query().Get("X-Amz-Signature"))
if config.Credentials.AccessKeyID == accessKeyID {
signature := &v4.Signature{
AccessKeyID: config.Credentials.AccessKeyID,
SecretAccessKey: config.Credentials.SecretAccessKey,
Region: region,
Signature: signature,
SignedHeaders: signedHeaders,
Presigned: true,
Request: req,
}
return signature, nil
}
return nil, probe.NewError(errAccessKeyIDInvalid)
}

auth-handler.go (new file, 88 lines)
View File

@ -0,0 +1,88 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"net/http"
jwtgo "github.com/dgrijalva/jwt-go"
)
const (
signV4Algorithm = "AWS4-HMAC-SHA256"
jwtAlgorithm = "Bearer"
)
// authHandler - handles all the incoming authorization headers and
// validates them if possible.
type authHandler struct {
handler http.Handler
}
// setAuthHandler to validate authorization header for the incoming request.
func setAuthHandler(h http.Handler) http.Handler {
return authHandler{h}
}
// handler for validating incoming authorization headers.
func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Verify if request is presigned, validate signature inside each handlers.
if isRequestPresignedSignatureV4(r) {
a.handler.ServeHTTP(w, r)
return
}
// Verify if request has post policy signature, validate signature
// inside POST policy handler.
if isRequestPostPolicySignatureV4(r) && r.Method == "POST" {
a.handler.ServeHTTP(w, r)
return
}
// No authorization found, let the top level caller validate if
// public request is allowed.
if _, ok := r.Header["Authorization"]; !ok {
a.handler.ServeHTTP(w, r)
return
}
// Verify if the signature algorithms are known.
if !isRequestSignatureV4(r) && !isRequestJWT(r) {
writeErrorResponse(w, r, SignatureVersionNotSupported, r.URL.Path)
return
}
// Verify JWT authorization header is present.
if isRequestJWT(r) {
// Validate Authorization header to be valid.
jwt := InitJWT()
token, e := jwtgo.ParseFromRequest(r, func(token *jwtgo.Token) (interface{}, error) {
if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
}
return jwt.secretAccessKey, nil
})
if e != nil || !token.Valid {
w.WriteHeader(http.StatusUnauthorized)
return
}
}
// For all other signed requests, let top level caller verify.
a.handler.ServeHTTP(w, r)
}
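For context, authHandler above is one link in the server's middleware chain. The following is a minimal sketch of how such http.Handler wrappers compose — the same shape as registerHandlers(mux, handlerFns...) shown later in routers.go — with a stand-in logging wrapper where setAuthHandler or setTimeValidityHandler would slot in; the middleware list and order here are assumptions for illustration only:

package main

import (
	"log"
	"net/http"
)

// HandlerFunc mirrors the middleware signature used by registerHandlers.
type HandlerFunc func(http.Handler) http.Handler

// chain folds the wrappers over a base handler; the last wrapper applied
// becomes the outermost one and sees every request first.
func chain(h http.Handler, fns ...HandlerFunc) http.Handler {
	for _, fn := range fns {
		h = fn(h)
	}
	return h
}

// logRequests is a stand-in middleware; in the real chain this slot is taken
// by wrappers such as setAuthHandler from this file.
func logRequests(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		log.Println(r.Method, r.URL.Path)
		h.ServeHTTP(w, r)
	})
}

func main() {
	api := http.NewServeMux()
	api.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("ok\n")) })
	log.Fatal(http.ListenAndServe(":9000", chain(api, logRequests)))
}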

View File

@ -17,26 +17,34 @@
package main package main
import ( import (
"bytes"
"encoding/hex" "encoding/hex"
"io"
"io/ioutil" "io/ioutil"
"mime/multipart"
"net/http" "net/http"
"github.com/gorilla/mux" "github.com/gorilla/mux"
"github.com/minio/minio/pkg/crypto/sha256" "github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/fs" "github.com/minio/minio/pkg/fs"
"github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/probe"
v4 "github.com/minio/minio/pkg/signature" signV4 "github.com/minio/minio/pkg/signature"
) )
// GetBucketLocationHandler - GET Bucket location. // GetBucketLocationHandler - GET Bucket location.
// ------------------------- // -------------------------
// This operation returns bucket location. // This operation returns bucket location.
func (api CloudStorageAPI) GetBucketLocationHandler(w http.ResponseWriter, req *http.Request) { func (api CloudStorageAPI) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(req) vars := mux.Vars(r)
bucket := vars["bucket"] bucket := vars["bucket"]
if isRequestRequiresACLCheck(req) { if isRequestRequiresACLCheck(r) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path) writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return return
} }
@ -45,20 +53,23 @@ func (api CloudStorageAPI) GetBucketLocationHandler(w http.ResponseWriter, req *
errorIf(err.Trace(), "GetBucketMetadata failed.", nil) errorIf(err.Trace(), "GetBucketMetadata failed.", nil)
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
case fs.BucketNotFound: case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path) writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
case fs.BucketNameInvalid: case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path) writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
default: default:
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, InternalError, r.URL.Path)
} }
return return
} }
// TODO: Location value for LocationResponse is deliberately not used, until // Generate response.
// we bring in a mechanism of configurable regions. For the time being encodedSuccessResponse := encodeSuccessResponse(LocationResponse{})
// default region is empty i.e 'us-east-1'. if api.Region != "us-east-1" {
encodedSuccessResponse := encodeSuccessResponse(LocationResponse{}) // generate response encodedSuccessResponse = encodeSuccessResponse(LocationResponse{
setCommonHeaders(w) // write headers Location: api.Region,
})
}
setCommonHeaders(w) // write headers.
writeSuccessResponse(w, encodedSuccessResponse) writeSuccessResponse(w, encodedSuccessResponse)
} }
@ -70,18 +81,23 @@ func (api CloudStorageAPI) GetBucketLocationHandler(w http.ResponseWriter, req *
// completed or aborted. This operation returns at most 1,000 multipart // completed or aborted. This operation returns at most 1,000 multipart
// uploads in the response. // uploads in the response.
// //
func (api CloudStorageAPI) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Request) { func (api CloudStorageAPI) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(req) vars := mux.Vars(r)
bucket := vars["bucket"] bucket := vars["bucket"]
if isRequestRequiresACLCheck(req) { if isRequestRequiresACLCheck(r) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path) writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return return
} }
resources := getBucketMultipartResources(req.URL.Query()) if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
resources := getBucketMultipartResources(r.URL.Query())
if resources.MaxUploads < 0 { if resources.MaxUploads < 0 {
writeErrorResponse(w, req, InvalidMaxUploads, req.URL.Path) writeErrorResponse(w, r, InvalidMaxUploads, r.URL.Path)
return return
} }
if resources.MaxUploads == 0 { if resources.MaxUploads == 0 {
@ -93,9 +109,9 @@ func (api CloudStorageAPI) ListMultipartUploadsHandler(w http.ResponseWriter, re
errorIf(err.Trace(), "ListMultipartUploads failed.", nil) errorIf(err.Trace(), "ListMultipartUploads failed.", nil)
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
case fs.BucketNotFound: case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path) writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
default: default:
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, InternalError, r.URL.Path)
} }
return return
} }
@ -109,26 +125,31 @@ func (api CloudStorageAPI) ListMultipartUploadsHandler(w http.ResponseWriter, re
} }
// ListObjectsHandler - GET Bucket (List Objects) // ListObjectsHandler - GET Bucket (List Objects)
// ------------------------- // -------------------------
// This implementation of the GET operation returns some or all (up to 1000) // This implementation of the GET operation returns some or all (up to 1000)
// of the objects in a bucket. You can use the request parameters as selection // of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket. // criteria to return a subset of the objects in a bucket.
// //
func (api CloudStorageAPI) ListObjectsHandler(w http.ResponseWriter, req *http.Request) { func (api CloudStorageAPI) ListObjectsHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(req) vars := mux.Vars(r)
bucket := vars["bucket"] bucket := vars["bucket"]
if isRequestRequiresACLCheck(req) { if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) { if api.Filesystem.IsPrivateBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path) writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return return
} }
} }
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
// TODO handle encoding type. // TODO handle encoding type.
prefix, marker, delimiter, maxkeys, _ := getBucketResources(req.URL.Query()) prefix, marker, delimiter, maxkeys, _ := getBucketResources(r.URL.Query())
if maxkeys < 0 { if maxkeys < 0 {
writeErrorResponse(w, req, InvalidMaxKeys, req.URL.Path) writeErrorResponse(w, r, InvalidMaxKeys, r.URL.Path)
return return
} }
if maxkeys == 0 { if maxkeys == 0 {
@ -148,16 +169,16 @@ func (api CloudStorageAPI) ListObjectsHandler(w http.ResponseWriter, req *http.R
} }
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
case fs.BucketNameInvalid: case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path) writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
case fs.BucketNotFound: case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path) writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
case fs.ObjectNotFound: case fs.ObjectNotFound:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path) writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
case fs.ObjectNameInvalid: case fs.ObjectNameInvalid:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path) writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
default: default:
errorIf(err.Trace(), "ListObjects failed.", nil) errorIf(err.Trace(), "ListObjects failed.", nil)
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, InternalError, r.URL.Path)
} }
} }
@ -165,9 +186,14 @@ func (api CloudStorageAPI) ListObjectsHandler(w http.ResponseWriter, req *http.R
// ----------- // -----------
// This implementation of the GET operation returns a list of all buckets // This implementation of the GET operation returns a list of all buckets
// owned by the authenticated sender of the request. // owned by the authenticated sender of the request.
func (api CloudStorageAPI) ListBucketsHandler(w http.ResponseWriter, req *http.Request) { func (api CloudStorageAPI) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
if isRequestRequiresACLCheck(req) { if isRequestRequiresACLCheck(r) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path) writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return return
} }
@ -183,90 +209,83 @@ func (api CloudStorageAPI) ListBucketsHandler(w http.ResponseWriter, req *http.R
return return
} }
errorIf(err.Trace(), "ListBuckets failed.", nil) errorIf(err.Trace(), "ListBuckets failed.", nil)
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, InternalError, r.URL.Path)
} }
// PutBucketHandler - PUT Bucket // PutBucketHandler - PUT Bucket
// ---------- // ----------
// This implementation of the PUT operation creates a new bucket for authenticated request // This implementation of the PUT operation creates a new bucket for authenticated request
func (api CloudStorageAPI) PutBucketHandler(w http.ResponseWriter, req *http.Request) { func (api CloudStorageAPI) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(req) vars := mux.Vars(r)
bucket := vars["bucket"] bucket := vars["bucket"]
if isRequestRequiresACLCheck(req) { if isRequestRequiresACLCheck(r) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path) writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return return
} }
// read from 'x-amz-acl' // read from 'x-amz-acl'
aclType := getACLType(req) aclType := getACLType(r)
if aclType == unsupportedACLType { if aclType == unsupportedACLType {
writeErrorResponse(w, req, NotImplemented, req.URL.Path) writeErrorResponse(w, r, NotImplemented, r.URL.Path)
return return
} }
var signature *v4.Signature
// Init signature V4 verification
if isRequestSignatureV4(req) {
var err *probe.Error
signature, err = initSignatureV4(req)
if err != nil {
switch err.ToGoError() {
case errInvalidRegion:
errorIf(err.Trace(), "Unknown region in authorization header.", nil)
writeErrorResponse(w, req, AuthorizationHeaderMalformed, req.URL.Path)
return
case errAccessKeyIDInvalid:
errorIf(err.Trace(), "Invalid access key id.", nil)
writeErrorResponse(w, req, InvalidAccessKeyID, req.URL.Path)
return
default:
errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
writeErrorResponse(w, req, InternalError, req.URL.Path)
return
}
}
}
// if body of request is non-nil then check for validity of Content-Length // if body of request is non-nil then check for validity of Content-Length
if req.Body != nil { if r.Body != nil {
/// if Content-Length is unknown/missing, deny the request /// if Content-Length is unknown/missing, deny the request
if req.ContentLength == -1 && !contains(req.TransferEncoding, "chunked") { if r.ContentLength == -1 && !contains(r.TransferEncoding, "chunked") {
writeErrorResponse(w, req, MissingContentLength, req.URL.Path) writeErrorResponse(w, r, MissingContentLength, r.URL.Path)
return return
} }
if signature != nil { }
locationBytes, e := ioutil.ReadAll(req.Body)
if e != nil { // Set http request for signature.
errorIf(probe.NewError(e), "MakeBucket failed.", nil) auth := api.Signature.SetHTTPRequestToVerify(r)
writeErrorResponse(w, req, InternalError, req.URL.Path) if isRequestPresignedSignatureV4(r) {
return ok, err := auth.DoesPresignedSignatureMatch()
} if err != nil {
sh := sha256.New() errorIf(err.Trace(r.URL.String()), "Presigned signature verification failed.", nil)
sh.Write(locationBytes) writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil))) return
if err != nil { }
errorIf(err.Trace(), "MakeBucket failed.", nil) if !ok {
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return return
} }
if !ok { } else if isRequestSignatureV4(r) {
writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path) // Verify signature for the incoming body if any.
return locationBytes, e := ioutil.ReadAll(r.Body)
} if e != nil {
errorIf(probe.NewError(e), "MakeBucket failed.", nil)
writeErrorResponse(w, r, InternalError, r.URL.Path)
return
}
sh := sha256.New()
sh.Write(locationBytes)
ok, err := auth.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
if err != nil {
errorIf(err.Trace(), "MakeBucket failed.", nil)
writeErrorResponse(w, r, InternalError, r.URL.Path)
return
}
if !ok {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
} }
} }
// Make bucket.
err := api.Filesystem.MakeBucket(bucket, getACLTypeString(aclType)) err := api.Filesystem.MakeBucket(bucket, getACLTypeString(aclType))
if err != nil { if err != nil {
errorIf(err.Trace(), "MakeBucket failed.", nil) errorIf(err.Trace(), "MakeBucket failed.", nil)
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
case fs.BucketNameInvalid: case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path) writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
case fs.BucketExists: case fs.BucketExists:
writeErrorResponse(w, req, BucketAlreadyExists, req.URL.Path) writeErrorResponse(w, r, BucketAlreadyExists, r.URL.Path)
default: default:
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, InternalError, r.URL.Path)
} }
return return
} }
@ -275,16 +294,41 @@ func (api CloudStorageAPI) PutBucketHandler(w http.ResponseWriter, req *http.Req
writeSuccessResponse(w, nil) writeSuccessResponse(w, nil)
} }
func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]string, *probe.Error) {
/// HTML Form values
formValues := make(map[string]string)
filePart := new(bytes.Buffer)
var e error
for e == nil {
var part *multipart.Part
part, e = reader.NextPart()
if part != nil {
if part.FileName() == "" {
buffer, e := ioutil.ReadAll(part)
if e != nil {
return nil, nil, probe.NewError(e)
}
formValues[http.CanonicalHeaderKey(part.FormName())] = string(buffer)
} else {
if _, e := io.Copy(filePart, part); e != nil {
return nil, nil, probe.NewError(e)
}
}
}
}
return filePart, formValues, nil
}
// PostPolicyBucketHandler - POST policy // PostPolicyBucketHandler - POST policy
// ---------- // ----------
// This implementation of the POST operation handles object creation with a specified // This implementation of the POST operation handles object creation with a specified
// signature policy in multipart/form-data // signature policy in multipart/form-data
func (api CloudStorageAPI) PostPolicyBucketHandler(w http.ResponseWriter, req *http.Request) { func (api CloudStorageAPI) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
// if body of request is non-nil then check for validity of Content-Length // if body of request is non-nil then check for validity of Content-Length
if req.Body != nil { if r.Body != nil {
/// if Content-Length is unknown/missing, deny the request /// if Content-Length is unknown/missing, deny the request
if req.ContentLength == -1 { if r.ContentLength == -1 {
writeErrorResponse(w, req, MissingContentLength, req.URL.Path) writeErrorResponse(w, r, MissingContentLength, r.URL.Path)
return return
} }
} }
@ -292,65 +336,61 @@ func (api CloudStorageAPI) PostPolicyBucketHandler(w http.ResponseWriter, req *h
// Here the parameter is the size of the form data that should // Here the parameter is the size of the form data that should
// be loaded in memory, the remaining being put in temporary // be loaded in memory, the remaining being put in temporary
// files // files
reader, e := req.MultipartReader() reader, e := r.MultipartReader()
if e != nil { if e != nil {
errorIf(probe.NewError(e), "Unable to initialize multipart reader.", nil) errorIf(probe.NewError(e), "Unable to initialize multipart reader.", nil)
writeErrorResponse(w, req, MalformedPOSTRequest, req.URL.Path) writeErrorResponse(w, r, MalformedPOSTRequest, r.URL.Path)
return return
} }
fileBody, formValues, err := extractHTTPFormValues(reader) fileBody, formValues, err := extractHTTPFormValues(reader)
if err != nil { if err != nil {
errorIf(err.Trace(), "Unable to parse form values.", nil) errorIf(err.Trace(), "Unable to parse form values.", nil)
writeErrorResponse(w, req, MalformedPOSTRequest, req.URL.Path) writeErrorResponse(w, r, MalformedPOSTRequest, r.URL.Path)
return return
} }
bucket := mux.Vars(req)["bucket"] bucket := mux.Vars(r)["bucket"]
formValues["Bucket"] = bucket formValues["Bucket"] = bucket
object := formValues["Key"] object := formValues["Key"]
signature, err := initPostPresignedPolicyV4(formValues)
if err != nil {
errorIf(err.Trace(), "Unable to initialize post policy presigned.", nil)
writeErrorResponse(w, req, MalformedPOSTRequest, req.URL.Path)
return
}
var ok bool var ok bool
if ok, err = signature.DoesPolicySignatureMatch(formValues["X-Amz-Date"]); err != nil {
// Set http request for signature.
api.Signature.SetHTTPRequestToVerify(r)
// Verify policy signature.
ok, err = api.Signature.DoesPolicySignatureMatch(formValues)
if err != nil {
errorIf(err.Trace(), "Unable to verify signature.", nil) errorIf(err.Trace(), "Unable to verify signature.", nil)
writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path) writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return return
} }
if ok == false { if !ok {
writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path) writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return return
} }
if err = applyPolicy(formValues); err != nil { if err = signV4.ApplyPolicyCond(formValues); err != nil {
errorIf(err.Trace(), "Invalid request, policy doesn't match with the endpoint.", nil) errorIf(err.Trace(), "Invalid request, policy doesn't match with the endpoint.", nil)
writeErrorResponse(w, req, MalformedPOSTRequest, req.URL.Path) writeErrorResponse(w, r, MalformedPOSTRequest, r.URL.Path)
return return
} }
metadata, err := api.Filesystem.CreateObject(bucket, object, "", 0, fileBody, nil) metadata, err := api.Filesystem.CreateObject(bucket, object, "", -1, fileBody, nil)
if err != nil { if err != nil {
errorIf(err.Trace(), "CreateObject failed.", nil) errorIf(err.Trace(), "CreateObject failed.", nil)
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
case fs.RootPathFull: case fs.RootPathFull:
writeErrorResponse(w, req, RootPathFull, req.URL.Path) writeErrorResponse(w, r, RootPathFull, r.URL.Path)
case fs.BucketNotFound: case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path) writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
case fs.BucketNameInvalid: case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path) writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
case fs.BadDigest: case fs.BadDigest:
writeErrorResponse(w, req, BadDigest, req.URL.Path) writeErrorResponse(w, r, BadDigest, r.URL.Path)
case v4.SigDoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
case fs.IncompleteBody: case fs.IncompleteBody:
writeErrorResponse(w, req, IncompleteBody, req.URL.Path) writeErrorResponse(w, r, IncompleteBody, r.URL.Path)
case fs.EntityTooLarge:
writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
case fs.InvalidDigest: case fs.InvalidDigest:
writeErrorResponse(w, req, InvalidDigest, req.URL.Path) writeErrorResponse(w, r, InvalidDigest, r.URL.Path)
default: default:
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, InternalError, r.URL.Path)
} }
return return
} }
@ -363,19 +403,24 @@ func (api CloudStorageAPI) PostPolicyBucketHandler(w http.ResponseWriter, req *h
// PutBucketACLHandler - PUT Bucket ACL // PutBucketACLHandler - PUT Bucket ACL
// ---------- // ----------
// This implementation of the PUT operation modifies the bucketACL for authenticated request // This implementation of the PUT operation modifies the bucketACL for authenticated request
func (api CloudStorageAPI) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) { func (api CloudStorageAPI) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(req) vars := mux.Vars(r)
bucket := vars["bucket"] bucket := vars["bucket"]
if isRequestRequiresACLCheck(req) { if isRequestRequiresACLCheck(r) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path) writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return return
} }
// read from 'x-amz-acl' // read from 'x-amz-acl'
aclType := getACLType(req) aclType := getACLType(r)
if aclType == unsupportedACLType { if aclType == unsupportedACLType {
writeErrorResponse(w, req, NotImplemented, req.URL.Path) writeErrorResponse(w, r, NotImplemented, r.URL.Path)
return return
} }
err := api.Filesystem.SetBucketMetadata(bucket, map[string]string{"acl": getACLTypeString(aclType)}) err := api.Filesystem.SetBucketMetadata(bucket, map[string]string{"acl": getACLTypeString(aclType)})
@ -383,11 +428,11 @@ func (api CloudStorageAPI) PutBucketACLHandler(w http.ResponseWriter, req *http.
errorIf(err.Trace(), "PutBucketACL failed.", nil) errorIf(err.Trace(), "PutBucketACL failed.", nil)
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
case fs.BucketNameInvalid: case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path) writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
case fs.BucketNotFound: case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path) writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
default: default:
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, InternalError, r.URL.Path)
} }
return return
} }
@ -400,12 +445,17 @@ func (api CloudStorageAPI) PutBucketACLHandler(w http.ResponseWriter, req *http.
// of a bucket. One must have permission to access the bucket to // know its ``acl``. This operation will return a response of 404
// know its ``acl``. This operation will return a response of 404 // if bucket not found and 403 for invalid credentials.
// if bucket not found and 403 for invalid credentials. // if bucket not found and 403 for invalid credentials.
func (api CloudStorageAPI) GetBucketACLHandler(w http.ResponseWriter, req *http.Request) { func (api CloudStorageAPI) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(req) vars := mux.Vars(r)
bucket := vars["bucket"] bucket := vars["bucket"]
if isRequestRequiresACLCheck(req) { if isRequestRequiresACLCheck(r) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path) writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return return
} }
@ -414,11 +464,11 @@ func (api CloudStorageAPI) GetBucketACLHandler(w http.ResponseWriter, req *http.
errorIf(err.Trace(), "GetBucketMetadata failed.", nil) errorIf(err.Trace(), "GetBucketMetadata failed.", nil)
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
case fs.BucketNotFound: case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path) writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
case fs.BucketNameInvalid: case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path) writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
default: default:
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, InternalError, r.URL.Path)
} }
return return
} }
@ -437,27 +487,32 @@ func (api CloudStorageAPI) GetBucketACLHandler(w http.ResponseWriter, req *http.
// The operation returns a 200 OK if the bucket exists and you // The operation returns a 200 OK if the bucket exists and you
// have permission to access it. Otherwise, the operation might // have permission to access it. Otherwise, the operation might
// return responses such as 404 Not Found and 403 Forbidden. // return responses such as 404 Not Found and 403 Forbidden.
func (api CloudStorageAPI) HeadBucketHandler(w http.ResponseWriter, req *http.Request) { func (api CloudStorageAPI) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(req) vars := mux.Vars(r)
bucket := vars["bucket"] bucket := vars["bucket"]
if isRequestRequiresACLCheck(req) { if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) { if api.Filesystem.IsPrivateBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path) writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return return
} }
} }
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
_, err := api.Filesystem.GetBucketMetadata(bucket) _, err := api.Filesystem.GetBucketMetadata(bucket)
if err != nil { if err != nil {
errorIf(err.Trace(), "GetBucketMetadata failed.", nil) errorIf(err.Trace(), "GetBucketMetadata failed.", nil)
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
case fs.BucketNotFound: case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path) writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
case fs.BucketNameInvalid: case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path) writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
default: default:
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, InternalError, r.URL.Path)
} }
return return
} }
@ -465,12 +520,17 @@ func (api CloudStorageAPI) HeadBucketHandler(w http.ResponseWriter, req *http.Re
} }
// DeleteBucketHandler - Delete bucket // DeleteBucketHandler - Delete bucket
func (api CloudStorageAPI) DeleteBucketHandler(w http.ResponseWriter, req *http.Request) { func (api CloudStorageAPI) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(req) vars := mux.Vars(r)
bucket := vars["bucket"] bucket := vars["bucket"]
if isRequestRequiresACLCheck(req) { if isRequestRequiresACLCheck(r) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path) writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return
}
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return return
} }
@ -479,11 +539,11 @@ func (api CloudStorageAPI) DeleteBucketHandler(w http.ResponseWriter, req *http.
errorIf(err.Trace(), "DeleteBucket failed.", nil) errorIf(err.Trace(), "DeleteBucket failed.", nil)
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
case fs.BucketNotFound: case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path) writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
case fs.BucketNotEmpty: case fs.BucketNotEmpty:
writeErrorResponse(w, req, BucketNotEmpty, req.URL.Path) writeErrorResponse(w, r, BucketNotEmpty, r.URL.Path)
default: default:
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, InternalError, r.URL.Path)
} }
return return
} }

View File

@ -26,6 +26,11 @@ import (
"github.com/rs/cors" "github.com/rs/cors"
) )
const (
iso8601Format = "20060102T150405Z"
privateBucket = "/minio"
)
// HandlerFunc - useful to chain different middleware http.Handler // HandlerFunc - useful to chain different middleware http.Handler
type HandlerFunc func(http.Handler) http.Handler type HandlerFunc func(http.Handler) http.Handler
@ -38,59 +43,81 @@ func registerHandlers(mux *router.Router, handlerFns ...HandlerFunc) http.Handle
return f return f
} }
type timeHandler struct { // Attempts to parse date string into known date layouts. Date layouts
handler http.Handler // currently supported are ``time.RFC1123``, ``time.RFC1123Z`` and
// special ``iso8601Format``.
func parseKnownLayouts(date string) (time.Time, error) {
parsedTime, e := time.Parse(time.RFC1123, date)
if e == nil {
return parsedTime, nil
}
parsedTime, e = time.Parse(time.RFC1123Z, date)
if e == nil {
return parsedTime, nil
}
parsedTime, e = time.Parse(iso8601Format, date)
if e == nil {
return parsedTime, nil
}
return time.Time{}, e
} }
type resourceHandler struct { // Parse date string from incoming header, currently supports and verifies
handler http.Handler // the following HTTP headers.
} //
// - X-Amz-Date
type ignoreSignatureV2RequestHandler struct { // - X-Minio-Date
handler http.Handler // - Date
} //
// In the following time layouts ``time.RFC1123``, ``time.RFC1123Z`` and ``iso8601Format``.
func parseDate(req *http.Request) (time.Time, error) { func parseDateHeader(req *http.Request) (time.Time, error) {
amzDate := req.Header.Get(http.CanonicalHeaderKey("x-amz-date")) amzDate := req.Header.Get(http.CanonicalHeaderKey("x-amz-date"))
switch { if amzDate != "" {
case amzDate != "": return parseKnownLayouts(amzDate)
if _, err := time.Parse(time.RFC1123, amzDate); err == nil {
return time.Parse(time.RFC1123, amzDate)
}
if _, err := time.Parse(time.RFC1123Z, amzDate); err == nil {
return time.Parse(time.RFC1123Z, amzDate)
}
if _, err := time.Parse(iso8601Format, amzDate); err == nil {
return time.Parse(iso8601Format, amzDate)
}
} }
minioDate := req.Header.Get(http.CanonicalHeaderKey("x-minio-date")) minioDate := req.Header.Get(http.CanonicalHeaderKey("x-minio-date"))
switch { if minioDate != "" {
case minioDate != "": return parseKnownLayouts(minioDate)
if _, err := time.Parse(time.RFC1123, minioDate); err == nil { }
return time.Parse(time.RFC1123, minioDate) genericDate := req.Header.Get("Date")
} if genericDate != "" {
if _, err := time.Parse(time.RFC1123Z, minioDate); err == nil { return parseKnownLayouts(genericDate)
return time.Parse(time.RFC1123Z, minioDate) }
} return time.Time{}, errors.New("Date header missing, invalid request.")
if _, err := time.Parse(iso8601Format, minioDate); err == nil { }
return time.Parse(iso8601Format, minioDate)
// Adds redirect rules for incoming requests.
type redirectHandler struct {
handler http.Handler
locationPrefix string
}
func setBrowserRedirectHandler(h http.Handler) http.Handler {
return redirectHandler{handler: h, locationPrefix: privateBucket}
}
func (h redirectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Re-direction handled specifically for browsers.
if strings.Contains(r.Header.Get("User-Agent"), "Mozilla") {
switch r.URL.Path {
case "/":
// This could be the default route for browser, redirect
// to 'locationPrefix/'.
fallthrough
case "/rpc":
// This is '/rpc' API route for browser, redirect to
// 'locationPrefix/rpc'.
fallthrough
case "/login":
// This is '/login' route for browser, redirect to
// 'locationPrefix/login'.
location := h.locationPrefix + r.URL.Path
// Redirect to new location.
http.Redirect(w, r, location, http.StatusTemporaryRedirect)
return
} }
} }
date := req.Header.Get("Date") h.handler.ServeHTTP(w, r)
switch {
case date != "":
if _, err := time.Parse(time.RFC1123, date); err == nil {
return time.Parse(time.RFC1123, date)
}
if _, err := time.Parse(time.RFC1123Z, date); err == nil {
return time.Parse(time.RFC1123Z, date)
}
if _, err := time.Parse(iso8601Format, amzDate); err == nil {
return time.Parse(iso8601Format, amzDate)
}
}
return time.Time{}, errors.New("invalid request")
} }
// Adds Cache-Control header // Adds Cache-Control header
@ -98,18 +125,41 @@ type cacheControlHandler struct {
handler http.Handler handler http.Handler
} }
func setCacheControlHandler(h http.Handler) http.Handler { func setBrowserCacheControlHandler(h http.Handler) http.Handler {
return cacheControlHandler{h} return cacheControlHandler{h}
} }
func (h cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (h cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.Method == "GET" { if r.Method == "GET" && strings.Contains(r.Header.Get("User-Agent"), "Mozilla") {
// expire the cache in one week // Expire cache in one hour for all browser requests.
w.Header().Set("Cache-Control", "public, max-age=604800") w.Header().Set("Cache-Control", "public, max-age=3600")
} }
h.handler.ServeHTTP(w, r) h.handler.ServeHTTP(w, r)
} }
// Adds verification for incoming paths.
type minioPrivateBucketHandler struct {
handler http.Handler
privateBucket string
}
func setPrivateBucketHandler(h http.Handler) http.Handler {
return minioPrivateBucketHandler{handler: h, privateBucket: privateBucket}
}
func (h minioPrivateBucketHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// For all non browser requests, reject access to 'privateBucket'.
if !strings.Contains(r.Header.Get("User-Agent"), "Mozilla") && strings.HasPrefix(r.URL.Path, privateBucket) {
writeErrorResponse(w, r, AllAccessDisabled, r.URL.Path)
return
}
h.handler.ServeHTTP(w, r)
}
type timeHandler struct {
handler http.Handler
}
// setTimeValidityHandler to validate parsable time over http header // setTimeValidityHandler to validate parsable time over http header
func setTimeValidityHandler(h http.Handler) http.Handler { func setTimeValidityHandler(h http.Handler) http.Handler {
return timeHandler{h} return timeHandler{h}
@ -118,19 +168,18 @@ func setTimeValidityHandler(h http.Handler) http.Handler {
func (h timeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (h timeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Verify if date headers are set, if not reject the request // Verify if date headers are set, if not reject the request
if r.Header.Get("Authorization") != "" { if r.Header.Get("Authorization") != "" {
if r.Header.Get(http.CanonicalHeaderKey("x-amz-date")) == "" && r.Header.Get(http.CanonicalHeaderKey("x-minio-date")) == "" && r.Header.Get("Date") == "" { date, e := parseDateHeader(r)
// there is no way to knowing if this is a valid request, could be a attack reject such clients if e != nil {
writeErrorResponse(w, r, RequestTimeTooSkewed, r.URL.Path) // All our internal APIs are sensitive towards Date
return // header, for all requests where Date header is not
} // present we will reject such clients.
date, err := parseDate(r)
if err != nil {
// there is no way to knowing if this is a valid request, could be a attack reject such clients
writeErrorResponse(w, r, RequestTimeTooSkewed, r.URL.Path) writeErrorResponse(w, r, RequestTimeTooSkewed, r.URL.Path)
return return
} }
duration := time.Since(date) duration := time.Since(date)
minutes := time.Duration(5) * time.Minute minutes := time.Duration(5) * time.Minute
// Verify if the request date header is more than 5 minutes
// late, reject such clients.
if duration.Minutes() > minutes.Minutes() { if duration.Minutes() > minutes.Minutes() {
writeErrorResponse(w, r, RequestTimeTooSkewed, r.URL.Path) writeErrorResponse(w, r, RequestTimeTooSkewed, r.URL.Path)
return return
@ -139,6 +188,10 @@ func (h timeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
h.handler.ServeHTTP(w, r) h.handler.ServeHTTP(w, r)
} }
type resourceHandler struct {
handler http.Handler
}
// setCorsHandler handler for CORS (Cross Origin Resource Sharing) // setCorsHandler handler for CORS (Cross Origin Resource Sharing)
func setCorsHandler(h http.Handler) http.Handler { func setCorsHandler(h http.Handler) http.Handler {
c := cors.New(cors.Options{ c := cors.New(cors.Options{
@ -149,27 +202,10 @@ func setCorsHandler(h http.Handler) http.Handler {
return c.Handler(h) return c.Handler(h)
} }
// setIgnoreSignatureV2RequestHandler -
// Verify if authorization header has signature version '2', reject it cleanly.
func setIgnoreSignatureV2RequestHandler(h http.Handler) http.Handler {
return ignoreSignatureV2RequestHandler{h}
}
// Ignore signature version '2' ServerHTTP() wrapper.
func (h ignoreSignatureV2RequestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if _, ok := r.Header["Authorization"]; ok {
if !strings.HasPrefix(r.Header.Get("Authorization"), authHeaderPrefix) {
writeErrorResponse(w, r, SignatureVersionNotSupported, r.URL.Path)
return
}
}
h.handler.ServeHTTP(w, r)
}
// setIgnoreResourcesHandler - // setIgnoreResourcesHandler -
// Ignore resources handler is wrapper handler used for API request resource validation // Ignore resources handler is wrapper handler used for API request resource validation
// Since we do not support all the S3 queries, it is necessary for us to throw back a // Since we do not support all the S3 queries, it is necessary for us to throw back a
// valid error message indicating such a feature is not implemented. // valid error message indicating that requested feature is not implemented.
func setIgnoreResourcesHandler(h http.Handler) http.Handler { func setIgnoreResourcesHandler(h http.Handler) http.Handler {
return resourceHandler{h} return resourceHandler{h}
} }
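To make the date handling added above in routers.go concrete, here is a small self-contained sketch (standard library only, not the commit's code) of the same checks: accept time.RFC1123, time.RFC1123Z, or the compact ISO 8601 layout 20060102T150405Z, then reject anything more than five minutes old, as timeHandler does with RequestTimeTooSkewed:

package main

import (
	"errors"
	"fmt"
	"time"
)

const iso8601Format = "20060102T150405Z"

// parseKnownLayouts mirrors the helper added in routers.go: try the three
// supported layouts in order and return the first one that parses.
func parseKnownLayouts(date string) (time.Time, error) {
	for _, layout := range []string{time.RFC1123, time.RFC1123Z, iso8601Format} {
		if t, e := time.Parse(layout, date); e == nil {
			return t, nil
		}
	}
	return time.Time{}, errors.New("unrecognized date layout")
}

func main() {
	for _, d := range []string{
		"Mon, 02 Jan 2006 15:04:05 GMT",        // RFC1123, far in the past
		"Mon, 02 Jan 2006 15:04:05 -0700",      // RFC1123Z, far in the past
		time.Now().UTC().Format(iso8601Format), // fresh iso8601Format date
		"not-a-date",
	} {
		t, err := parseKnownLayouts(d)
		if err != nil {
			fmt.Println("reject (unparsable):", d)
			continue
		}
		// Same skew rule as timeHandler: more than 5 minutes late is rejected.
		if time.Since(t) > 5*time.Minute {
			fmt.Println("RequestTimeTooSkewed:", d)
		} else {
			fmt.Println("accepted:", d)
		}
	}
}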

View File

@ -1,57 +0,0 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"net/http"
jwtgo "github.com/dgrijalva/jwt-go"
)
type jwtAuthHandler struct {
handler http.Handler
}
// setJWTAuthHandler -
// Verify if authorization header is of form JWT, reject it otherwise.
func setJWTAuthHandler(h http.Handler) http.Handler {
return jwtAuthHandler{h}
}
// Ignore request if authorization header is not valid.
func (h jwtAuthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Let the top level caller handle if the requests should be
// allowed, if there are no Authorization headers.
if r.Header.Get("Authorization") == "" {
h.handler.ServeHTTP(w, r)
return
}
// Validate Authorization header to be valid.
jwt := InitJWT()
token, e := jwtgo.ParseFromRequest(r, func(token *jwtgo.Token) (interface{}, error) {
if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
}
return jwt.secretAccessKey, nil
})
if e != nil || !token.Valid {
w.WriteHeader(http.StatusUnauthorized)
return
}
h.handler.ServeHTTP(w, r)
}

View File

@ -23,8 +23,6 @@ import (
"github.com/gorilla/mux" "github.com/gorilla/mux"
"github.com/minio/minio/pkg/fs" "github.com/minio/minio/pkg/fs"
"github.com/minio/minio/pkg/probe"
v4 "github.com/minio/minio/pkg/signature"
) )
const ( const (
@ -53,48 +51,53 @@ func setResponseHeaders(w http.ResponseWriter, reqParams url.Values) {
// ---------- // ----------
// This implementation of the GET operation retrieves object. To use GET, // This implementation of the GET operation retrieves object. To use GET,
// you must have READ access to the object. // you must have READ access to the object.
func (api CloudStorageAPI) GetObjectHandler(w http.ResponseWriter, req *http.Request) { func (api CloudStorageAPI) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
var object, bucket string var object, bucket string
vars := mux.Vars(req) vars := mux.Vars(r)
bucket = vars["bucket"] bucket = vars["bucket"]
object = vars["object"] object = vars["object"]
if isRequestRequiresACLCheck(req) { if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) { if api.Filesystem.IsPrivateBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path) writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return return
} }
} }
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
metadata, err := api.Filesystem.GetObjectMetadata(bucket, object) metadata, err := api.Filesystem.GetObjectMetadata(bucket, object)
if err != nil { if err != nil {
errorIf(err.Trace(), "GetObject failed.", nil) errorIf(err.Trace(), "GetObject failed.", nil)
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
case fs.BucketNameInvalid: case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path) writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
case fs.BucketNotFound: case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path) writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
case fs.ObjectNotFound: case fs.ObjectNotFound:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path) writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
case fs.ObjectNameInvalid: case fs.ObjectNameInvalid:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path) writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
default: default:
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, InternalError, r.URL.Path)
} }
return return
} }
var hrange *httpRange var hrange *httpRange
hrange, err = getRequestedRange(req.Header.Get("Range"), metadata.Size) hrange, err = getRequestedRange(r.Header.Get("Range"), metadata.Size)
if err != nil { if err != nil {
writeErrorResponse(w, req, InvalidRange, req.URL.Path) writeErrorResponse(w, r, InvalidRange, r.URL.Path)
return return
} }
// Set standard object headers. // Set standard object headers.
setObjectHeaders(w, metadata, hrange) setObjectHeaders(w, metadata, hrange)
// Set any additional requested response headers. // Set any additional requested response headers.
setResponseHeaders(w, req.URL.Query()) setResponseHeaders(w, r.URL.Query())
// Get the object. // Get the object.
if _, err = api.Filesystem.GetObject(w, bucket, object, hrange.start, hrange.length); err != nil { if _, err = api.Filesystem.GetObject(w, bucket, object, hrange.start, hrange.length); err != nil {
@ -106,32 +109,37 @@ func (api CloudStorageAPI) GetObjectHandler(w http.ResponseWriter, req *http.Req
// HeadObjectHandler - HEAD Object // HeadObjectHandler - HEAD Object
// ----------- // -----------
// The HEAD operation retrieves metadata from an object without returning the object itself. // The HEAD operation retrieves metadata from an object without returning the object itself.
func (api CloudStorageAPI) HeadObjectHandler(w http.ResponseWriter, req *http.Request) { func (api CloudStorageAPI) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
var object, bucket string var object, bucket string
vars := mux.Vars(req) vars := mux.Vars(r)
bucket = vars["bucket"] bucket = vars["bucket"]
object = vars["object"] object = vars["object"]
if isRequestRequiresACLCheck(req) { if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) { if api.Filesystem.IsPrivateBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path) writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return return
} }
} }
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
metadata, err := api.Filesystem.GetObjectMetadata(bucket, object) metadata, err := api.Filesystem.GetObjectMetadata(bucket, object)
if err != nil { if err != nil {
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
case fs.BucketNameInvalid: case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path) writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
case fs.BucketNotFound: case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path) writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
case fs.ObjectNotFound: case fs.ObjectNotFound:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path) writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
case fs.ObjectNameInvalid: case fs.ObjectNameInvalid:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path) writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
default: default:
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, InternalError, r.URL.Path)
} }
return return
} }
@ -142,86 +150,78 @@ func (api CloudStorageAPI) HeadObjectHandler(w http.ResponseWriter, req *http.Re
// PutObjectHandler - PUT Object // PutObjectHandler - PUT Object
// ---------- // ----------
// This implementation of the PUT operation adds an object to a bucket. // This implementation of the PUT operation adds an object to a bucket.
func (api CloudStorageAPI) PutObjectHandler(w http.ResponseWriter, req *http.Request) { func (api CloudStorageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
var object, bucket string var object, bucket string
vars := mux.Vars(req) vars := mux.Vars(r)
bucket = vars["bucket"] bucket = vars["bucket"]
object = vars["object"] object = vars["object"]
if isRequestRequiresACLCheck(req) { if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) { if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path) writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return return
} }
} }
// get Content-MD5 sent by client and verify if valid // get Content-MD5 sent by client and verify if valid
md5 := req.Header.Get("Content-MD5") md5 := r.Header.Get("Content-MD5")
if !isValidMD5(md5) { if !isValidMD5(md5) {
writeErrorResponse(w, req, InvalidDigest, req.URL.Path) writeErrorResponse(w, r, InvalidDigest, r.URL.Path)
return return
} }
/// if Content-Length is unknown/missing, deny the request /// if Content-Length is unknown/missing, deny the request
size := req.ContentLength size := r.ContentLength
if size == -1 { if size == -1 && !contains(r.TransferEncoding, "chunked") {
writeErrorResponse(w, req, MissingContentLength, req.URL.Path) writeErrorResponse(w, r, MissingContentLength, r.URL.Path)
return return
} }
/// maximum Upload size for objects in a single operation /// maximum Upload size for objects in a single operation
if isMaxObjectSize(size) { if isMaxObjectSize(size) {
writeErrorResponse(w, req, EntityTooLarge, req.URL.Path) writeErrorResponse(w, r, EntityTooLarge, r.URL.Path)
return return
} }
var signature *v4.Signature // Set http request for signature.
if isRequestSignatureV4(req) { auth := api.Signature.SetHTTPRequestToVerify(r)
// Init signature V4 verification
var err *probe.Error // For presigned requests verify them right here.
signature, err = initSignatureV4(req) if isRequestPresignedSignatureV4(r) {
ok, err := auth.DoesPresignedSignatureMatch()
if err != nil { if err != nil {
switch err.ToGoError() { errorIf(err.Trace(r.URL.String()), "Presigned signature verification failed.", nil)
case errInvalidRegion: writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
errorIf(err.Trace(), "Unknown region in authorization header.", nil) return
writeErrorResponse(w, req, AuthorizationHeaderMalformed, req.URL.Path)
return
case errAccessKeyIDInvalid:
errorIf(err.Trace(), "Invalid access key id.", nil)
writeErrorResponse(w, req, InvalidAccessKeyID, req.URL.Path)
return
default:
errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
writeErrorResponse(w, req, InternalError, req.URL.Path)
return
}
} }
if !ok {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
auth = nil
} }
metadata, err := api.Filesystem.CreateObject(bucket, object, md5, size, req.Body, signature) // Create object.
metadata, err := api.Filesystem.CreateObject(bucket, object, md5, size, r.Body, auth)
if err != nil { if err != nil {
errorIf(err.Trace(), "CreateObject failed.", nil) errorIf(err.Trace(), "CreateObject failed.", nil)
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
case fs.RootPathFull: case fs.RootPathFull:
writeErrorResponse(w, req, RootPathFull, req.URL.Path) writeErrorResponse(w, r, RootPathFull, r.URL.Path)
case fs.BucketNotFound: case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path) writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
case fs.BucketNameInvalid: case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path) writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
case fs.BadDigest: case fs.BadDigest:
writeErrorResponse(w, req, BadDigest, req.URL.Path) writeErrorResponse(w, r, BadDigest, r.URL.Path)
case fs.MissingDateHeader: case fs.SignDoesNotMatch:
writeErrorResponse(w, req, RequestTimeTooSkewed, req.URL.Path) writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
case v4.SigDoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
case fs.IncompleteBody: case fs.IncompleteBody:
writeErrorResponse(w, req, IncompleteBody, req.URL.Path) writeErrorResponse(w, r, IncompleteBody, r.URL.Path)
case fs.EntityTooLarge:
writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
case fs.InvalidDigest: case fs.InvalidDigest:
writeErrorResponse(w, req, InvalidDigest, req.URL.Path) writeErrorResponse(w, r, InvalidDigest, r.URL.Path)
case fs.ObjectExistsAsPrefix: case fs.ObjectExistsAsPrefix:
writeErrorResponse(w, req, ObjectExistsAsPrefix, req.URL.Path) writeErrorResponse(w, r, ObjectExistsAsPrefix, r.URL.Path)
default: default:
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, InternalError, r.URL.Path)
} }
return return
} }
@ -234,35 +234,40 @@ func (api CloudStorageAPI) PutObjectHandler(w http.ResponseWriter, req *http.Req
/// Multipart CloudStorageAPI /// Multipart CloudStorageAPI
// NewMultipartUploadHandler - New multipart upload // NewMultipartUploadHandler - New multipart upload
func (api CloudStorageAPI) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Request) { func (api CloudStorageAPI) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
var object, bucket string var object, bucket string
vars := mux.Vars(req) vars := mux.Vars(r)
bucket = vars["bucket"] bucket = vars["bucket"]
object = vars["object"] object = vars["object"]
if isRequestRequiresACLCheck(req) { if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) { if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path) writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return return
} }
} }
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
uploadID, err := api.Filesystem.NewMultipartUpload(bucket, object) uploadID, err := api.Filesystem.NewMultipartUpload(bucket, object)
if err != nil { if err != nil {
errorIf(err.Trace(), "NewMultipartUpload failed.", nil) errorIf(err.Trace(), "NewMultipartUpload failed.", nil)
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
case fs.RootPathFull: case fs.RootPathFull:
writeErrorResponse(w, req, RootPathFull, req.URL.Path) writeErrorResponse(w, r, RootPathFull, r.URL.Path)
case fs.BucketNameInvalid: case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path) writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
case fs.BucketNotFound: case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path) writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
case fs.ObjectNotFound: case fs.ObjectNotFound:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path) writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
case fs.ObjectNameInvalid: case fs.ObjectNameInvalid:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path) writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
default: default:
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, InternalError, r.URL.Path)
} }
return return
} }
@ -276,94 +281,88 @@ func (api CloudStorageAPI) NewMultipartUploadHandler(w http.ResponseWriter, req
} }
// PutObjectPartHandler - Upload part // PutObjectPartHandler - Upload part
func (api CloudStorageAPI) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) { func (api CloudStorageAPI) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(req) vars := mux.Vars(r)
bucket := vars["bucket"] bucket := vars["bucket"]
object := vars["object"] object := vars["object"]
if isRequestRequiresACLCheck(req) { if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) { if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path) writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return return
} }
} }
// get Content-MD5 sent by client and verify if valid // get Content-MD5 sent by client and verify if valid
md5 := req.Header.Get("Content-MD5") md5 := r.Header.Get("Content-MD5")
if !isValidMD5(md5) { if !isValidMD5(md5) {
writeErrorResponse(w, req, InvalidDigest, req.URL.Path) writeErrorResponse(w, r, InvalidDigest, r.URL.Path)
return return
} }
/// if Content-Length is unknown/missing, throw away /// if Content-Length is unknown/missing, throw away
size := req.ContentLength size := r.ContentLength
if size == -1 { if size == -1 {
writeErrorResponse(w, req, MissingContentLength, req.URL.Path) writeErrorResponse(w, r, MissingContentLength, r.URL.Path)
return return
} }
/// maximum Upload size for multipart objects in a single operation /// maximum Upload size for multipart objects in a single operation
if isMaxObjectSize(size) { if isMaxObjectSize(size) {
writeErrorResponse(w, req, EntityTooLarge, req.URL.Path) writeErrorResponse(w, r, EntityTooLarge, r.URL.Path)
return return
} }
uploadID := req.URL.Query().Get("uploadId") uploadID := r.URL.Query().Get("uploadId")
partIDString := req.URL.Query().Get("partNumber") partIDString := r.URL.Query().Get("partNumber")
var partID int var partID int
{ {
var err error var err error
partID, err = strconv.Atoi(partIDString) partID, err = strconv.Atoi(partIDString)
if err != nil { if err != nil {
writeErrorResponse(w, req, InvalidPart, req.URL.Path) writeErrorResponse(w, r, InvalidPart, r.URL.Path)
return return
} }
} }
var signature *v4.Signature // Set http request for signature.
if isRequestSignatureV4(req) { auth := api.Signature.SetHTTPRequestToVerify(r)
// Init signature V4 verification // For presigned requests verify right here.
var err *probe.Error if isRequestPresignedSignatureV4(r) {
signature, err = initSignatureV4(req) ok, err := auth.DoesPresignedSignatureMatch()
if err != nil { if err != nil {
switch err.ToGoError() { errorIf(err.Trace(r.URL.String()), "Presigned signature verification failed.", nil)
case errInvalidRegion: writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
errorIf(err.Trace(), "Unknown region in authorization header.", nil) return
writeErrorResponse(w, req, AuthorizationHeaderMalformed, req.URL.Path)
return
case errAccessKeyIDInvalid:
errorIf(err.Trace(), "Invalid access key id.", nil)
writeErrorResponse(w, req, InvalidAccessKeyID, req.URL.Path)
return
default:
errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
writeErrorResponse(w, req, InternalError, req.URL.Path)
return
}
} }
if !ok {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
		// Signature verified, set this to nil as payload verification
		// is not necessary.
auth = nil
} }
calculatedMD5, err := api.Filesystem.CreateObjectPart(bucket, object, uploadID, md5, partID, size, req.Body, signature) calculatedMD5, err := api.Filesystem.CreateObjectPart(bucket, object, uploadID, md5, partID, size, r.Body, auth)
if err != nil { if err != nil {
errorIf(err.Trace(), "CreateObjectPart failed.", nil) errorIf(err.Trace(), "CreateObjectPart failed.", nil)
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
case fs.RootPathFull: case fs.RootPathFull:
writeErrorResponse(w, req, RootPathFull, req.URL.Path) writeErrorResponse(w, r, RootPathFull, r.URL.Path)
case fs.InvalidUploadID: case fs.InvalidUploadID:
writeErrorResponse(w, req, NoSuchUpload, req.URL.Path) writeErrorResponse(w, r, NoSuchUpload, r.URL.Path)
case fs.BadDigest: case fs.BadDigest:
writeErrorResponse(w, req, BadDigest, req.URL.Path) writeErrorResponse(w, r, BadDigest, r.URL.Path)
case v4.SigDoesNotMatch: case fs.SignDoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path) writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
case fs.IncompleteBody: case fs.IncompleteBody:
writeErrorResponse(w, req, IncompleteBody, req.URL.Path) writeErrorResponse(w, r, IncompleteBody, r.URL.Path)
case fs.EntityTooLarge:
writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
case fs.InvalidDigest: case fs.InvalidDigest:
writeErrorResponse(w, req, InvalidDigest, req.URL.Path) writeErrorResponse(w, r, InvalidDigest, r.URL.Path)
default: default:
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, InternalError, r.URL.Path)
} }
return return
} }
@ -374,35 +373,40 @@ func (api CloudStorageAPI) PutObjectPartHandler(w http.ResponseWriter, req *http
} }
// AbortMultipartUploadHandler - Abort multipart upload // AbortMultipartUploadHandler - Abort multipart upload
func (api CloudStorageAPI) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Request) { func (api CloudStorageAPI) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(req) vars := mux.Vars(r)
bucket := vars["bucket"] bucket := vars["bucket"]
object := vars["object"] object := vars["object"]
if isRequestRequiresACLCheck(req) { if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) { if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path) writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return return
} }
} }
objectResourcesMetadata := getObjectResources(req.URL.Query()) if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
objectResourcesMetadata := getObjectResources(r.URL.Query())
err := api.Filesystem.AbortMultipartUpload(bucket, object, objectResourcesMetadata.UploadID) err := api.Filesystem.AbortMultipartUpload(bucket, object, objectResourcesMetadata.UploadID)
if err != nil { if err != nil {
errorIf(err.Trace(), "AbortMutlipartUpload failed.", nil) errorIf(err.Trace(), "AbortMutlipartUpload failed.", nil)
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
case fs.BucketNameInvalid: case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path) writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
case fs.BucketNotFound: case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path) writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
case fs.ObjectNotFound: case fs.ObjectNotFound:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path) writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
case fs.ObjectNameInvalid: case fs.ObjectNameInvalid:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path) writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
case fs.InvalidUploadID: case fs.InvalidUploadID:
writeErrorResponse(w, req, NoSuchUpload, req.URL.Path) writeErrorResponse(w, r, NoSuchUpload, r.URL.Path)
default: default:
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, InternalError, r.URL.Path)
} }
return return
} }
@ -410,25 +414,30 @@ func (api CloudStorageAPI) AbortMultipartUploadHandler(w http.ResponseWriter, re
} }
// ListObjectPartsHandler - List object parts // ListObjectPartsHandler - List object parts
func (api CloudStorageAPI) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request) { func (api CloudStorageAPI) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(req) vars := mux.Vars(r)
bucket := vars["bucket"] bucket := vars["bucket"]
object := vars["object"] object := vars["object"]
if isRequestRequiresACLCheck(req) { if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) { if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path) writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return return
} }
} }
objectResourcesMetadata := getObjectResources(req.URL.Query()) if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
objectResourcesMetadata := getObjectResources(r.URL.Query())
if objectResourcesMetadata.PartNumberMarker < 0 { if objectResourcesMetadata.PartNumberMarker < 0 {
writeErrorResponse(w, req, InvalidPartNumberMarker, req.URL.Path) writeErrorResponse(w, r, InvalidPartNumberMarker, r.URL.Path)
return return
} }
if objectResourcesMetadata.MaxParts < 0 { if objectResourcesMetadata.MaxParts < 0 {
writeErrorResponse(w, req, InvalidMaxParts, req.URL.Path) writeErrorResponse(w, r, InvalidMaxParts, r.URL.Path)
return return
} }
if objectResourcesMetadata.MaxParts == 0 { if objectResourcesMetadata.MaxParts == 0 {
@ -440,17 +449,17 @@ func (api CloudStorageAPI) ListObjectPartsHandler(w http.ResponseWriter, req *ht
errorIf(err.Trace(), "ListObjectParts failed.", nil) errorIf(err.Trace(), "ListObjectParts failed.", nil)
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
case fs.BucketNameInvalid: case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path) writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
case fs.BucketNotFound: case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path) writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
case fs.ObjectNotFound: case fs.ObjectNotFound:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path) writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
case fs.ObjectNameInvalid: case fs.ObjectNameInvalid:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path) writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
case fs.InvalidUploadID: case fs.InvalidUploadID:
writeErrorResponse(w, req, NoSuchUpload, req.URL.Path) writeErrorResponse(w, r, NoSuchUpload, r.URL.Path)
default: default:
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, InternalError, r.URL.Path)
} }
return return
} }
@ -463,72 +472,69 @@ func (api CloudStorageAPI) ListObjectPartsHandler(w http.ResponseWriter, req *ht
} }
// CompleteMultipartUploadHandler - Complete multipart upload // CompleteMultipartUploadHandler - Complete multipart upload
func (api CloudStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http.Request) { func (api CloudStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(req) vars := mux.Vars(r)
bucket := vars["bucket"] bucket := vars["bucket"]
object := vars["object"] object := vars["object"]
if isRequestRequiresACLCheck(req) { if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) { if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path) writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return return
} }
} }
objectResourcesMetadata := getObjectResources(req.URL.Query()) // Set http request for signature.
var signature *v4.Signature auth := api.Signature.SetHTTPRequestToVerify(r)
if isRequestSignatureV4(req) { // For presigned requests verify right here.
// Init signature V4 verification if isRequestPresignedSignatureV4(r) {
var err *probe.Error ok, err := auth.DoesPresignedSignatureMatch()
signature, err = initSignatureV4(req)
if err != nil { if err != nil {
switch err.ToGoError() { errorIf(err.Trace(r.URL.String()), "Presigned signature verification failed.", nil)
case errInvalidRegion: writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
errorIf(err.Trace(), "Unknown region in authorization header.", nil) return
writeErrorResponse(w, req, AuthorizationHeaderMalformed, req.URL.Path)
return
case errAccessKeyIDInvalid:
errorIf(err.Trace(), "Invalid access key id.", nil)
writeErrorResponse(w, req, InvalidAccessKeyID, req.URL.Path)
return
default:
errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
writeErrorResponse(w, req, InternalError, req.URL.Path)
return
}
} }
if !ok {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
auth = nil
} }
metadata, err := api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, req.Body, signature) // Extract object resources.
objectResourcesMetadata := getObjectResources(r.URL.Query())
// Complete multipart upload.
metadata, err := api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, r.Body, api.Signature)
if err != nil { if err != nil {
errorIf(err.Trace(), "CompleteMultipartUpload failed.", nil) errorIf(err.Trace(), "CompleteMultipartUpload failed.", nil)
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
case fs.BucketNameInvalid: case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path) writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
case fs.BucketNotFound: case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path) writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
case fs.ObjectNotFound: case fs.ObjectNotFound:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path) writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
case fs.ObjectNameInvalid: case fs.ObjectNameInvalid:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path) writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
case fs.InvalidUploadID: case fs.InvalidUploadID:
writeErrorResponse(w, req, NoSuchUpload, req.URL.Path) writeErrorResponse(w, r, NoSuchUpload, r.URL.Path)
case fs.InvalidPart: case fs.InvalidPart:
writeErrorResponse(w, req, InvalidPart, req.URL.Path) writeErrorResponse(w, r, InvalidPart, r.URL.Path)
case fs.InvalidPartOrder: case fs.InvalidPartOrder:
writeErrorResponse(w, req, InvalidPartOrder, req.URL.Path) writeErrorResponse(w, r, InvalidPartOrder, r.URL.Path)
case v4.SigDoesNotMatch: case fs.SignDoesNotMatch:
writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path) writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
case fs.IncompleteBody: case fs.IncompleteBody:
writeErrorResponse(w, req, IncompleteBody, req.URL.Path) writeErrorResponse(w, r, IncompleteBody, r.URL.Path)
case fs.MalformedXML: case fs.MalformedXML:
writeErrorResponse(w, req, MalformedXML, req.URL.Path) writeErrorResponse(w, r, MalformedXML, r.URL.Path)
default: default:
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, InternalError, r.URL.Path)
} }
return return
} }
response := generateCompleteMultpartUploadResponse(bucket, object, req.URL.String(), metadata.MD5) response := generateCompleteMultpartUploadResponse(bucket, object, r.URL.String(), metadata.MD5)
encodedSuccessResponse := encodeSuccessResponse(response) encodedSuccessResponse := encodeSuccessResponse(response)
// write headers // write headers
setCommonHeaders(w) setCommonHeaders(w)
@ -539,32 +545,37 @@ func (api CloudStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter,
/// Delete CloudStorageAPI /// Delete CloudStorageAPI
// DeleteObjectHandler - Delete object // DeleteObjectHandler - Delete object
func (api CloudStorageAPI) DeleteObjectHandler(w http.ResponseWriter, req *http.Request) { func (api CloudStorageAPI) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(req) vars := mux.Vars(r)
bucket := vars["bucket"] bucket := vars["bucket"]
object := vars["object"] object := vars["object"]
if isRequestRequiresACLCheck(req) { if isRequestRequiresACLCheck(r) {
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) { if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
writeErrorResponse(w, req, AccessDenied, req.URL.Path) writeErrorResponse(w, r, AccessDenied, r.URL.Path)
return return
} }
} }
if !isSignV4ReqAuthenticated(api.Signature, r) {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
err := api.Filesystem.DeleteObject(bucket, object) err := api.Filesystem.DeleteObject(bucket, object)
if err != nil { if err != nil {
errorIf(err.Trace(), "DeleteObject failed.", nil) errorIf(err.Trace(), "DeleteObject failed.", nil)
switch err.ToGoError().(type) { switch err.ToGoError().(type) {
case fs.BucketNameInvalid: case fs.BucketNameInvalid:
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path) writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
case fs.BucketNotFound: case fs.BucketNotFound:
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path) writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
case fs.ObjectNotFound: case fs.ObjectNotFound:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path) writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
case fs.ObjectNameInvalid: case fs.ObjectNameInvalid:
writeErrorResponse(w, req, NoSuchKey, req.URL.Path) writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
default: default:
writeErrorResponse(w, req, InternalError, req.URL.Path) writeErrorResponse(w, r, InternalError, r.URL.Path)
} }
} }
writeSuccessNoContent(w) writeSuccessNoContent(w)
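The rewritten handlers above all follow the same pattern: perform the ACL check for anonymous requests, then gate everything else behind isSignV4ReqAuthenticated(api.Signature, r), whose body is not part of this hunk. Judging from the Signature methods visible elsewhere in this diff (SetHTTPRequestToVerify, DoesSignatureMatch, DoesPresignedSignatureMatch), a helper of that shape could plausibly look like the sketch below. This is an illustration of the flow only, not the code added by this PR.

// Rough sketch, not the PR's implementation: dispatch header-signed and
// presigned requests to the matching verification method. Assumes the
// signature package is imported as 'signature'.
func isSignV4ReqAuthenticatedSketch(sign *signature.Signature, r *http.Request) bool {
	auth := sign.SetHTTPRequestToVerify(r)
	switch {
	case isRequestSignatureV4(r):
		// Header-signed request: assume the payload hash is carried in
		// the x-amz-content-sha256 header.
		ok, err := auth.DoesSignatureMatch(r.Header.Get("X-Amz-Content-Sha256"))
		return err == nil && ok
	case isRequestPresignedSignatureV4(r):
		// Presigned URL: all verification inputs live in the query string.
		ok, err := auth.DoesPresignedSignatureMatch()
		return err == nil && ok
	}
	return false
}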
View File
@ -23,7 +23,6 @@ import (
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"strings"
) )
// File container provided for atomic file writes // File container provided for atomic file writes
@ -82,7 +81,6 @@ func FileCreateWithPrefix(filePath string, prefix string) (*File, error) {
if err := os.MkdirAll(filepath.Dir(filePath), 0700); err != nil { if err := os.MkdirAll(filepath.Dir(filePath), 0700); err != nil {
return nil, err return nil, err
} }
prefix = strings.TrimSpace(prefix)
f, err := ioutil.TempFile(filepath.Dir(filePath), prefix+filepath.Base(filePath)) f, err := ioutil.TempFile(filepath.Dir(filePath), prefix+filepath.Base(filePath))
if err != nil { if err != nil {
return nil, err return nil, err
View File
@ -19,7 +19,6 @@ package fs
import ( import (
"os" "os"
"regexp" "regexp"
"strings"
"time" "time"
"unicode/utf8" "unicode/utf8"
) )
@ -167,7 +166,7 @@ var validBucket = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
// IsValidBucketName - verify bucket name in accordance with // IsValidBucketName - verify bucket name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html // - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func IsValidBucketName(bucket string) bool { func IsValidBucketName(bucket string) bool {
if strings.TrimSpace(bucket) == "" { if bucket == "" {
return false return false
} }
if len(bucket) < 3 || len(bucket) > 63 { if len(bucket) < 3 || len(bucket) > 63 {
@ -182,7 +181,7 @@ func IsValidBucketName(bucket string) bool {
// IsValidObjectName - verify object name in accordance with // IsValidObjectName - verify object name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html // - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
func IsValidObjectName(object string) bool { func IsValidObjectName(object string) bool {
if strings.TrimSpace(object) == "" { if object == "" {
return true return true
} }
if len(object) > 1024 || len(object) == 0 { if len(object) > 1024 || len(object) == 0 {
View File
@ -18,25 +18,11 @@ package fs
import "fmt" import "fmt"
// MissingDateHeader date header missing // SignDoesNotMatch - signature does not match.
type MissingDateHeader struct{} type SignDoesNotMatch struct{}
func (e MissingDateHeader) Error() string { func (e SignDoesNotMatch) Error() string {
return "Missing date header" return "Signature does not match."
}
// MissingExpiresQuery expires query string missing
type MissingExpiresQuery struct{}
func (e MissingExpiresQuery) Error() string {
return "Missing expires query string"
}
// ExpiredPresignedRequest request already expired
type ExpiredPresignedRequest struct{}
func (e ExpiredPresignedRequest) Error() string {
return "Presigned request already expired"
} }
// InvalidArgument invalid argument // InvalidArgument invalid argument
@ -156,30 +142,8 @@ func (e BadDigest) Error() string {
return "Bad digest" return "Bad digest"
} }
// ParityOverflow parity over flow
type ParityOverflow struct{}
func (e ParityOverflow) Error() string {
return "Parity overflow"
}
// ChecksumMismatch checksum mismatch
type ChecksumMismatch struct{}
func (e ChecksumMismatch) Error() string {
return "Checksum mismatch"
}
// MissingPOSTPolicy missing post policy
type MissingPOSTPolicy struct{}
func (e MissingPOSTPolicy) Error() string {
return "Missing POST policy in multipart form"
}
// InternalError - generic internal error // InternalError - generic internal error
type InternalError struct { type InternalError struct{}
}
// BackendError - generic disk backend error // BackendError - generic disk backend error
type BackendError struct { type BackendError struct {
@ -237,13 +201,6 @@ type BucketNameInvalid GenericBucketError
/// Object related errors /// Object related errors
// EntityTooLarge - object size exceeds maximum limit
type EntityTooLarge struct {
GenericObjectError
Size string
MaxSize string
}
// ObjectNameInvalid - object name provided is invalid // ObjectNameInvalid - object name provided is invalid
type ObjectNameInvalid GenericObjectError type ObjectNameInvalid GenericObjectError
@ -292,11 +249,6 @@ func (e ObjectNameInvalid) Error() string {
return "Object name invalid: " + e.Bucket + "#" + e.Object return "Object name invalid: " + e.Bucket + "#" + e.Object
} }
// Return string an error formatted as the given text
func (e EntityTooLarge) Error() string {
return e.Bucket + "#" + e.Object + "with " + e.Size + "reached maximum allowed size limit " + e.MaxSize
}
// IncompleteBody You did not provide the number of bytes specified by the Content-Length HTTP header // IncompleteBody You did not provide the number of bytes specified by the Content-Length HTTP header
type IncompleteBody GenericObjectError type IncompleteBody GenericObjectError
View File
@ -68,9 +68,11 @@ func (fs Filesystem) listObjects(bucket, prefix, marker, delimiter string, maxKe
// Bucket path prefix should always end with a separator. // Bucket path prefix should always end with a separator.
bucketPathPrefix := bucketPath + string(os.PathSeparator) bucketPathPrefix := bucketPath + string(os.PathSeparator)
prefixPath := bucketPathPrefix + prefix prefixPath := bucketPathPrefix + prefix
st, err := os.Stat(prefixPath) st, e := os.Stat(prefixPath)
if err != nil && os.IsNotExist(err) { if e != nil {
walkPath = bucketPath if os.IsNotExist(e) {
walkPath = bucketPath
}
} else { } else {
if st.IsDir() && !strings.HasSuffix(prefix, delimiter) { if st.IsDir() && !strings.HasSuffix(prefix, delimiter) {
walkPath = bucketPath walkPath = bucketPath
View File
@ -152,7 +152,7 @@ func (fs Filesystem) MakeBucket(bucket, acl string) *probe.Error {
} }
return probe.NewError(e) return probe.NewError(e)
} }
if strings.TrimSpace(acl) == "" { if acl == "" {
acl = "private" acl = "private"
} }
@ -232,7 +232,7 @@ func (fs Filesystem) SetBucketMetadata(bucket string, metadata map[string]string
if !IsValidBucketACL(acl) { if !IsValidBucketACL(acl) {
return probe.NewError(InvalidACL{ACL: acl}) return probe.NewError(InvalidACL{ACL: acl})
} }
if strings.TrimSpace(acl) == "" { if acl == "" {
acl = "private" acl = "private"
} }
bucket = fs.denormalizeBucket(bucket) bucket = fs.denormalizeBucket(bucket)
View File
@ -174,7 +174,15 @@ func saveParts(partPathPrefix string, mw io.Writer, parts []CompletePart) *probe
md5Sum = strings.TrimSuffix(md5Sum, "\"") md5Sum = strings.TrimSuffix(md5Sum, "\"")
partFile, e := os.OpenFile(partPathPrefix+md5Sum+fmt.Sprintf("$%d-$multiparts", part.PartNumber), os.O_RDONLY, 0600) partFile, e := os.OpenFile(partPathPrefix+md5Sum+fmt.Sprintf("$%d-$multiparts", part.PartNumber), os.O_RDONLY, 0600)
if e != nil { if e != nil {
return probe.NewError(e) if !os.IsNotExist(e) {
return probe.NewError(e)
}
// Some clients do not set Content-MD5, so we would have
// created part files without 'ETag' in them.
partFile, e = os.OpenFile(partPathPrefix+fmt.Sprintf("$%d-$multiparts", part.PartNumber), os.O_RDONLY, 0600)
if e != nil {
return probe.NewError(e)
}
} }
partReaders = append(partReaders, partFile) partReaders = append(partReaders, partFile)
partClosers = append(partClosers, partFile) partClosers = append(partClosers, partFile)
@ -322,9 +330,9 @@ func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum s
return "", probe.NewError(InvalidUploadID{UploadID: uploadID}) return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
} }
if strings.TrimSpace(expectedMD5Sum) != "" { if expectedMD5Sum != "" {
var expectedMD5SumBytes []byte var expectedMD5SumBytes []byte
expectedMD5SumBytes, err = base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) expectedMD5SumBytes, err = base64.StdEncoding.DecodeString(expectedMD5Sum)
if err != nil { if err != nil {
// Pro-actively close the connection // Pro-actively close the connection
return "", probe.NewError(InvalidDigest{MD5: expectedMD5Sum}) return "", probe.NewError(InvalidDigest{MD5: expectedMD5Sum})
@ -361,8 +369,8 @@ func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum s
md5sum := hex.EncodeToString(md5Hasher.Sum(nil)) md5sum := hex.EncodeToString(md5Hasher.Sum(nil))
// Verify if the written object is equal to what is expected, only // Verify if the written object is equal to what is expected, only
// if it is requested as such. // if it is requested as such.
if strings.TrimSpace(expectedMD5Sum) != "" { if expectedMD5Sum != "" {
if !isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5sum) { if !isMD5SumEqual(expectedMD5Sum, md5sum) {
partFile.CloseAndPurge() partFile.CloseAndPurge()
return "", probe.NewError(BadDigest{MD5: expectedMD5Sum, Bucket: bucket, Object: object}) return "", probe.NewError(BadDigest{MD5: expectedMD5Sum, Bucket: bucket, Object: object})
} }
@ -375,7 +383,7 @@ func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum s
} }
if !ok { if !ok {
partFile.CloseAndPurge() partFile.CloseAndPurge()
return "", probe.NewError(signV4.SigDoesNotMatch{}) return "", probe.NewError(SignDoesNotMatch{})
} }
} }
partFile.Close() partFile.Close()
@ -472,7 +480,7 @@ func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, da
} }
if !ok { if !ok {
file.CloseAndPurge() file.CloseAndPurge()
return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{}) return ObjectMetadata{}, probe.NewError(SignDoesNotMatch{})
} }
} }
completeMultipartUpload := &CompleteMultipartUpload{} completeMultipartUpload := &CompleteMultipartUpload{}
View File
@ -178,7 +178,7 @@ func getMetadata(rootPath, bucket, object string) (ObjectMetadata, *probe.Error)
// isMD5SumEqual - returns true if the expected and actual md5sums match, false otherwise	// isMD5SumEqual - returns true if the expected and actual md5sums match, false otherwise
func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) bool { func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) bool {
// Verify the md5sum. // Verify the md5sum.
if strings.TrimSpace(expectedMD5Sum) != "" && strings.TrimSpace(actualMD5Sum) != "" { if expectedMD5Sum != "" && actualMD5Sum != "" {
// Decode md5sum to bytes from their hexadecimal // Decode md5sum to bytes from their hexadecimal
// representations. // representations.
expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum) expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum)
@ -199,7 +199,7 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) bool {
} }
// CreateObject - create an object. // CreateObject - create an object.
func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size int64, data io.Reader, signature *signV4.Signature) (ObjectMetadata, *probe.Error) { func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size int64, data io.Reader, sig *signV4.Signature) (ObjectMetadata, *probe.Error) {
di, e := disk.GetInfo(fs.path) di, e := disk.GetInfo(fs.path)
if e != nil { if e != nil {
return ObjectMetadata{}, probe.NewError(e) return ObjectMetadata{}, probe.NewError(e)
@ -233,9 +233,9 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
// Get object path. // Get object path.
objectPath := filepath.Join(bucketPath, object) objectPath := filepath.Join(bucketPath, object)
if strings.TrimSpace(expectedMD5Sum) != "" { if expectedMD5Sum != "" {
var expectedMD5SumBytes []byte var expectedMD5SumBytes []byte
expectedMD5SumBytes, e = base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) expectedMD5SumBytes, e = base64.StdEncoding.DecodeString(expectedMD5Sum)
if e != nil { if e != nil {
// Pro-actively close the connection. // Pro-actively close the connection.
return ObjectMetadata{}, probe.NewError(InvalidDigest{MD5: expectedMD5Sum}) return ObjectMetadata{}, probe.NewError(InvalidDigest{MD5: expectedMD5Sum})
@ -244,7 +244,7 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
} }
// Write object. // Write object.
file, e := atomic.FileCreateWithPrefix(objectPath, "$tmpobject") file, e := atomic.FileCreateWithPrefix(objectPath, expectedMD5Sum+"$tmpobject")
if e != nil { if e != nil {
switch e := e.(type) { switch e := e.(type) {
case *os.PathError: case *os.PathError:
@ -279,22 +279,22 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
md5Sum := hex.EncodeToString(md5Hasher.Sum(nil)) md5Sum := hex.EncodeToString(md5Hasher.Sum(nil))
// Verify if the written object is equal to what is expected, only // Verify if the written object is equal to what is expected, only
// if it is requested as such. // if it is requested as such.
if strings.TrimSpace(expectedMD5Sum) != "" { if expectedMD5Sum != "" {
if !isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum) { if !isMD5SumEqual(expectedMD5Sum, md5Sum) {
file.CloseAndPurge() file.CloseAndPurge()
return ObjectMetadata{}, probe.NewError(BadDigest{MD5: expectedMD5Sum, Bucket: bucket, Object: object}) return ObjectMetadata{}, probe.NewError(BadDigest{MD5: expectedMD5Sum, Bucket: bucket, Object: object})
} }
} }
sha256Sum := hex.EncodeToString(sha256Hasher.Sum(nil)) sha256Sum := hex.EncodeToString(sha256Hasher.Sum(nil))
if signature != nil { if sig != nil {
ok, err := signature.DoesSignatureMatch(sha256Sum) ok, err := sig.DoesSignatureMatch(sha256Sum)
if err != nil { if err != nil {
file.CloseAndPurge() file.CloseAndPurge()
return ObjectMetadata{}, err.Trace() return ObjectMetadata{}, err.Trace()
} }
if !ok { if !ok {
file.CloseAndPurge() file.CloseAndPurge()
return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{}) return ObjectMetadata{}, signV4.ErrSignDoesNotMath("Signature does not match")
} }
} }
file.Close() file.Close()
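CreateObject above computes both digests in the same pass that writes the object: the body is streamed once while an MD5 sum (for Content-MD5 verification) and a SHA-256 sum (for the Signature V4 payload check) accumulate alongside it. A self-contained sketch of that hash-while-writing pattern, using only the standard library where the diff uses minio's pkg/crypto/sha256:

package main

import (
	"crypto/md5"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	body := strings.NewReader("hello world")
	md5Hasher := md5.New()
	sha256Hasher := sha256.New()
	var object strings.Builder

	// One pass over the data feeds the object writer and both hashers.
	if _, err := io.Copy(io.MultiWriter(&object, md5Hasher, sha256Hasher), body); err != nil {
		panic(err)
	}
	fmt.Println("md5:   ", hex.EncodeToString(md5Hasher.Sum(nil)))
	fmt.Println("sha256:", hex.EncodeToString(sha256Hasher.Sum(nil)))
}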
View File
@ -16,33 +16,41 @@
package signature package signature
// MissingDateHeader date header missing import (
type MissingDateHeader struct{} "fmt"
func (e MissingDateHeader) Error() string { "github.com/minio/minio/pkg/probe"
return "Missing date header" )
type errFunc func(msg string, a ...string) *probe.Error
func errFactory() errFunc {
return func(msg string, a ...string) *probe.Error {
return probe.NewError(fmt.Errorf("%s, Args: %s", msg, a)).Untrace()
}
} }
// MissingExpiresQuery expires query string missing // Various errors.
type MissingExpiresQuery struct{} var (
ErrPolicyAlreadyExpired = errFactory()
func (e MissingExpiresQuery) Error() string { ErrInvalidRegion = errFactory()
return "Missing expires query string" ErrInvalidDateFormat = errFactory()
} ErrInvalidService = errFactory()
ErrInvalidRequestVersion = errFactory()
// ExpiredPresignedRequest request already expired ErrMissingFields = errFactory()
type ExpiredPresignedRequest struct{} ErrMissingCredTag = errFactory()
ErrCredMalformed = errFactory()
func (e ExpiredPresignedRequest) Error() string { ErrMissingSignTag = errFactory()
return "Presigned request already expired" ErrMissingSignHeadersTag = errFactory()
} ErrMissingDateHeader = errFactory()
ErrMalformedDate = errFactory()
// SigDoesNotMatch invalid signature ErrMalformedExpires = errFactory()
type SigDoesNotMatch struct { ErrAuthHeaderEmpty = errFactory()
SignatureSent string ErrUnsuppSignAlgo = errFactory()
SignatureCalculated string ErrMissingExpiresQuery = errFactory()
} ErrExpiredPresignRequest = errFactory()
ErrSignDoesNotMath = errFactory()
func (e SigDoesNotMatch) Error() string { ErrInvalidAccessKeyID = errFactory()
return "The request signature we calculated does not match the signature you provided" ErrInvalidSecretKey = errFactory()
} ErrRegionISEmpty = errFactory()
)
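Each variable in the block above is an errFunc, so a call site builds a *probe.Error by invoking it with a message plus optional arguments and usually chains .Trace(), exactly as the later hunks in this diff do. A minimal sketch of the calling convention; the region check itself is made up for illustration:

// Illustrative only: how errFactory-generated errors are raised at call sites.
func checkRegion(region string) *probe.Error {
	if region == "" {
		return ErrRegionISEmpty("Region is empty.").Trace()
	}
	if region != "us-east-1" { // example condition, not from the PR
		return ErrInvalidRegion("Requested region is not recognized.", region).Trace(region)
	}
	return nil
}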
View File
@ -17,9 +17,11 @@
package signature package signature
import ( import (
"encoding/base64"
"encoding/json" "encoding/json"
"fmt" "fmt"
"reflect" "reflect"
"strings"
"time" "time"
"github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/probe"
@ -67,8 +69,8 @@ type PostPolicyForm struct {
} }
} }
// ParsePostPolicyForm - Parse JSON policy string into typed PostPolicyForm structure.	// parsePostPolicyFormV4 - Parse JSON policy string into typed PostPolicyForm structure.
func ParsePostPolicyForm(policy string) (PostPolicyForm, *probe.Error) { func parsePostPolicyFormV4(policy string) (PostPolicyForm, *probe.Error) {
// Convert po into interfaces and // Convert po into interfaces and
// perform strict type conversion using reflection. // perform strict type conversion using reflection.
var rawPolicy struct { var rawPolicy struct {
@ -155,3 +157,53 @@ func ParsePostPolicyForm(policy string) (PostPolicyForm, *probe.Error) {
} }
return parsedPolicy, nil return parsedPolicy, nil
} }
// ApplyPolicyCond - apply policy conditions and validate input values.
func ApplyPolicyCond(formValues map[string]string) *probe.Error {
if formValues["X-Amz-Algorithm"] != signV4Algorithm {
return ErrUnsuppSignAlgo("Unsupported signature algorithm in policy form data.", formValues["X-Amz-Algorithm"]).Trace(formValues["X-Amz-Algorithm"])
}
/// Decoding policy
policyBytes, e := base64.StdEncoding.DecodeString(formValues["Policy"])
if e != nil {
return probe.NewError(e)
}
postPolicyForm, err := parsePostPolicyFormV4(string(policyBytes))
if err != nil {
return err.Trace()
}
if !postPolicyForm.Expiration.After(time.Now().UTC()) {
return ErrPolicyAlreadyExpired("Policy has already expired, please generate a new one.")
}
if postPolicyForm.Conditions.Policies["$bucket"].Operator == "eq" {
if formValues["Bucket"] != postPolicyForm.Conditions.Policies["$bucket"].Value {
return ErrMissingFields("Policy bucket is missing.", formValues["Bucket"])
}
}
if postPolicyForm.Conditions.Policies["$x-amz-date"].Operator == "eq" {
if formValues["X-Amz-Date"] != postPolicyForm.Conditions.Policies["$x-amz-date"].Value {
return ErrMissingFields("Policy date is missing.", formValues["X-Amz-Date"])
}
}
if postPolicyForm.Conditions.Policies["$Content-Type"].Operator == "starts-with" {
if !strings.HasPrefix(formValues["Content-Type"], postPolicyForm.Conditions.Policies["$Content-Type"].Value) {
return ErrMissingFields("Policy content-type is missing or invalid.", formValues["Content-Type"])
}
}
if postPolicyForm.Conditions.Policies["$Content-Type"].Operator == "eq" {
if formValues["Content-Type"] != postPolicyForm.Conditions.Policies["$Content-Type"].Value {
return ErrMissingFields("Policy content-Type is missing or invalid.", formValues["Content-Type"])
}
}
if postPolicyForm.Conditions.Policies["$key"].Operator == "starts-with" {
if !strings.HasPrefix(formValues["Key"], postPolicyForm.Conditions.Policies["$key"].Value) {
return ErrMissingFields("Policy key is missing.", formValues["Key"])
}
}
if postPolicyForm.Conditions.Policies["$key"].Operator == "eq" {
if formValues["Key"] != postPolicyForm.Conditions.Policies["$key"].Value {
return ErrMissingFields("Policy key is missing.", formValues["Key"])
}
}
return nil
}
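ApplyPolicyCond only enforces the conditions declared inside the decoded policy document; the signature over the policy itself is verified separately by DoesPolicySignatureMatch. A hypothetical sketch of how a browser POST upload handler might combine the two; the function and the formValues map are assumptions for illustration, not code from this PR:

// Hypothetical glue code: verify the policy signature first, then apply the
// policy conditions. formValues is assumed to hold the multipart form fields
// (Policy, X-Amz-Algorithm, X-Amz-Credential, X-Amz-Date, X-Amz-Signature,
// Bucket, Key, Content-Type, ...).
func verifyPostPolicy(sign *Signature, formValues map[string]string) *probe.Error {
	ok, err := sign.DoesPolicySignatureMatch(formValues)
	if err != nil {
		return err.Trace()
	}
	if !ok {
		return ErrSignDoesNotMath("Policy signature does not match.")
	}
	// Signature checked out, now enforce the declared conditions.
	return ApplyPolicyCond(formValues)
}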
View File
@ -18,16 +18,13 @@ package signature
import ( import (
"bytes" "bytes"
"crypto/hmac"
"encoding/hex" "encoding/hex"
"net/http" "net/http"
"net/url" "net/url"
"regexp"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"unicode/utf8"
"github.com/minio/minio/pkg/crypto/sha256" "github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/probe"
@ -35,72 +32,52 @@ import (
// Signature - local variables // Signature - local variables
type Signature struct { type Signature struct {
AccessKeyID string accessKeyID string
SecretAccessKey string secretAccessKey string
Region string region string
Presigned bool httpRequest *http.Request
PresignedPolicy string extractedSignedHeaders http.Header
SignedHeaders []string
Signature string
Request *http.Request
} }
const ( const (
authHeaderPrefix = "AWS4-HMAC-SHA256" signV4Algorithm = "AWS4-HMAC-SHA256"
iso8601Format = "20060102T150405Z" iso8601Format = "20060102T150405Z"
yyyymmdd = "20060102" yyyymmdd = "20060102"
) )
// sumHMAC calculate hmac between two input byte array	// New - initialize new authorization checks.
func sumHMAC(key []byte, data []byte) []byte { func New(accessKeyID, secretAccessKey, region string) (*Signature, *probe.Error) {
hash := hmac.New(sha256.New, key) if !isValidAccessKey.MatchString(accessKeyID) {
hash.Write(data) return nil, ErrInvalidAccessKeyID("Invalid access key id.", accessKeyID).Trace(accessKeyID)
return hash.Sum(nil) }
if !isValidSecretKey.MatchString(secretAccessKey) {
return nil, ErrInvalidAccessKeyID("Invalid secret key.", secretAccessKey).Trace(secretAccessKey)
}
if region == "" {
return nil, ErrRegionISEmpty("Region is empty.").Trace()
}
signature := &Signature{
accessKeyID: accessKeyID,
secretAccessKey: secretAccessKey,
region: region,
}
return signature, nil
} }
// getURLEncodedName encode the strings from UTF-8 byte representations to HTML hex escape sequences // SetHTTPRequestToVerify - sets the http request which needs to be verified.
// func (s *Signature) SetHTTPRequestToVerify(r *http.Request) *Signature {
// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 // Do not set http request if its 'nil'.
// non english characters cannot be parsed due to the nature in which url.Encode() is written if r == nil {
// return s
// This function on the other hand is a direct replacement for url.Encode() technique to support
// pretty much every UTF-8 character.
func getURLEncodedName(name string) string {
// if object matches reserved string, no need to encode them
reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
if reservedNames.MatchString(name) {
return name
} }
var encodedName string s.httpRequest = r
for _, s := range name { return s
if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
encodedName = encodedName + string(s)
continue
}
switch s {
case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
encodedName = encodedName + string(s)
continue
default:
len := utf8.RuneLen(s)
if len < 0 {
return name
}
u := make([]byte, len)
utf8.EncodeRune(u, s)
for _, r := range u {
hex := hex.EncodeToString([]byte{r})
encodedName = encodedName + "%" + strings.ToUpper(hex)
}
}
}
return encodedName
} }
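With the fields now unexported, callers go through New and SetHTTPRequestToVerify instead of filling the struct directly: one Signature is built from the server's credentials and region, and each incoming request is bound to it before calling the Does*SignatureMatch methods. A sketch of that usage, not code from this PR; the credentials are the AWS documentation placeholders and the presigned call is chosen only for brevity:

package main

import (
	"log"
	"net/http"

	signv4 "github.com/minio/minio/pkg/signature"
)

func main() {
	sign, err := signv4.New("AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFXEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", "us-east-1")
	if err != nil {
		log.Fatalln(err)
	}
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Bind the incoming request, then verify (assuming a presigned request here).
		ok, verr := sign.SetHTTPRequestToVerify(r).DoesPresignedSignatureMatch()
		if verr != nil || !ok {
			http.Error(w, "signature does not match", http.StatusForbidden)
			return
		}
		w.WriteHeader(http.StatusOK)
	})
	log.Fatalln(http.ListenAndServe(":9000", nil))
}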
// getCanonicalHeaders generates a list of request headers with their values	// getCanonicalHeaders generates a list of request headers with their values
func (r Signature) getCanonicalHeaders(signedHeaders map[string][]string) string { func (s Signature) getCanonicalHeaders(signedHeaders http.Header) string {
var headers []string var headers []string
vals := make(map[string][]string) vals := make(http.Header)
for k, vv := range signedHeaders { for k, vv := range signedHeaders {
headers = append(headers, strings.ToLower(k)) headers = append(headers, strings.ToLower(k))
vals[strings.ToLower(k)] = vv vals[strings.ToLower(k)] = vv
@ -114,7 +91,7 @@ func (r Signature) getCanonicalHeaders(signedHeaders map[string][]string) string
buf.WriteByte(':') buf.WriteByte(':')
switch { switch {
case k == "host": case k == "host":
buf.WriteString(r.Request.Host) buf.WriteString(s.httpRequest.Host)
fallthrough fallthrough
default: default:
for idx, v := range vals[k] { for idx, v := range vals[k] {
@ -130,7 +107,7 @@ func (r Signature) getCanonicalHeaders(signedHeaders map[string][]string) string
} }
// getSignedHeaders generates a string, i.e. an alphabetically sorted, semicolon-separated list of lowercase request header names	// getSignedHeaders generates a string, i.e. an alphabetically sorted, semicolon-separated list of lowercase request header names
func (r Signature) getSignedHeaders(signedHeaders map[string][]string) string { func (s Signature) getSignedHeaders(signedHeaders http.Header) string {
var headers []string var headers []string
for k := range signedHeaders { for k := range signedHeaders {
headers = append(headers, strings.ToLower(k)) headers = append(headers, strings.ToLower(k))
@ -140,41 +117,6 @@ func (r Signature) getSignedHeaders(signedHeaders map[string][]string) string {
return strings.Join(headers, ";") return strings.Join(headers, ";")
} }
// extractSignedHeaders extract signed headers from Authorization header
func (r Signature) extractSignedHeaders() map[string][]string {
extractedSignedHeadersMap := make(map[string][]string)
for _, header := range r.SignedHeaders {
val, ok := r.Request.Header[http.CanonicalHeaderKey(header)]
if !ok {
// Golang http server strips off 'Expect' header, if the
// client sent this as part of signed headers we need to
// handle otherwise we would see a signature mismatch.
// `aws-cli` sets this as part of signed headers which is
// a bad idea since servers trying to implement AWS
// Signature version '4' will all encounter this issue.
//
// According to
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20
// Expect header is always of form:
//
// Expect = "Expect" ":" 1#expectation
// expectation = "100-continue" | expectation-extension
//
// So it safe to assume that '100-continue' is what would
// be sent, for the time being keep this work around.
// Adding a *TODO* to remove this later when Golang server
// doesn't filter out the 'Expect' header.
if header == "expect" {
extractedSignedHeadersMap[header] = []string{"100-continue"}
}
// if not found continue, we will fail later
continue
}
extractedSignedHeadersMap[header] = val
}
return extractedSignedHeadersMap
}
// getCanonicalRequest generate a canonical request of style // getCanonicalRequest generate a canonical request of style
// //
// canonicalRequest = // canonicalRequest =
@ -185,18 +127,18 @@ func (r Signature) extractSignedHeaders() map[string][]string {
// <SignedHeaders>\n // <SignedHeaders>\n
// <HashedPayload> // <HashedPayload>
// //
func (r *Signature) getCanonicalRequest() string { func (s *Signature) getCanonicalRequest() string {
payload := r.Request.Header.Get(http.CanonicalHeaderKey("x-amz-content-sha256")) payload := s.httpRequest.Header.Get(http.CanonicalHeaderKey("x-amz-content-sha256"))
r.Request.URL.RawQuery = strings.Replace(r.Request.URL.Query().Encode(), "+", "%20", -1) s.httpRequest.URL.RawQuery = strings.Replace(s.httpRequest.URL.Query().Encode(), "+", "%20", -1)
encodedPath := getURLEncodedName(r.Request.URL.Path) encodedPath := getURLEncodedName(s.httpRequest.URL.Path)
// convert any space strings back to "+" // Convert any space strings back to "+".
encodedPath = strings.Replace(encodedPath, "+", "%20", -1) encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
canonicalRequest := strings.Join([]string{ canonicalRequest := strings.Join([]string{
r.Request.Method, s.httpRequest.Method,
encodedPath, encodedPath,
r.Request.URL.RawQuery, s.httpRequest.URL.RawQuery,
r.getCanonicalHeaders(r.extractSignedHeaders()), s.getCanonicalHeaders(s.extractedSignedHeaders),
r.getSignedHeaders(r.extractSignedHeaders()), s.getSignedHeaders(s.extractedSignedHeaders),
payload, payload,
}, "\n") }, "\n")
return canonicalRequest return canonicalRequest
@ -212,69 +154,89 @@ func (r *Signature) getCanonicalRequest() string {
// <SignedHeaders>\n // <SignedHeaders>\n
// <HashedPayload> // <HashedPayload>
// //
func (r Signature) getPresignedCanonicalRequest(presignedQuery string) string { func (s Signature) getPresignedCanonicalRequest(presignedQuery string) string {
rawQuery := strings.Replace(presignedQuery, "+", "%20", -1) rawQuery := strings.Replace(presignedQuery, "+", "%20", -1)
encodedPath := getURLEncodedName(r.Request.URL.Path) encodedPath := getURLEncodedName(s.httpRequest.URL.Path)
// convert any space strings back to "+" // Convert any space strings back to "+".
encodedPath = strings.Replace(encodedPath, "+", "%20", -1) encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
canonicalRequest := strings.Join([]string{ canonicalRequest := strings.Join([]string{
r.Request.Method, s.httpRequest.Method,
encodedPath, encodedPath,
rawQuery, rawQuery,
r.getCanonicalHeaders(r.extractSignedHeaders()), s.getCanonicalHeaders(s.extractedSignedHeaders),
r.getSignedHeaders(r.extractSignedHeaders()), s.getSignedHeaders(s.extractedSignedHeaders),
"UNSIGNED-PAYLOAD", "UNSIGNED-PAYLOAD",
}, "\n") }, "\n")
return canonicalRequest return canonicalRequest
} }
// getScope generates a string of a specific date, an AWS region, and a service	// getScope generates a string of a specific date, an AWS region, and a service.
func (r Signature) getScope(t time.Time) string { func (s Signature) getScope(t time.Time) string {
scope := strings.Join([]string{ scope := strings.Join([]string{
t.Format(yyyymmdd), t.Format(yyyymmdd),
r.Region, s.region,
"s3", "s3",
"aws4_request", "aws4_request",
}, "/") }, "/")
return scope return scope
} }
// getStringToSign generates a string to sign based on selected query values	// getStringToSign generates a string to sign based on selected query values.
func (r Signature) getStringToSign(canonicalRequest string, t time.Time) string { func (s Signature) getStringToSign(canonicalRequest string, t time.Time) string {
stringToSign := authHeaderPrefix + "\n" + t.Format(iso8601Format) + "\n" stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n"
stringToSign = stringToSign + r.getScope(t) + "\n" stringToSign = stringToSign + s.getScope(t) + "\n"
canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest)) canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest))
stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:]) stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:])
return stringToSign return stringToSign
} }
// getSigningKey returns the hmac seed used to calculate the final signature	// getSigningKey returns the hmac seed used to calculate the final signature.
func (r Signature) getSigningKey(t time.Time) []byte { func (s Signature) getSigningKey(t time.Time) []byte {
secret := r.SecretAccessKey secret := s.secretAccessKey
date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
region := sumHMAC(date, []byte(r.Region)) region := sumHMAC(date, []byte(s.region))
service := sumHMAC(region, []byte("s3")) service := sumHMAC(region, []byte("s3"))
signingKey := sumHMAC(service, []byte("aws4_request")) signingKey := sumHMAC(service, []byte("aws4_request"))
return signingKey return signingKey
} }
// getSignature returns the final signature in hexadecimal form	// getSignature returns the final signature in hexadecimal form.
func (r Signature) getSignature(signingKey []byte, stringToSign string) string { func (s Signature) getSignature(signingKey []byte, stringToSign string) string {
return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
} }
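getSigningKey and getSignature above implement the standard AWS Signature Version 4 derivation: the signing key is an HMAC-SHA256 chain over the date, the region, the service name and the literal "aws4_request", and the final signature is the hex-encoded HMAC of the string to sign under that key. A self-contained sketch of the same chain using only the standard library; the secret key and the string to sign are placeholders:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

func sumHMAC(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret := "wJalrXUtnFXEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" // placeholder secret key
	region := "us-east-1"
	t := time.Date(2016, 2, 17, 0, 0, 0, 0, time.UTC)
	stringToSign := "AWS4-HMAC-SHA256\n20160217T000000Z\n20160217/us-east-1/s3/aws4_request\n<hashed-canonical-request>"

	// Key derivation, mirroring getSigningKey.
	dateKey := sumHMAC([]byte("AWS4"+secret), []byte(t.Format("20060102")))
	regionKey := sumHMAC(dateKey, []byte(region))
	serviceKey := sumHMAC(regionKey, []byte("s3"))
	signingKey := sumHMAC(serviceKey, []byte("aws4_request"))

	// Final signature, mirroring getSignature.
	fmt.Println(hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))))
}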
// DoesPolicySignatureMatch - Verify query headers with post policy // DoesPolicySignatureMatch - Verify query headers with post policy
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
// returns true if matches, false otherwise. if error is not nil then it is always false // returns true if matches, false otherwise. if error is not nil then it is always false
func (r *Signature) DoesPolicySignatureMatch(date string) (bool, *probe.Error) { func (s *Signature) DoesPolicySignatureMatch(formValues map[string]string) (bool, *probe.Error) {
t, err := time.Parse(iso8601Format, date) // Parse credential tag.
creds, err := parseCredential("Credential=" + formValues["X-Amz-Credential"])
if err != nil { if err != nil {
return false, probe.NewError(err) return false, err.Trace(formValues["X-Amz-Credential"])
} }
signingKey := r.getSigningKey(t)
stringToSign := string(r.PresignedPolicy) // Verify if the access key id matches.
newSignature := r.getSignature(signingKey, stringToSign) if creds.accessKeyID != s.accessKeyID {
if newSignature != r.Signature { return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", creds.accessKeyID).Trace(creds.accessKeyID)
}
// Verify if the region is valid.
reqRegion := creds.scope.region
if !isValidRegion(reqRegion, s.region) {
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion)
}
// Save region.
s.region = reqRegion
// Parse date string.
t, e := time.Parse(iso8601Format, formValues["X-Amz-Date"])
if e != nil {
return false, probe.NewError(e)
}
signingKey := s.getSigningKey(t)
newSignature := s.getSignature(signingKey, formValues["Policy"])
if newSignature != formValues["X-Amz-Signature"] {
return false, nil return false, nil
} }
return true, nil return true, nil
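For browser-based POST uploads the string to sign is simply the base64-encoded policy document carried in the form, so verification reduces to re-deriving the signing key for the request date and region and HMAC-ing the Policy field. A hedged sketch of the form values DoesPolicySignatureMatch consumes; every value here is an illustrative placeholder, not a real credential or signature:

package main

import "fmt"

func main() {
	// Illustrative only: the form fields a browser-based multipart/form-data
	// POST upload carries for signature version '4'.
	formValues := map[string]string{
		"X-Amz-Algorithm":  "AWS4-HMAC-SHA256",
		"X-Amz-Credential": "EXAMPLEACCESSKEY1234/20160218/us-east-1/s3/aws4_request",
		"X-Amz-Date":       "20160218T034800Z",
		"Policy":           "<base64-encoded policy JSON document>",
		"X-Amz-Signature":  "<hex of HMAC(signingKey, Policy)>",
	}
	fmt.Println(formValues["X-Amz-Credential"])
}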
@ -283,35 +245,49 @@ func (r *Signature) DoesPolicySignatureMatch(date string) (bool, *probe.Error) {
// DoesPresignedSignatureMatch - Verify query headers with presigned signature // DoesPresignedSignatureMatch - Verify query headers with presigned signature
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
// returns true if matches, false otherwise. if error is not nil then it is always false // returns true if matches, false otherwise. if error is not nil then it is always false
func (r *Signature) DoesPresignedSignatureMatch() (bool, *probe.Error) { func (s *Signature) DoesPresignedSignatureMatch() (bool, *probe.Error) {
query := make(url.Values) // Parse request query string.
query.Set("X-Amz-Algorithm", authHeaderPrefix) preSignV4Values, err := parsePreSignV4(s.httpRequest.URL.Query())
if err != nil {
return false, err.Trace(s.httpRequest.URL.String())
}
var date string // Verify if the access key id matches.
if date = r.Request.URL.Query().Get("X-Amz-Date"); date == "" { if preSignV4Values.Creds.accessKeyID != s.accessKeyID {
return false, probe.NewError(MissingDateHeader{}) return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", preSignV4Values.Creds.accessKeyID).Trace(preSignV4Values.Creds.accessKeyID)
} }
t, err := time.Parse(iso8601Format, date)
if err != nil { // Verify if region is valid.
return false, probe.NewError(err) reqRegion := preSignV4Values.Creds.scope.region
if !isValidRegion(reqRegion, s.region) {
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion)
} }
if _, ok := r.Request.URL.Query()["X-Amz-Expires"]; !ok {
return false, probe.NewError(MissingExpiresQuery{}) // Save region.
} s.region = reqRegion
expireSeconds, err := strconv.Atoi(r.Request.URL.Query().Get("X-Amz-Expires"))
if err != nil { // Extract all the signed headers along with their values.
return false, probe.NewError(err) s.extractedSignedHeaders = extractSignedHeaders(preSignV4Values.SignedHeaders, s.httpRequest.Header)
}
if time.Now().UTC().Sub(t) > time.Duration(expireSeconds)*time.Second { // Construct new query.
return false, probe.NewError(ExpiredPresignedRequest{}) query := make(url.Values)
query.Set("X-Amz-Algorithm", signV4Algorithm)
if time.Now().UTC().Sub(preSignV4Values.Date) > time.Duration(preSignV4Values.Expires)/time.Second {
return false, ErrExpiredPresignRequest("Presigned request already expired, please initiate a new request.")
} }
// Save the date and expires.
t := preSignV4Values.Date
expireSeconds := int(time.Duration(preSignV4Values.Expires) / time.Second)
query.Set("X-Amz-Date", t.Format(iso8601Format)) query.Set("X-Amz-Date", t.Format(iso8601Format))
query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds)) query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds))
query.Set("X-Amz-SignedHeaders", r.getSignedHeaders(r.extractSignedHeaders())) query.Set("X-Amz-SignedHeaders", s.getSignedHeaders(s.extractedSignedHeaders))
query.Set("X-Amz-Credential", r.AccessKeyID+"/"+r.getScope(t)) query.Set("X-Amz-Credential", s.accessKeyID+"/"+s.getScope(t))
// Save other headers available in the request parameters. // Save other headers available in the request parameters.
for k, v := range r.Request.URL.Query() { for k, v := range s.httpRequest.URL.Query() {
if strings.HasPrefix(strings.ToLower(k), "x-amz") { if strings.HasPrefix(strings.ToLower(k), "x-amz") {
continue continue
} }
@ -320,24 +296,24 @@ func (r *Signature) DoesPresignedSignatureMatch() (bool, *probe.Error) {
encodedQuery := query.Encode() encodedQuery := query.Encode()
// Verify if date query is same. // Verify if date query is same.
if r.Request.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") { if s.httpRequest.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") {
return false, nil return false, nil
} }
// Verify if expires query is same. // Verify if expires query is same.
if r.Request.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") { if s.httpRequest.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") {
return false, nil return false, nil
} }
// Verify if signed headers query is same. // Verify if signed headers query is same.
if r.Request.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") { if s.httpRequest.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") {
return false, nil return false, nil
} }
// Verify if credential query is same. // Verify if credential query is same.
if r.Request.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") { if s.httpRequest.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") {
return false, nil return false, nil
} }
// Verify finally if signature is same. // Verify finally if signature is same.
newSignature := r.getSignature(r.getSigningKey(t), r.getStringToSign(r.getPresignedCanonicalRequest(encodedQuery), t)) newSignature := s.getSignature(s.getSigningKey(t), s.getStringToSign(s.getPresignedCanonicalRequest(encodedQuery), t))
if r.Request.URL.Query().Get("X-Amz-Signature") != newSignature { if s.httpRequest.URL.Query().Get("X-Amz-Signature") != newSignature {
return false, nil return false, nil
} }
return true, nil return true, nil
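The same X-Amz-* parameters the verifier reconstructs above are the ones a client embeds when it builds a presigned URL. A small sketch of assembling such a query string; all values are illustrative placeholders and the signature would be produced with the key-derivation chain shown earlier:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Illustrative placeholders only.
	query := make(url.Values)
	query.Set("X-Amz-Algorithm", "AWS4-HMAC-SHA256")
	query.Set("X-Amz-Credential", "EXAMPLEACCESSKEY1234/20160218/us-east-1/s3/aws4_request")
	query.Set("X-Amz-Date", "20160218T034800Z")
	query.Set("X-Amz-Expires", "3600") // seconds, parsed into a time.Duration by the server
	query.Set("X-Amz-SignedHeaders", "host")
	query.Set("X-Amz-Signature", "<hex signature over the presigned canonical request>")

	fmt.Println("https://localhost:9000/mybucket/myobject?" + query.Encode())
}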
@ -346,27 +322,57 @@ func (r *Signature) DoesPresignedSignatureMatch() (bool, *probe.Error) {
// DoesSignatureMatch - Verify authorization header with calculated header in accordance with // DoesSignatureMatch - Verify authorization header with calculated header in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html // - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
// returns true if matches, false otherwise. if error is not nil then it is always false // returns true if matches, false otherwise. if error is not nil then it is always false
func (r *Signature) DoesSignatureMatch(hashedPayload string) (bool, *probe.Error) { func (s *Signature) DoesSignatureMatch(hashedPayload string) (bool, *probe.Error) {
// set new calculated payload // Save authorization header.
r.Request.Header.Set("X-Amz-Content-Sha256", hashedPayload) v4Auth := s.httpRequest.Header.Get("Authorization")
// Add date if not present throw error // Parse signature version '4' header.
signV4Values, err := parseSignV4(v4Auth)
if err != nil {
return false, err.Trace(v4Auth)
}
// Extract all the signed headers along with their values.
s.extractedSignedHeaders = extractSignedHeaders(signV4Values.SignedHeaders, s.httpRequest.Header)
// Verify if the access key id matches.
if signV4Values.Creds.accessKeyID != s.accessKeyID {
return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", signV4Values.Creds.accessKeyID).Trace(signV4Values.Creds.accessKeyID)
}
// Verify if region is valid.
reqRegion := signV4Values.Creds.scope.region
if !isValidRegion(reqRegion, s.region) {
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion)
}
// Save region.
s.region = reqRegion
// Set input payload.
s.httpRequest.Header.Set("X-Amz-Content-Sha256", hashedPayload)
// Extract date, if not present throw error.
var date string var date string
if date = r.Request.Header.Get(http.CanonicalHeaderKey("x-amz-date")); date == "" { if date = s.httpRequest.Header.Get(http.CanonicalHeaderKey("x-amz-date")); date == "" {
if date = r.Request.Header.Get("Date"); date == "" { if date = s.httpRequest.Header.Get("Date"); date == "" {
return false, probe.NewError(MissingDateHeader{}) return false, ErrMissingDateHeader("Date header is missing from the request.").Trace()
} }
} }
t, err := time.Parse(iso8601Format, date) // Parse date header.
if err != nil { t, e := time.Parse(iso8601Format, date)
return false, probe.NewError(err) if e != nil {
return false, probe.NewError(e)
} }
canonicalRequest := r.getCanonicalRequest()
stringToSign := r.getStringToSign(canonicalRequest, t)
signingKey := r.getSigningKey(t)
newSignature := r.getSignature(signingKey, stringToSign)
if newSignature != r.Signature { // Signature version '4'.
canonicalRequest := s.getCanonicalRequest()
stringToSign := s.getStringToSign(canonicalRequest, t)
signingKey := s.getSigningKey(t)
newSignature := s.getSignature(signingKey, stringToSign)
// Verify if signature match.
if newSignature != signV4Values.Signature {
return false, nil return false, nil
} }
return true, nil return true, nil
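DoesSignatureMatch expects the caller to supply the hex-encoded SHA-256 of the request payload, the same value that is set on X-Amz-Content-Sha256 above; for an empty body this is the well-known hash of the empty string. A standalone sketch of computing that value:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	// Hash of an empty payload; prints
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
	sum := sha256.Sum256([]byte(""))
	fmt.Println(hex.EncodeToString(sum[:]))

	// For a request with a body, the same hash is taken over the full payload.
	body := []byte("hello, world")
	bodySum := sha256.Sum256(body)
	fmt.Println(hex.EncodeToString(bodySum[:]))
}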

pkg/signature/utils.go (new normal file, 118 lines)
@ -0,0 +1,118 @@
package signature
import (
"crypto/hmac"
"encoding/hex"
"net/http"
"regexp"
"strings"
"unicode/utf8"
"github.com/minio/minio/pkg/crypto/sha256"
)
// AccessID and SecretID length in bytes
const (
MinioAccessID = 20
MinioSecretID = 40
)
/// helpers
// isValidSecretKey - validate secret key.
var isValidSecretKey = regexp.MustCompile("^.{40}$")
// isValidAccessKey - validate access key.
var isValidAccessKey = regexp.MustCompile("^[A-Z0-9\\-\\.\\_\\~]{20}$")
// isValidRegion - verifies that the incoming region value matches the configured region.
func isValidRegion(reqRegion string, confRegion string) bool {
if confRegion == "" || confRegion == "US" {
confRegion = "us-east-1"
}
// Some older s3 clients set region as "US" instead of
// "us-east-1", handle it.
if reqRegion == "US" {
reqRegion = "us-east-1"
}
return reqRegion == confRegion
}
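The normalisation is easiest to see with a few concrete pairs; a hypothetical walkthrough assumed to live in the same package, since isValidRegion is unexported:

package signature

import "fmt"

// illustrateIsValidRegion is a hypothetical walkthrough of isValidRegion.
func illustrateIsValidRegion() {
	fmt.Println(isValidRegion("us-east-1", ""))          // true: empty config defaults to us-east-1
	fmt.Println(isValidRegion("US", "us-east-1"))        // true: legacy "US" is treated as us-east-1
	fmt.Println(isValidRegion("eu-west-1", "us-east-1")) // false: regions differ
}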
// sumHMAC calculates the HMAC-SHA256 of data keyed with key.
func sumHMAC(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
// getURLEncodedName encodes a string from its UTF-8 byte representation into percent-encoded (hex) escape sequences.
//
// This is necessary since the regular url.Parse() and url.Encode() functions do not support UTF-8 well;
// non-English characters cannot be parsed correctly due to the way url.Encode() is written.
//
// This function, on the other hand, is a direct replacement for the url.Encode() technique and supports
// pretty much every UTF-8 character.
func getURLEncodedName(name string) string {
// if object matches reserved string, no need to encode them
reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
if reservedNames.MatchString(name) {
return name
}
var encodedName string
for _, s := range name {
if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
encodedName = encodedName + string(s)
continue
}
switch s {
case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
encodedName = encodedName + string(s)
continue
default:
len := utf8.RuneLen(s)
if len < 0 {
return name
}
u := make([]byte, len)
utf8.EncodeRune(u, s)
for _, r := range u {
hex := hex.EncodeToString([]byte{r})
encodedName = encodedName + "%" + strings.ToUpper(hex)
}
}
}
return encodedName
}
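A few input/output pairs make the encoding behaviour concrete. This is a hedged, test-style sketch assumed to live in the same package (getURLEncodedName is unexported); the expected values follow directly from the rules above:

package signature

import "testing"

// TestGetURLEncodedName is an illustrative sketch of the expected encoding.
func TestGetURLEncodedName(t *testing.T) {
	cases := []struct{ in, want string }{
		// Purely unreserved characters are returned untouched.
		{"photos/2016/object.jpg", "photos/2016/object.jpg"},
		// Spaces are percent-encoded.
		{"my photo.jpg", "my%20photo.jpg"},
		// Multi-byte UTF-8 runes are encoded byte by byte, upper-case hex.
		{"ürlaub.jpg", "%C3%BCrlaub.jpg"},
	}
	for _, c := range cases {
		if got := getURLEncodedName(c.in); got != c.want {
			t.Errorf("getURLEncodedName(%q) = %q, want %q", c.in, got, c.want)
		}
	}
}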
// extractSignedHeaders extracts the headers named in the Authorization header's SignedHeaders list from the request headers.
func extractSignedHeaders(signedHeaders []string, reqHeaders http.Header) http.Header {
extractedSignedHeaders := make(http.Header)
for _, header := range signedHeaders {
val, ok := reqHeaders[http.CanonicalHeaderKey(header)]
if !ok {
// The Go http server strips off the 'Expect' header; if the
// client sent it as part of the signed headers we need to
// handle it here, otherwise we would see a signature mismatch.
// `aws-cli` sets this as part of the signed headers.
//
// According to
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20
// the Expect header is always of the form:
//
// Expect = "Expect" ":" 1#expectation
// expectation = "100-continue" | expectation-extension
//
// So it is safe to assume that '100-continue' is what would
// be sent; for the time being keep this workaround.
// Adding a *TODO* to remove this later when the Go server
// no longer filters out the 'Expect' header.
if header == "expect" {
extractedSignedHeaders[header] = []string{"100-continue"}
}
// If not found continue, we will fail later.
continue
}
extractedSignedHeaders[header] = val
}
return extractedSignedHeaders
}
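A short sketch of how the 'Expect' workaround plays out, again assumed to live in the same package because extractSignedHeaders is unexported; the header values are illustrative:

package signature

import (
	"fmt"
	"net/http"
)

// illustrateExtractSignedHeaders is a hypothetical helper showing the behaviour.
func illustrateExtractSignedHeaders() {
	reqHeaders := http.Header{}
	reqHeaders.Set("Host", "localhost:9000")
	reqHeaders.Set("X-Amz-Date", "20160218T034800Z")
	// No "Expect" header here: the Go http server strips it before handlers run.

	signed := extractSignedHeaders([]string{"host", "x-amz-date", "expect"}, reqHeaders)

	// "expect" is filled in with "100-continue" by the workaround above,
	// the other two are copied through from the request headers.
	fmt.Println(signed["host"], signed["x-amz-date"], signed["expect"])
}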

pkg/signature/v4-parser.go (new normal file, 203 lines)
@ -0,0 +1,203 @@
package signature
import (
"net/url"
"strings"
"time"
"github.com/minio/minio/pkg/probe"
)
type credScope struct {
accessKeyID string
scope struct {
date time.Time
region string
service string
request string
}
}
func parseCredential(credElement string) (credScope, *probe.Error) {
creds := strings.Split(strings.TrimSpace(credElement), "=")
if len(creds) != 2 {
return credScope{}, ErrMissingFields("Credential tag has missing fields.", credElement).Trace(credElement)
}
if creds[0] != "Credential" {
return credScope{}, ErrMissingCredTag("Missing credentials tag.", credElement).Trace(credElement)
}
credElements := strings.Split(strings.TrimSpace(creds[1]), "/")
if len(credElements) != 5 {
return credScope{}, ErrCredMalformed("Credential values malformed.", credElement).Trace(credElement)
}
if !isValidAccessKey.MatchString(credElements[0]) {
return credScope{}, ErrInvalidAccessKeyID("Invalid access key id.", credElement).Trace(credElement)
}
cred := credScope{
accessKeyID: credElements[0],
}
var e error
cred.scope.date, e = time.Parse(yyyymmdd, credElements[1])
if e != nil {
return credScope{}, ErrInvalidDateFormat("Invalid date format.", credElement).Trace(credElement)
}
if credElements[2] == "" {
return credScope{}, ErrRegionISEmpty("Region is empty.", credElement).Trace(credElement)
}
cred.scope.region = credElements[2]
if credElements[3] != "s3" {
return credScope{}, ErrInvalidService("Invalid service detected.", credElement).Trace(credElement)
}
cred.scope.service = credElements[3]
if credElements[4] != "aws4_request" {
return credScope{}, ErrInvalidRequestVersion("Invalid request version detected.", credElement).Trace(credElement)
}
cred.scope.request = credElements[4]
return cred, nil
}
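Concretely, a credential tag of the shape below splits into the five scope elements validated above. A hypothetical walkthrough in the same package; the 20-character access key is a placeholder, not a real key:

package signature

import "fmt"

// illustrateParseCredential is a hypothetical walkthrough of parseCredential.
func illustrateParseCredential() {
	// Credential=<access key>/<yyyymmdd>/<region>/<service>/<request type>
	cred, err := parseCredential("Credential=EXAMPLEACCESSKEY1234/20160218/us-east-1/s3/aws4_request")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(cred.accessKeyID)   // EXAMPLEACCESSKEY1234
	fmt.Println(cred.scope.region)  // us-east-1
	fmt.Println(cred.scope.service) // s3
}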
// parse signature.
func parseSignature(signElement string) (string, *probe.Error) {
signFields := strings.Split(strings.TrimSpace(signElement), "=")
if len(signFields) != 2 {
return "", ErrMissingFields("Signature tag has missing fields.", signElement).Trace(signElement)
}
if signFields[0] != "Signature" {
return "", ErrMissingSignTag("Signature tag is missing", signElement).Trace(signElement)
}
signature := signFields[1]
return signature, nil
}
// parse signed headers.
func parseSignedHeaders(signedHdrElement string) ([]string, *probe.Error) {
signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=")
if len(signedHdrFields) != 2 {
return nil, ErrMissingFields("Signed headers tag has missing fields.", signedHdrElement).Trace(signedHdrElement)
}
if signedHdrFields[0] != "SignedHeaders" {
return nil, ErrMissingSignHeadersTag("Signed headers tag is missing.", signedHdrElement).Trace(signedHdrElement)
}
signedHeaders := strings.Split(signedHdrFields[1], ";")
return signedHeaders, nil
}
// structured version of AWS Signature V4 header.
type signValues struct {
Creds credScope
SignedHeaders []string
Signature string
}
// structured version of AWS Signature V4 query string.
type preSignValues struct {
signValues
Date time.Time
Expires time.Duration
}
// Parses signature version '4' query string of the following form.
//
// querystring = X-Amz-Algorithm=algorithm
// querystring += &X-Amz-Credential= urlencode(access_key_ID + '/' + credential_scope)
// querystring += &X-Amz-Date=date
// querystring += &X-Amz-Expires=timeout interval
// querystring += &X-Amz-SignedHeaders=signed_headers
// querystring += &X-Amz-Signature=signature
//
func parsePreSignV4(query url.Values) (preSignValues, *probe.Error) {
// Verify if the query algorithm is supported or not.
if query.Get("X-Amz-Algorithm") != signV4Algorithm {
return preSignValues{}, ErrUnsuppSignAlgo("Unsupported algorithm in query string.", query.Get("X-Amz-Algorithm"))
}
// Initialize signature version '4' structured header.
preSignV4Values := preSignValues{}
var err *probe.Error
// Save credential values.
preSignV4Values.Creds, err = parseCredential("Credential=" + query.Get("X-Amz-Credential"))
if err != nil {
return preSignValues{}, err.Trace(query.Get("X-Amz-Credential"))
}
var e error
// Save date in native time.Time.
preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get("X-Amz-Date"))
if e != nil {
return preSignValues{}, ErrMalformedDate("Malformed date string.", query.Get("X-Amz-Date")).Trace(query.Get("X-Amz-Date"))
}
// Save expires in native time.Duration.
preSignV4Values.Expires, e = time.ParseDuration(query.Get("X-Amz-Expires") + "s")
if e != nil {
return preSignValues{}, ErrMalformedExpires("Malformed expires string.", query.Get("X-Amz-Expires")).Trace(query.Get("X-Amz-Expires"))
}
// Save signed headers.
preSignV4Values.SignedHeaders, err = parseSignedHeaders("SignedHeaders=" + query.Get("X-Amz-SignedHeaders"))
if err != nil {
return preSignValues{}, err.Trace(query.Get("X-Amz-SignedHeaders"))
}
// Save signature.
preSignV4Values.Signature, err = parseSignature("Signature=" + query.Get("X-Amz-Signature"))
if err != nil {
return preSignValues{}, err.Trace(query.Get("X-Amz-Signature"))
}
// Return the structured form of the signature query string.
return preSignV4Values, nil
}
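As a concrete illustration, the query parameters of a presigned URL can be fed straight into parsePreSignV4. A hypothetical sketch in the same package; every value is a placeholder:

package signature

import (
	"fmt"
	"net/url"
)

// illustrateParsePreSignV4 is a hypothetical walkthrough of parsePreSignV4.
func illustrateParsePreSignV4() {
	rawQuery := "X-Amz-Algorithm=AWS4-HMAC-SHA256" +
		"&X-Amz-Credential=EXAMPLEACCESSKEY1234%2F20160218%2Fus-east-1%2Fs3%2Faws4_request" +
		"&X-Amz-Date=20160218T034800Z" +
		"&X-Amz-Expires=3600" +
		"&X-Amz-SignedHeaders=host" +
		"&X-Amz-Signature=0000000000000000000000000000000000000000000000000000000000000000"
	query, e := url.ParseQuery(rawQuery)
	if e != nil {
		fmt.Println(e)
		return
	}
	preSign, err := parsePreSignV4(query)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(preSign.Creds.accessKeyID, preSign.Expires) // EXAMPLEACCESSKEY1234 1h0m0s
}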
// Parses signature version '4' header of the following form.
//
// Authorization: algorithm Credential=access key ID/credential scope, \
// SignedHeaders=SignedHeaders, Signature=signature
//
func parseSignV4(v4Auth string) (signValues, *probe.Error) {
// Strip off all spaces; some clients send spaces between the
// parameters and some do not, so we proactively remove any spaces
// to make parsing easier.
v4Auth = strings.Replace(v4Auth, " ", "", -1)
if v4Auth == "" {
return signValues{}, ErrAuthHeaderEmpty("Auth header empty.").Trace(v4Auth)
}
// Verify if the header algorithm is supported or not.
if !strings.HasPrefix(v4Auth, signV4Algorithm) {
return signValues{}, ErrUnsuppSignAlgo("Unsupported algorithm in authorization header.", v4Auth).Trace(v4Auth)
}
// Strip off the Algorithm prefix.
v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm)
authFields := strings.Split(strings.TrimSpace(v4Auth), ",")
if len(authFields) != 3 {
return signValues{}, ErrMissingFields("Missing fields in authorization header.", v4Auth).Trace(v4Auth)
}
// Initialize signature version '4' structured header.
signV4Values := signValues{}
var err *probe.Error
// Save credential values.
signV4Values.Creds, err = parseCredential(authFields[0])
if err != nil {
return signValues{}, err.Trace(v4Auth)
}
// Save signed headers.
signV4Values.SignedHeaders, err = parseSignedHeaders(authFields[1])
if err != nil {
return signValues{}, err.Trace(v4Auth)
}
// Save signature.
signV4Values.Signature, err = parseSignature(authFields[2])
if err != nil {
return signValues{}, err.Trace(v4Auth)
}
// Return the structure here.
return signV4Values, nil
}
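A concrete Authorization header of that shape, with placeholder values, and how parseSignV4 breaks it apart; a hypothetical sketch in the same package:

package signature

import "fmt"

// illustrateParseSignV4 is a hypothetical walkthrough of parseSignV4.
func illustrateParseSignV4() {
	v4Auth := "AWS4-HMAC-SHA256 " +
		"Credential=EXAMPLEACCESSKEY1234/20160218/us-east-1/s3/aws4_request, " +
		"SignedHeaders=host;x-amz-content-sha256;x-amz-date, " +
		"Signature=0000000000000000000000000000000000000000000000000000000000000000"
	signV4Values, err := parseSignV4(v4Auth)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(signV4Values.Creds.accessKeyID) // EXAMPLEACCESSKEY1234
	fmt.Println(signV4Values.SignedHeaders)     // [host x-amz-content-sha256 x-amz-date]
	fmt.Println(signV4Values.Signature[:8])     // 00000000
}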

@ -306,7 +306,7 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64,
// //
// Signature mismatch occurred all temp files to be removed and all data purged. // Signature mismatch occurred all temp files to be removed and all data purged.
CleanupWritersOnError(writers) CleanupWritersOnError(writers)
return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{}) return ObjectMetadata{}, probe.NewError(SignDoesNotMatch{})
} }
} }
objMetadata.MD5Sum = hex.EncodeToString(dataMD5sum) objMetadata.MD5Sum = hex.EncodeToString(dataMD5sum)

@ -18,6 +18,13 @@ package xl
import "fmt" import "fmt"
// SignDoesNotMatch - signature does not match.
type SignDoesNotMatch struct{}
func (e SignDoesNotMatch) Error() string {
return "Signature does not match."
}
// InvalidArgument invalid argument // InvalidArgument invalid argument
type InvalidArgument struct{} type InvalidArgument struct{}

@ -226,7 +226,7 @@ func (xl API) createObjectPart(bucket, key, uploadID string, partID int, content
return "", err.Trace() return "", err.Trace()
} }
if !ok { if !ok {
return "", probe.NewError(signV4.SigDoesNotMatch{}) return "", probe.NewError(SignDoesNotMatch{})
} }
} }
} }
@ -342,7 +342,7 @@ func (xl API) completeMultipartUploadV2(bucket, key, uploadID string, data io.Re
return nil, err.Trace() return nil, err.Trace()
} }
if !ok { if !ok {
return nil, probe.NewError(signV4.SigDoesNotMatch{}) return nil, probe.NewError(SignDoesNotMatch{})
} }
} }
parts := &CompleteMultipartUpload{} parts := &CompleteMultipartUpload{}

@ -376,7 +376,7 @@ func (xl API) completeMultipartUpload(bucket, object, uploadID string, data io.R
return ObjectMetadata{}, err.Trace() return ObjectMetadata{}, err.Trace()
} }
if !ok { if !ok {
return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{}) return ObjectMetadata{}, probe.NewError(SignDoesNotMatch{})
} }
} }
parts := &CompleteMultipartUpload{} parts := &CompleteMultipartUpload{}

@ -392,7 +392,7 @@ func (xl API) createObject(bucket, key, contentType, expectedMD5Sum string, size
if !ok { if !ok {
// Delete perhaps the object is already saved, due to the nature of append() // Delete perhaps the object is already saved, due to the nature of append()
xl.objects.Delete(objectKey) xl.objects.Delete(objectKey)
return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{}) return ObjectMetadata{}, probe.NewError(SignDoesNotMatch{})
} }
} }
@ -435,7 +435,7 @@ func (xl API) MakeBucket(bucketName, acl string, location io.Reader, signature *
return err.Trace() return err.Trace()
} }
if !ok { if !ok {
return probe.NewError(signV4.SigDoesNotMatch{}) return probe.NewError(SignDoesNotMatch{})
} }
} }

@ -19,6 +19,7 @@ package main
import ( import (
"net" "net"
"net/http" "net/http"
"path/filepath"
router "github.com/gorilla/mux" router "github.com/gorilla/mux"
jsonrpc "github.com/gorilla/rpc/v2" jsonrpc "github.com/gorilla/rpc/v2"
@ -26,6 +27,7 @@ import (
"github.com/minio/minio-go" "github.com/minio/minio-go"
"github.com/minio/minio/pkg/fs" "github.com/minio/minio/pkg/fs"
"github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/probe"
signV4 "github.com/minio/minio/pkg/signature"
) )
// CloudStorageAPI container for S3 compatible API. // CloudStorageAPI container for S3 compatible API.
@ -34,6 +36,10 @@ type CloudStorageAPI struct {
AccessLog bool AccessLog bool
// Filesystem instance. // Filesystem instance.
Filesystem fs.Filesystem Filesystem fs.Filesystem
// Signature instance.
Signature *signV4.Signature
// Region instance.
Region string
} }
// WebAPI container for Web API. // WebAPI container for Web API.
@ -53,39 +59,30 @@ type WebAPI struct {
secretAccessKey string secretAccessKey string
} }
func getWebAPIHandler(web *WebAPI) http.Handler {
var handlerFns = []HandlerFunc{
setCacheControlHandler, // Adds Cache-Control header
setTimeValidityHandler, // Validate time.
setJWTAuthHandler, // Authentication handler for verifying JWT's.
setCorsHandler, // CORS added only for testing purposes.
}
if web.AccessLog {
handlerFns = append(handlerFns, setAccessLogHandler)
}
s := jsonrpc.NewServer()
codec := json2.NewCodec()
s.RegisterCodec(codec, "application/json")
s.RegisterCodec(codec, "application/json; charset=UTF-8")
s.RegisterService(web, "Web")
mux := router.NewRouter()
// Root router.
root := mux.NewRoute().PathPrefix("/").Subrouter()
root.Handle("/rpc", s)
// Enable this when we add assets.
root.PathPrefix("/login").Handler(http.StripPrefix("/login", http.FileServer(assetFS())))
root.Handle("/{file:.*}", http.FileServer(assetFS()))
return registerHandlers(mux, handlerFns...)
}
// registerCloudStorageAPI - register all the handlers to their respective paths // registerCloudStorageAPI - register all the handlers to their respective paths
func registerCloudStorageAPI(mux *router.Router, a CloudStorageAPI) { func registerCloudStorageAPI(mux *router.Router, a CloudStorageAPI, w *WebAPI) {
// root Router // Minio rpc router
root := mux.NewRoute().PathPrefix("/").Subrouter() minio := mux.NewRoute().PathPrefix(privateBucket).Subrouter()
// Initialize json rpc handlers.
rpc := jsonrpc.NewServer()
codec := json2.NewCodec()
rpc.RegisterCodec(codec, "application/json")
rpc.RegisterCodec(codec, "application/json; charset=UTF-8")
rpc.RegisterService(w, "Web")
// RPC handler at URI - /minio/rpc
minio.Path("/rpc").Handler(rpc)
// Web handler assets at URI - /minio/login
minio.Path("/login").Handler(http.StripPrefix(filepath.Join(privateBucket, "login"), http.FileServer(assetFS())))
minio.Path("/{file:.*}").Handler(http.StripPrefix(privateBucket, http.FileServer(assetFS())))
// API Router
api := mux.NewRoute().PathPrefix("/").Subrouter()
// Bucket router // Bucket router
bucket := root.PathPrefix("/{bucket}").Subrouter() bucket := api.PathPrefix("/{bucket}").Subrouter()
// Object operations // Object operations
bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(a.HeadObjectHandler) bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(a.HeadObjectHandler)
@ -110,7 +107,7 @@ func registerCloudStorageAPI(mux *router.Router, a CloudStorageAPI) {
bucket.Methods("DELETE").HandlerFunc(a.DeleteBucketHandler) bucket.Methods("DELETE").HandlerFunc(a.DeleteBucketHandler)
// Root operation // Root operation
root.Methods("GET").HandlerFunc(a.ListBucketsHandler) api.Methods("GET").HandlerFunc(a.ListBucketsHandler)
} }
// getNewWebAPI instantiate a new WebAPI. // getNewWebAPI instantiate a new WebAPI.
@ -129,7 +126,7 @@ func getNewWebAPI(conf cloudServerConfig) *WebAPI {
client, e := minio.NewV4(net.JoinHostPort(host, port), conf.AccessKeyID, conf.SecretAccessKey, inSecure) client, e := minio.NewV4(net.JoinHostPort(host, port), conf.AccessKeyID, conf.SecretAccessKey, inSecure)
fatalIf(probe.NewError(e), "Unable to initialize minio client", nil) fatalIf(probe.NewError(e), "Unable to initialize minio client", nil)
web := &WebAPI{ w := &WebAPI{
FSPath: conf.Path, FSPath: conf.Path,
AccessLog: conf.AccessLog, AccessLog: conf.AccessLog,
Client: client, Client: client,
@ -138,7 +135,7 @@ func getNewWebAPI(conf cloudServerConfig) *WebAPI {
accessKeyID: conf.AccessKeyID, accessKeyID: conf.AccessKeyID,
secretAccessKey: conf.SecretAccessKey, secretAccessKey: conf.SecretAccessKey,
} }
return web return w
} }
// getNewCloudStorageAPI instantiate a new CloudStorageAPI. // getNewCloudStorageAPI instantiate a new CloudStorageAPI.
@ -146,24 +143,40 @@ func getNewCloudStorageAPI(conf cloudServerConfig) CloudStorageAPI {
fs, err := fs.New(conf.Path, conf.MinFreeDisk) fs, err := fs.New(conf.Path, conf.MinFreeDisk)
fatalIf(err.Trace(), "Initializing filesystem failed.", nil) fatalIf(err.Trace(), "Initializing filesystem failed.", nil)
sign, err := signV4.New(conf.AccessKeyID, conf.SecretAccessKey, conf.Region)
fatalIf(err.Trace(conf.AccessKeyID, conf.SecretAccessKey, conf.Region), "Initializing signature version '4' failed.", nil)
return CloudStorageAPI{ return CloudStorageAPI{
Filesystem: fs,
AccessLog: conf.AccessLog, AccessLog: conf.AccessLog,
Filesystem: fs,
Signature: sign,
Region: conf.Region,
} }
} }
func getCloudStorageAPIHandler(api CloudStorageAPI) http.Handler { func getCloudStorageAPIHandler(api CloudStorageAPI, web *WebAPI) http.Handler {
var handlerFns = []HandlerFunc{ var handlerFns = []HandlerFunc{
// Redirect some pre-defined browser request paths to a static
// location prefix.
setBrowserRedirectHandler,
// Validates if incoming request is for restricted buckets.
setPrivateBucketHandler,
// Adds cache control for all browser requests.
setBrowserCacheControlHandler,
// Validates all incoming requests to have a valid date header.
setTimeValidityHandler, setTimeValidityHandler,
// CORS setting for all browser API requests.
setCorsHandler,
// Validates all incoming URL resources; for invalid/unsupported
// resources the client receives an HTTP error.
setIgnoreResourcesHandler, setIgnoreResourcesHandler,
setIgnoreSignatureV2RequestHandler, // Auth handler verifies incoming authorization headers and
setSignatureHandler, // routes them accordingly. Client receives an HTTP error for
} // invalid/unsupported signatures.
if api.AccessLog { setAuthHandler,
handlerFns = append(handlerFns, setAccessLogHandler)
} }
handlerFns = append(handlerFns, setCorsHandler) handlerFns = append(handlerFns, setCorsHandler)
mux := router.NewRouter() mux := router.NewRouter()
registerCloudStorageAPI(mux, api) registerCloudStorageAPI(mux, api, web)
return registerHandlers(mux, handlerFns...) return registerHandlers(mux, handlerFns...)
} }
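registerHandlers, defined elsewhere in this codebase, composes the router with each of the handler functions above. A minimal sketch of how such a middleware chain is typically built; the HandlerFunc shape, the chaining order and the setServerHeader example are assumptions for illustration, not the project's actual implementation:

package main

import "net/http"

// HandlerFunc wraps one http.Handler with another; this mirrors the shape
// used by the handlerFns slice above (assumed signature, for illustration).
type HandlerFunc func(http.Handler) http.Handler

// chainHandlers is a hypothetical stand-in for registerHandlers: it wraps the
// final router with each middleware in the given order.
func chainHandlers(h http.Handler, fns ...HandlerFunc) http.Handler {
	for _, fn := range fns {
		h = fn(h)
	}
	return h
}

// setServerHeader is an example middleware of that shape: it adds a response
// header and then delegates to the wrapped handler.
func setServerHeader(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Server", "Minio")
		h.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	http.ListenAndServe(":8080", chainHandlers(mux, setServerHeader))
}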

@ -77,6 +77,7 @@ type cloudServerConfig struct {
// Credentials. // Credentials.
AccessKeyID string // Access key id. AccessKeyID string // Access key id.
SecretAccessKey string // Secret access key. SecretAccessKey string // Secret access key.
Region string // Region string.
/// FS options /// FS options
Path string // Path to export for cloud storage Path string // Path to export for cloud storage
@ -89,45 +90,12 @@ type cloudServerConfig struct {
KeyFile string // Domain key KeyFile string // Domain key
} }
func configureWebServer(conf cloudServerConfig) (*http.Server, *probe.Error) {
// Split the api address into host and port.
host, port, e := net.SplitHostPort(conf.Address)
if e != nil {
return nil, probe.NewError(e)
}
webPort, e := strconv.Atoi(port)
if e != nil {
return nil, probe.NewError(e)
}
// Always choose the next port, based on the API address port.
webPort = webPort + 1
webAddress := net.JoinHostPort(host, strconv.Itoa(webPort))
// Minio server config
webServer := &http.Server{
Addr: webAddress,
Handler: getWebAPIHandler(getNewWebAPI(conf)),
MaxHeaderBytes: 1 << 20,
}
if conf.TLS {
var err error
webServer.TLSConfig = &tls.Config{}
webServer.TLSConfig.Certificates = make([]tls.Certificate, 1)
webServer.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(conf.CertFile, conf.KeyFile)
if err != nil {
return nil, probe.NewError(err)
}
}
return webServer, nil
}
// configureAPIServer configure a new server instance // configureAPIServer configure a new server instance
func configureAPIServer(conf cloudServerConfig) (*http.Server, *probe.Error) { func configureAPIServer(conf cloudServerConfig) (*http.Server, *probe.Error) {
// Minio server config // Minio server config
apiServer := &http.Server{ apiServer := &http.Server{
Addr: conf.Address, Addr: conf.Address,
Handler: getCloudStorageAPIHandler(getNewCloudStorageAPI(conf)), Handler: getCloudStorageAPIHandler(getNewCloudStorageAPI(conf), getNewWebAPI(conf)),
MaxHeaderBytes: 1 << 20, MaxHeaderBytes: 1 << 20,
} }
@ -299,12 +267,17 @@ func serverMain(c *cli.Context) {
if _, err := os.Stat(path); err != nil { if _, err := os.Stat(path); err != nil {
fatalIf(probe.NewError(err), "Unable to validate the path", nil) fatalIf(probe.NewError(err), "Unable to validate the path", nil)
} }
region := conf.Credentials.Region
if region == "" {
region = "us-east-1"
}
tls := (certFile != "" && keyFile != "") tls := (certFile != "" && keyFile != "")
serverConfig := cloudServerConfig{ serverConfig := cloudServerConfig{
Address: c.GlobalString("address"), Address: c.GlobalString("address"),
AccessLog: c.GlobalBool("enable-accesslog"), AccessLog: c.GlobalBool("enable-accesslog"),
AccessKeyID: conf.Credentials.AccessKeyID, AccessKeyID: conf.Credentials.AccessKeyID,
SecretAccessKey: conf.Credentials.SecretAccessKey, SecretAccessKey: conf.Credentials.SecretAccessKey,
Region: region,
Path: path, Path: path,
MinFreeDisk: minFreeDisk, MinFreeDisk: minFreeDisk,
TLS: tls, TLS: tls,
@ -319,13 +292,6 @@ func serverMain(c *cli.Context) {
Println("\nMinio Object Storage:") Println("\nMinio Object Storage:")
printServerMsg(apiServer) printServerMsg(apiServer)
// configure Web server.
webServer, err := configureWebServer(serverConfig)
errorIf(err.Trace(), "Failed to configure Web server.", nil)
Println("\nMinio Browser:")
printServerMsg(webServer)
Println("\nTo configure Minio Client:") Println("\nTo configure Minio Client:")
if runtime.GOOS == "windows" { if runtime.GOOS == "windows" {
Println(" Download \"mc\" from https://dl.minio.io/client/mc/release/" + runtime.GOOS + "-" + runtime.GOARCH + "/mc.exe") Println(" Download \"mc\" from https://dl.minio.io/client/mc/release/" + runtime.GOOS + "-" + runtime.GOARCH + "/mc.exe")
@ -337,6 +303,6 @@ func serverMain(c *cli.Context) {
} }
// Start server. // Start server.
err = minhttp.ListenAndServe(apiServer, webServer) err = minhttp.ListenAndServe(apiServer)
errorIf(err.Trace(), "Failed to start the minio server.", nil) errorIf(err.Trace(), "Failed to start the minio server.", nil)
} }

@ -21,8 +21,10 @@ import (
"crypto/md5" "crypto/md5"
"io" "io"
"io/ioutil" "io/ioutil"
"net"
"os" "os"
"sort" "sort"
"strconv"
"strings" "strings"
"time" "time"
@ -52,6 +54,21 @@ var _ = Suite(&MyAPIFSCacheSuite{})
var testAPIFSCacheServer *httptest.Server var testAPIFSCacheServer *httptest.Server
// Ask the kernel for a free open port.
func getFreePort() int {
addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
if err != nil {
panic(err)
}
l, err := net.ListenTCP("tcp", addr)
if err != nil {
panic(err)
}
defer l.Close()
return l.Addr().(*net.TCPAddr).Port
}
func (s *MyAPIFSCacheSuite) SetUpSuite(c *C) { func (s *MyAPIFSCacheSuite) SetUpSuite(c *C) {
root, e := ioutil.TempDir(os.TempDir(), "api-") root, e := ioutil.TempDir(os.TempDir(), "api-")
c.Assert(e, IsNil) c.Assert(e, IsNil)
@ -77,11 +94,16 @@ func (s *MyAPIFSCacheSuite) SetUpSuite(c *C) {
c.Assert(saveConfig(conf), IsNil) c.Assert(saveConfig(conf), IsNil)
cloudServer := cloudServerConfig{ cloudServer := cloudServerConfig{
Path: fsroot, Address: ":" + strconv.Itoa(getFreePort()),
MinFreeDisk: 0, Path: fsroot,
MinFreeDisk: 0,
AccessKeyID: s.accessKeyID,
SecretAccessKey: s.secretAccessKey,
Region: "us-east-1",
} }
cloudStorageAPI := getNewCloudStorageAPI(cloudServer) cloudStorageAPI := getNewCloudStorageAPI(cloudServer)
httpHandler := getCloudStorageAPIHandler(cloudStorageAPI) webAPI := getNewWebAPI(cloudServer)
httpHandler := getCloudStorageAPIHandler(cloudStorageAPI, webAPI)
testAPIFSCacheServer = httptest.NewServer(httpHandler) testAPIFSCacheServer = httptest.NewServer(httpHandler)
} }
@ -225,7 +247,7 @@ func (s *MyAPIFSCacheSuite) newRequest(method, urlStr string, contentLength int6
"aws4_request", "aws4_request",
}, "/") }, "/")
stringToSign := authHeaderPrefix + "\n" + t.Format(iso8601Format) + "\n" stringToSign := "AWS4-HMAC-SHA256" + "\n" + t.Format(iso8601Format) + "\n"
stringToSign = stringToSign + scope + "\n" stringToSign = stringToSign + scope + "\n"
stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest))) stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
@ -238,7 +260,7 @@ func (s *MyAPIFSCacheSuite) newRequest(method, urlStr string, contentLength int6
// final Authorization header // final Authorization header
parts := []string{ parts := []string{
authHeaderPrefix + " Credential=" + s.accessKeyID + "/" + scope, "AWS4-HMAC-SHA256" + " Credential=" + s.accessKeyID + "/" + scope,
"SignedHeaders=" + signedHeaders, "SignedHeaders=" + signedHeaders,
"Signature=" + signature, "Signature=" + signature,
} }

@ -1,144 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"encoding/hex"
"net/http"
"strings"
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/probe"
v4 "github.com/minio/minio/pkg/signature"
)
type signatureHandler struct {
handler http.Handler
}
// setSignatureHandler to validate authorization header for the incoming request.
func setSignatureHandler(h http.Handler) http.Handler {
return signatureHandler{h}
}
func isRequestSignatureV4(req *http.Request) bool {
if _, ok := req.Header["Authorization"]; ok {
if strings.HasPrefix(req.Header.Get("Authorization"), authHeaderPrefix) {
return ok
}
}
return false
}
func isRequestRequiresACLCheck(req *http.Request) bool {
if isRequestSignatureV4(req) || isRequestPresignedSignatureV4(req) || isRequestPostPolicySignatureV4(req) {
return false
}
return true
}
func isRequestPresignedSignatureV4(req *http.Request) bool {
if _, ok := req.URL.Query()["X-Amz-Credential"]; ok {
return ok
}
return false
}
func isRequestPostPolicySignatureV4(req *http.Request) bool {
if _, ok := req.Header["Content-Type"]; ok {
if strings.Contains(req.Header.Get("Content-Type"), "multipart/form-data") {
return true
}
}
return false
}
func (s signatureHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if isRequestPostPolicySignatureV4(r) && r.Method == "POST" {
s.handler.ServeHTTP(w, r)
return
}
var signature *v4.Signature
if isRequestSignatureV4(r) {
// For PUT and POST requests with payload, send the call upwards for verification.
// Or PUT and POST requests without payload, verify here.
if (r.Body == nil && (r.Method == "PUT" || r.Method == "POST")) || (r.Method != "PUT" && r.Method != "POST") {
// Init signature V4 verification
var err *probe.Error
signature, err = initSignatureV4(r)
if err != nil {
switch err.ToGoError() {
case errInvalidRegion:
errorIf(err.Trace(), "Unknown region in authorization header.", nil)
writeErrorResponse(w, r, AuthorizationHeaderMalformed, r.URL.Path)
return
case errAccessKeyIDInvalid:
errorIf(err.Trace(), "Invalid access key id.", nil)
writeErrorResponse(w, r, InvalidAccessKeyID, r.URL.Path)
return
default:
errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
writeErrorResponse(w, r, InternalError, r.URL.Path)
return
}
}
dummySha256Bytes := sha256.Sum256([]byte(""))
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(dummySha256Bytes[:]))
if err != nil {
errorIf(err.Trace(), "Unable to verify signature.", nil)
writeErrorResponse(w, r, InternalError, r.URL.Path)
return
}
if !ok {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
}
s.handler.ServeHTTP(w, r)
return
}
if isRequestPresignedSignatureV4(r) {
var err *probe.Error
signature, err = initPresignedSignatureV4(r)
if err != nil {
switch err.ToGoError() {
case errAccessKeyIDInvalid:
errorIf(err.Trace(), "Invalid access key id requested.", nil)
writeErrorResponse(w, r, InvalidAccessKeyID, r.URL.Path)
return
default:
errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
writeErrorResponse(w, r, InternalError, r.URL.Path)
return
}
}
ok, err := signature.DoesPresignedSignatureMatch()
if err != nil {
errorIf(err.Trace(), "Unable to verify signature.", nil)
writeErrorResponse(w, r, InternalError, r.URL.Path)
return
}
if !ok {
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
return
}
s.handler.ServeHTTP(w, r)
return
}
// call goes up from here, let ACL's verify the validity of the request
s.handler.ServeHTTP(w, r)
}

signature.go (new normal file, 72 lines)
@ -0,0 +1,72 @@
package main
import (
"crypto/sha256"
"encoding/hex"
"net/http"
"strings"
signV4 "github.com/minio/minio/pkg/signature"
)
func isRequestJWT(r *http.Request) bool {
if _, ok := r.Header["Authorization"]; ok {
if strings.HasPrefix(r.Header.Get("Authorization"), jwtAlgorithm) {
return ok
}
}
return false
}
func isRequestSignatureV4(r *http.Request) bool {
if _, ok := r.Header["Authorization"]; ok {
if strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) {
return ok
}
}
return false
}
func isRequestPresignedSignatureV4(r *http.Request) bool {
if _, ok := r.URL.Query()["X-Amz-Credential"]; ok {
return ok
}
return false
}
func isRequestPostPolicySignatureV4(r *http.Request) bool {
if _, ok := r.Header["Content-Type"]; ok {
if strings.Contains(r.Header.Get("Content-Type"), "multipart/form-data") {
return true
}
}
return false
}
func isRequestRequiresACLCheck(r *http.Request) bool {
if isRequestSignatureV4(r) || isRequestPresignedSignatureV4(r) || isRequestPostPolicySignatureV4(r) {
return false
}
return true
}
func isSignV4ReqAuthenticated(sign *signV4.Signature, r *http.Request) bool {
auth := sign.SetHTTPRequestToVerify(r)
if isRequestSignatureV4(r) {
dummyPayload := sha256.Sum256([]byte(""))
ok, err := auth.DoesSignatureMatch(hex.EncodeToString(dummyPayload[:]))
if err != nil {
errorIf(err.Trace(), "Signature verification failed.", nil)
return false
}
return ok
} else if isRequestPresignedSignatureV4(r) {
ok, err := auth.DoesPresignedSignatureMatch()
if err != nil {
errorIf(err.Trace(), "Presigned signature verification failed.", nil)
return false
}
return ok
}
return false
}

@ -36,9 +36,9 @@ import (
"github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/probe"
) )
// isAuthenticated validates if any incoming request to be a valid JWT // isJWTReqAuthencatied validates whether the incoming request is a valid
// authenticated request. // JWT-authenticated request.
func isAuthenticated(req *http.Request) bool { func isJWTReqAuthencatied(req *http.Request) bool {
jwt := InitJWT() jwt := InitJWT()
tokenRequest, e := jwtgo.ParseFromRequest(req, func(token *jwtgo.Token) (interface{}, error) { tokenRequest, e := jwtgo.ParseFromRequest(req, func(token *jwtgo.Token) (interface{}, error) {
if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok { if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok {
@ -60,7 +60,7 @@ func (web WebAPI) GetUIVersion(r *http.Request, args *GenericArgs, reply *Generi
// ServerInfo - get server info. // ServerInfo - get server info.
func (web *WebAPI) ServerInfo(r *http.Request, args *ServerInfoArgs, reply *ServerInfoRep) error { func (web *WebAPI) ServerInfo(r *http.Request, args *ServerInfoArgs, reply *ServerInfoRep) error {
if !isAuthenticated(r) { if !isJWTReqAuthencatied(r) {
return &json2.Error{Message: "Unauthorized request"} return &json2.Error{Message: "Unauthorized request"}
} }
host, err := os.Hostname() host, err := os.Hostname()
@ -89,7 +89,7 @@ func (web *WebAPI) ServerInfo(r *http.Request, args *ServerInfoArgs, reply *Serv
// DiskInfo - get disk statistics. // DiskInfo - get disk statistics.
func (web *WebAPI) DiskInfo(r *http.Request, args *DiskInfoArgs, reply *DiskInfoRep) error { func (web *WebAPI) DiskInfo(r *http.Request, args *DiskInfoArgs, reply *DiskInfoRep) error {
if !isAuthenticated(r) { if !isJWTReqAuthencatied(r) {
return &json2.Error{Message: "Unauthorized request"} return &json2.Error{Message: "Unauthorized request"}
} }
info, e := disk.GetInfo(web.FSPath) info, e := disk.GetInfo(web.FSPath)
@ -103,7 +103,7 @@ func (web *WebAPI) DiskInfo(r *http.Request, args *DiskInfoArgs, reply *DiskInfo
// MakeBucket - make a bucket. // MakeBucket - make a bucket.
func (web *WebAPI) MakeBucket(r *http.Request, args *MakeBucketArgs, reply *GenericRep) error { func (web *WebAPI) MakeBucket(r *http.Request, args *MakeBucketArgs, reply *GenericRep) error {
if !isAuthenticated(r) { if !isJWTReqAuthencatied(r) {
return &json2.Error{Message: "Unauthorized request"} return &json2.Error{Message: "Unauthorized request"}
} }
reply.UIVersion = uiVersion reply.UIVersion = uiVersion
@ -116,7 +116,7 @@ func (web *WebAPI) MakeBucket(r *http.Request, args *MakeBucketArgs, reply *Gene
// ListBuckets - list buckets api. // ListBuckets - list buckets api.
func (web *WebAPI) ListBuckets(r *http.Request, args *ListBucketsArgs, reply *ListBucketsRep) error { func (web *WebAPI) ListBuckets(r *http.Request, args *ListBucketsArgs, reply *ListBucketsRep) error {
if !isAuthenticated(r) { if !isJWTReqAuthencatied(r) {
return &json2.Error{Message: "Unauthorized request"} return &json2.Error{Message: "Unauthorized request"}
} }
buckets, e := web.Client.ListBuckets() buckets, e := web.Client.ListBuckets()
@ -135,7 +135,7 @@ func (web *WebAPI) ListBuckets(r *http.Request, args *ListBucketsArgs, reply *Li
// ListObjects - list objects api. // ListObjects - list objects api.
func (web *WebAPI) ListObjects(r *http.Request, args *ListObjectsArgs, reply *ListObjectsRep) error { func (web *WebAPI) ListObjects(r *http.Request, args *ListObjectsArgs, reply *ListObjectsRep) error {
if !isAuthenticated(r) { if !isJWTReqAuthencatied(r) {
return &json2.Error{Message: "Unauthorized request"} return &json2.Error{Message: "Unauthorized request"}
} }
doneCh := make(chan struct{}) doneCh := make(chan struct{})
@ -183,7 +183,7 @@ func getTargetHost(apiAddress, targetHost string) (string, *probe.Error) {
// PutObjectURL - generates url for upload access. // PutObjectURL - generates url for upload access.
func (web *WebAPI) PutObjectURL(r *http.Request, args *PutObjectURLArgs, reply *PutObjectURLRep) error { func (web *WebAPI) PutObjectURL(r *http.Request, args *PutObjectURLArgs, reply *PutObjectURLRep) error {
if !isAuthenticated(r) { if !isJWTReqAuthencatied(r) {
return &json2.Error{Message: "Unauthorized request"} return &json2.Error{Message: "Unauthorized request"}
} }
targetHost, err := getTargetHost(web.apiAddress, args.TargetHost) targetHost, err := getTargetHost(web.apiAddress, args.TargetHost)
@ -205,7 +205,7 @@ func (web *WebAPI) PutObjectURL(r *http.Request, args *PutObjectURLArgs, reply *
// GetObjectURL - generates url for download access. // GetObjectURL - generates url for download access.
func (web *WebAPI) GetObjectURL(r *http.Request, args *GetObjectURLArgs, reply *GetObjectURLRep) error { func (web *WebAPI) GetObjectURL(r *http.Request, args *GetObjectURLArgs, reply *GetObjectURLRep) error {
if !isAuthenticated(r) { if !isJWTReqAuthencatied(r) {
return &json2.Error{Message: "Unauthorized request"} return &json2.Error{Message: "Unauthorized request"}
} }
@ -237,7 +237,7 @@ func (web *WebAPI) GetObjectURL(r *http.Request, args *GetObjectURLArgs, reply *
// RemoveObject - removes an object. // RemoveObject - removes an object.
func (web *WebAPI) RemoveObject(r *http.Request, args *RemoveObjectArgs, reply *GenericRep) error { func (web *WebAPI) RemoveObject(r *http.Request, args *RemoveObjectArgs, reply *GenericRep) error {
if !isAuthenticated(r) { if !isJWTReqAuthencatied(r) {
return &json2.Error{Message: "Unauthorized request"} return &json2.Error{Message: "Unauthorized request"}
} }
reply.UIVersion = uiVersion reply.UIVersion = uiVersion