mirror of https://github.com/minio/minio.git
Merge pull request #1132 from harshavardhana/merge-ports
web/rpc: Merge ports with API server.
Commit 2a6bc604db
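The PR title says the web/RPC endpoints now share the API server's port. Purely as an illustration of that idea (the route names, handlers, and address below are assumptions, not the patch itself), a single gorilla/mux router can serve both kinds of traffic from one listener:

// Hypothetical sketch, not part of this PR: one router, one port, with the
// browser/web-RPC routes under a reserved prefix and everything else treated
// as S3 API traffic.
package main

import (
    "log"
    "net/http"

    "github.com/gorilla/mux"
)

func serveSinglePort(addr string) {
    rtr := mux.NewRouter()

    // Browser and JSON-RPC traffic lives under a reserved prefix
    // ("/minio" in this PR); everything else falls through to the API.
    rtr.PathPrefix("/minio").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("web/RPC handler would run here"))
    })
    rtr.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("S3 API handler would run here"))
    })

    log.Fatal(http.ListenAndServe(addr, rtr))
}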
@@ -75,6 +75,7 @@ const (
    BucketNotEmpty
    RootPathFull
    ObjectExistsAsPrefix
    AllAccessDisabled
)

// APIError code to Error structure map

@@ -244,6 +245,11 @@ var errorCodeResponse = map[int]APIError{
        Description:    "An object already exists as your prefix, choose a different prefix to proceed.",
        HTTPStatusCode: http.StatusConflict,
    },
    AllAccessDisabled: {
        Code:           "AllAccessDisabled",
        Description:    "All access to this bucket has been disabled.",
        HTTPStatusCode: http.StatusForbidden,
    },
}

// errorCodeError provides errorCode to Error. It returns empty if the code provided is unknown
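For context, the new AllAccessDisabled code is consumed through the same errorCodeResponse lookup as the existing codes. A minimal sketch of that lookup follows; the helper name and the XML body are illustrative assumptions (not the repository's actual writeErrorResponse), and it assumes the usual "fmt" and "net/http" imports:

// Sketch only: translate AllAccessDisabled into an HTTP reply using the
// errorCodeResponse map added in the hunk above.
func writeAllAccessDisabledSketch(w http.ResponseWriter) {
    apiErr := errorCodeResponse[AllAccessDisabled]
    w.WriteHeader(apiErr.HTTPStatusCode) // http.StatusForbidden
    fmt.Fprintf(w, "<Error><Code>%s</Code><Message>%s</Message></Error>",
        apiErr.Code, apiErr.Description)
}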
api-signature.go (304 lines deleted)
@@ -1,304 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
    "bytes"
    "encoding/base64"
    "io"
    "io/ioutil"
    "mime/multipart"
    "net/http"
    "strings"
    "time"

    "github.com/minio/minio/pkg/probe"
    v4 "github.com/minio/minio/pkg/signature"
)

const (
    authHeaderPrefix = "AWS4-HMAC-SHA256"
    iso8601Format    = "20060102T150405Z"
)

// getCredentialsFromAuth parse credentials tag from authorization value
func getCredentialsFromAuth(authValue string) ([]string, *probe.Error) {
    if authValue == "" {
        return nil, probe.NewError(errMissingAuthHeaderValue)
    }
    // replace all spaced strings
    authValue = strings.Replace(authValue, " ", "", -1)
    if !strings.HasPrefix(authValue, authHeaderPrefix) {
        return nil, probe.NewError(errMissingFieldsAuthHeader)
    }
    if !strings.HasPrefix(strings.TrimPrefix(authValue, authHeaderPrefix), "Credential") {
        return nil, probe.NewError(errInvalidAuthHeaderPrefix)
    }
    authValue = strings.TrimPrefix(authValue, authHeaderPrefix)
    authFields := strings.Split(strings.TrimSpace(authValue), ",")
    if len(authFields) != 3 {
        return nil, probe.NewError(errInvalidAuthHeaderValue)
    }
    credentials := strings.Split(strings.TrimSpace(authFields[0]), "=")
    if len(credentials) != 2 {
        return nil, probe.NewError(errMissingFieldsCredentialTag)
    }
    credentialElements := strings.Split(strings.TrimSpace(credentials[1]), "/")
    if len(credentialElements) != 5 {
        return nil, probe.NewError(errCredentialTagMalformed)
    }
    return credentialElements, nil
}

func getSignatureFromAuth(authHeaderValue string) (string, *probe.Error) {
    authValue := strings.TrimPrefix(authHeaderValue, authHeaderPrefix)
    authFields := strings.Split(strings.TrimSpace(authValue), ",")
    if len(authFields) != 3 {
        return "", probe.NewError(errInvalidAuthHeaderValue)
    }
    if len(strings.Split(strings.TrimSpace(authFields[2]), "=")) != 2 {
        return "", probe.NewError(errMissingFieldsSignatureTag)
    }
    signature := strings.Split(strings.TrimSpace(authFields[2]), "=")[1]
    return signature, nil
}

func getSignedHeadersFromAuth(authHeaderValue string) ([]string, *probe.Error) {
    authValue := strings.TrimPrefix(authHeaderValue, authHeaderPrefix)
    authFields := strings.Split(strings.TrimSpace(authValue), ",")
    if len(authFields) != 3 {
        return nil, probe.NewError(errInvalidAuthHeaderValue)
    }
    if len(strings.Split(strings.TrimSpace(authFields[1]), "=")) != 2 {
        return nil, probe.NewError(errMissingFieldsSignedHeadersTag)
    }
    signedHeaders := strings.Split(strings.Split(strings.TrimSpace(authFields[1]), "=")[1], ";")
    return signedHeaders, nil
}

// verify if region value is valid with configured minioRegion.
func isValidRegion(region string, minioRegion string) *probe.Error {
    if minioRegion == "" {
        minioRegion = "us-east-1"
    }
    if region != minioRegion && region != "US" {
        return probe.NewError(errInvalidRegion)
    }
    return nil
}

// stripRegion - strip only region from auth header.
func stripRegion(authHeaderValue string) (string, *probe.Error) {
    credentialElements, err := getCredentialsFromAuth(authHeaderValue)
    if err != nil {
        return "", err.Trace(authHeaderValue)
    }
    region := credentialElements[2]
    return region, nil
}

// stripAccessKeyID - strip only access key id from auth header.
func stripAccessKeyID(authHeaderValue string) (string, *probe.Error) {
    credentialElements, err := getCredentialsFromAuth(authHeaderValue)
    if err != nil {
        return "", err.Trace()
    }
    accessKeyID := credentialElements[0]
    if !isValidAccessKey(accessKeyID) {
        return "", probe.NewError(errAccessKeyIDInvalid)
    }
    return accessKeyID, nil
}

// initSignatureV4 initializing signature verification.
func initSignatureV4(req *http.Request) (*v4.Signature, *probe.Error) {
    // strip auth from authorization header.
    authHeaderValue := req.Header.Get("Authorization")

    config, err := loadConfigV2()
    if err != nil {
        return nil, err.Trace()
    }

    region, err := stripRegion(authHeaderValue)
    if err != nil {
        return nil, err.Trace(authHeaderValue)
    }

    if err = isValidRegion(region, config.Credentials.Region); err != nil {
        return nil, err.Trace(authHeaderValue)
    }

    accessKeyID, err := stripAccessKeyID(authHeaderValue)
    if err != nil {
        return nil, err.Trace(authHeaderValue)
    }
    signature, err := getSignatureFromAuth(authHeaderValue)
    if err != nil {
        return nil, err.Trace(authHeaderValue)
    }
    signedHeaders, err := getSignedHeadersFromAuth(authHeaderValue)
    if err != nil {
        return nil, err.Trace(authHeaderValue)
    }
    if config.Credentials.AccessKeyID == accessKeyID {
        signature := &v4.Signature{
            AccessKeyID:     config.Credentials.AccessKeyID,
            SecretAccessKey: config.Credentials.SecretAccessKey,
            Region:          region,
            Signature:       signature,
            SignedHeaders:   signedHeaders,
            Request:         req,
        }
        return signature, nil
    }
    return nil, probe.NewError(errAccessKeyIDInvalid)
}

func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]string, *probe.Error) {
    /// HTML Form values
    formValues := make(map[string]string)
    filePart := new(bytes.Buffer)
    var e error
    for e == nil {
        var part *multipart.Part
        part, e = reader.NextPart()
        if part != nil {
            if part.FileName() == "" {
                buffer, e := ioutil.ReadAll(part)
                if e != nil {
                    return nil, nil, probe.NewError(e)
                }
                formValues[http.CanonicalHeaderKey(part.FormName())] = string(buffer)
            } else {
                if _, e := io.Copy(filePart, part); e != nil {
                    return nil, nil, probe.NewError(e)
                }
            }
        }
    }
    return filePart, formValues, nil
}

func applyPolicy(formValues map[string]string) *probe.Error {
    if formValues["X-Amz-Algorithm"] != "AWS4-HMAC-SHA256" {
        return probe.NewError(errUnsupportedAlgorithm)
    }
    /// Decoding policy
    policyBytes, e := base64.StdEncoding.DecodeString(formValues["Policy"])
    if e != nil {
        return probe.NewError(e)
    }
    postPolicyForm, err := v4.ParsePostPolicyForm(string(policyBytes))
    if err != nil {
        return err.Trace()
    }
    if !postPolicyForm.Expiration.After(time.Now().UTC()) {
        return probe.NewError(errPolicyAlreadyExpired)
    }
    if postPolicyForm.Conditions.Policies["$bucket"].Operator == "eq" {
        if formValues["Bucket"] != postPolicyForm.Conditions.Policies["$bucket"].Value {
            return probe.NewError(errPolicyMissingFields)
        }
    }
    if postPolicyForm.Conditions.Policies["$x-amz-date"].Operator == "eq" {
        if formValues["X-Amz-Date"] != postPolicyForm.Conditions.Policies["$x-amz-date"].Value {
            return probe.NewError(errPolicyMissingFields)
        }
    }
    if postPolicyForm.Conditions.Policies["$Content-Type"].Operator == "starts-with" {
        if !strings.HasPrefix(formValues["Content-Type"], postPolicyForm.Conditions.Policies["$Content-Type"].Value) {
            return probe.NewError(errPolicyMissingFields)
        }
    }
    if postPolicyForm.Conditions.Policies["$Content-Type"].Operator == "eq" {
        if formValues["Content-Type"] != postPolicyForm.Conditions.Policies["$Content-Type"].Value {
            return probe.NewError(errPolicyMissingFields)
        }
    }
    if postPolicyForm.Conditions.Policies["$key"].Operator == "starts-with" {
        if !strings.HasPrefix(formValues["Key"], postPolicyForm.Conditions.Policies["$key"].Value) {
            return probe.NewError(errPolicyMissingFields)
        }
    }
    if postPolicyForm.Conditions.Policies["$key"].Operator == "eq" {
        if formValues["Key"] != postPolicyForm.Conditions.Policies["$key"].Value {
            return probe.NewError(errPolicyMissingFields)
        }
    }
    return nil
}

// initPostPresignedPolicyV4 initializing post policy signature verification
func initPostPresignedPolicyV4(formValues map[string]string) (*v4.Signature, *probe.Error) {
    credentialElements := strings.Split(strings.TrimSpace(formValues["X-Amz-Credential"]), "/")
    if len(credentialElements) != 5 {
        return nil, probe.NewError(errCredentialTagMalformed)
    }
    accessKeyID := credentialElements[0]
    if !isValidAccessKey(accessKeyID) {
        return nil, probe.NewError(errAccessKeyIDInvalid)
    }
    config, err := loadConfigV2()
    if err != nil {
        return nil, err.Trace()
    }
    region := credentialElements[2]
    if config.Credentials.AccessKeyID == accessKeyID {
        signature := &v4.Signature{
            AccessKeyID:     config.Credentials.AccessKeyID,
            SecretAccessKey: config.Credentials.SecretAccessKey,
            Region:          region,
            Signature:       formValues["X-Amz-Signature"],
            PresignedPolicy: formValues["Policy"],
        }
        return signature, nil
    }
    return nil, probe.NewError(errAccessKeyIDInvalid)
}

// initPresignedSignatureV4 initializing presigned signature verification
func initPresignedSignatureV4(req *http.Request) (*v4.Signature, *probe.Error) {
    credentialElements := strings.Split(strings.TrimSpace(req.URL.Query().Get("X-Amz-Credential")), "/")
    if len(credentialElements) != 5 {
        return nil, probe.NewError(errCredentialTagMalformed)
    }
    accessKeyID := credentialElements[0]
    if !isValidAccessKey(accessKeyID) {
        return nil, probe.NewError(errAccessKeyIDInvalid)
    }
    config, err := loadConfigV2()
    if err != nil {
        return nil, err.Trace()
    }
    region := credentialElements[2]
    signedHeaders := strings.Split(strings.TrimSpace(req.URL.Query().Get("X-Amz-SignedHeaders")), ";")
    signature := strings.TrimSpace(req.URL.Query().Get("X-Amz-Signature"))
    if config.Credentials.AccessKeyID == accessKeyID {
        signature := &v4.Signature{
            AccessKeyID:     config.Credentials.AccessKeyID,
            SecretAccessKey: config.Credentials.SecretAccessKey,
            Region:          region,
            Signature:       signature,
            SignedHeaders:   signedHeaders,
            Presigned:       true,
            Request:         req,
        }
        return signature, nil
    }
    return nil, probe.NewError(errAccessKeyIDInvalid)
}
@@ -0,0 +1,88 @@
/*
 * Minio Cloud Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
    "fmt"
    "net/http"

    jwtgo "github.com/dgrijalva/jwt-go"
)

const (
    signV4Algorithm = "AWS4-HMAC-SHA256"
    jwtAlgorithm    = "Bearer"
)

// authHandler - handles all the incoming authorization headers and
// validates them if possible.
type authHandler struct {
    handler http.Handler
}

// setAuthHandler to validate authorization header for the incoming request.
func setAuthHandler(h http.Handler) http.Handler {
    return authHandler{h}
}

// handler for validating incoming authorization headers.
func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    // Verify if request is presigned, validate signature inside each handler.
    if isRequestPresignedSignatureV4(r) {
        a.handler.ServeHTTP(w, r)
        return
    }

    // Verify if request has post policy signature, validate signature
    // inside POST policy handler.
    if isRequestPostPolicySignatureV4(r) && r.Method == "POST" {
        a.handler.ServeHTTP(w, r)
        return
    }

    // No authorization found, let the top level caller validate if
    // public request is allowed.
    if _, ok := r.Header["Authorization"]; !ok {
        a.handler.ServeHTTP(w, r)
        return
    }

    // Verify if the signature algorithms are known.
    if !isRequestSignatureV4(r) && !isRequestJWT(r) {
        writeErrorResponse(w, r, SignatureVersionNotSupported, r.URL.Path)
        return
    }

    // Verify JWT authorization header is present.
    if isRequestJWT(r) {
        // Validate Authorization header to be valid.
        jwt := InitJWT()
        token, e := jwtgo.ParseFromRequest(r, func(token *jwtgo.Token) (interface{}, error) {
            if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok {
                return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
            }
            return jwt.secretAccessKey, nil
        })
        if e != nil || !token.Valid {
            w.WriteHeader(http.StatusUnauthorized)
            return
        }
    }

    // For all other signed requests, let top level caller verify.
    a.handler.ServeHTTP(w, r)
}
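The new authHandler absorbs the work of the separate JWT and signature-version wrappers (both removed later in this diff). As a sketch of how it might be wired together, reusing registerHandlers and the constructor names that appear in generic-handlers.go below (the ordering is an assumption, not taken from the patch):

// Sketch only: chaining the middleware added or kept by this PR.
func newAPIHandlerSketch(rtr *router.Router) http.Handler {
    return registerHandlers(rtr,
        setBrowserRedirectHandler,     // browser "/", "/rpc", "/login" -> "/minio/..."
        setBrowserCacheControlHandler, // Cache-Control for browser GETs
        setPrivateBucketHandler,       // reject non-browser access to "/minio"
        setTimeValidityHandler,        // Date / X-Amz-Date skew check
        setIgnoreResourcesHandler,     // reject unimplemented S3 sub-resources
        setCorsHandler,                // CORS
        setAuthHandler,                // the handler shown above
    )
}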
@@ -17,26 +17,34 @@
package main

import (
    "bytes"
    "encoding/hex"
    "io"
    "io/ioutil"
    "mime/multipart"
    "net/http"

    "github.com/gorilla/mux"
    "github.com/minio/minio/pkg/crypto/sha256"
    "github.com/minio/minio/pkg/fs"
    "github.com/minio/minio/pkg/probe"
    v4 "github.com/minio/minio/pkg/signature"
    signV4 "github.com/minio/minio/pkg/signature"
)

// GetBucketLocationHandler - GET Bucket location.
// -------------------------
// This operation returns bucket location.
func (api CloudStorageAPI) GetBucketLocationHandler(w http.ResponseWriter, req *http.Request) {
    vars := mux.Vars(req)
func (api CloudStorageAPI) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
    vars := mux.Vars(r)
    bucket := vars["bucket"]

    if isRequestRequiresACLCheck(req) {
        writeErrorResponse(w, req, AccessDenied, req.URL.Path)
    if isRequestRequiresACLCheck(r) {
        writeErrorResponse(w, r, AccessDenied, r.URL.Path)
        return
    }

    if !isSignV4ReqAuthenticated(api.Signature, r) {
        writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
        return
    }

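Most handlers in this file now gate on isSignV4ReqAuthenticated(api.Signature, r). Its definition is not part of this excerpt; the following is only a guess at its shape, reusing the SetHTTPRequestToVerify/DoesSignatureMatch calls that are visible elsewhere in the diff (the receiver type, payload hash, and name suffix are assumptions):

// Assumption/sketch, not the PR's actual helper.
func isSignV4ReqAuthenticatedSketch(sig *signV4.Signature, r *http.Request) bool {
    if sig == nil {
        return false
    }
    auth := sig.SetHTTPRequestToVerify(r)
    // Hash of an empty payload as a placeholder; the real helper may hash the
    // request body or branch into a presigned check instead.
    ok, err := auth.DoesSignatureMatch(hex.EncodeToString(sha256.New().Sum(nil)))
    if err != nil {
        return false
    }
    return ok
}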
@@ -45,20 +53,23 @@ func (api CloudStorageAPI) GetBucketLocationHandler(w http.ResponseWriter, req *
        errorIf(err.Trace(), "GetBucketMetadata failed.", nil)
        switch err.ToGoError().(type) {
        case fs.BucketNotFound:
            writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
            writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
        case fs.BucketNameInvalid:
            writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
            writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
        default:
            writeErrorResponse(w, req, InternalError, req.URL.Path)
            writeErrorResponse(w, r, InternalError, r.URL.Path)
        }
        return
    }

    // TODO: Location value for LocationResponse is deliberately not used, until
    // we bring in a mechanism of configurable regions. For the time being
    // default region is empty i.e 'us-east-1'.
    encodedSuccessResponse := encodeSuccessResponse(LocationResponse{}) // generate response
    setCommonHeaders(w) // write headers
    // Generate response.
    encodedSuccessResponse := encodeSuccessResponse(LocationResponse{})
    if api.Region != "us-east-1" {
        encodedSuccessResponse = encodeSuccessResponse(LocationResponse{
            Location: api.Region,
        })
    }
    setCommonHeaders(w) // write headers.
    writeSuccessResponse(w, encodedSuccessResponse)
}

@@ -70,18 +81,23 @@ func (api CloudStorageAPI) GetBucketLocationHandler(w http.ResponseWriter, req *
// completed or aborted. This operation returns at most 1,000 multipart
// uploads in the response.
//
func (api CloudStorageAPI) ListMultipartUploadsHandler(w http.ResponseWriter, req *http.Request) {
    vars := mux.Vars(req)
func (api CloudStorageAPI) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
    vars := mux.Vars(r)
    bucket := vars["bucket"]

    if isRequestRequiresACLCheck(req) {
        writeErrorResponse(w, req, AccessDenied, req.URL.Path)
    if isRequestRequiresACLCheck(r) {
        writeErrorResponse(w, r, AccessDenied, r.URL.Path)
        return
    }

    resources := getBucketMultipartResources(req.URL.Query())
    if !isSignV4ReqAuthenticated(api.Signature, r) {
        writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
        return
    }

    resources := getBucketMultipartResources(r.URL.Query())
    if resources.MaxUploads < 0 {
        writeErrorResponse(w, req, InvalidMaxUploads, req.URL.Path)
        writeErrorResponse(w, r, InvalidMaxUploads, r.URL.Path)
        return
    }
    if resources.MaxUploads == 0 {

@@ -93,9 +109,9 @@ func (api CloudStorageAPI) ListMultipartUploadsHandler(w http.ResponseWriter, re
        errorIf(err.Trace(), "ListMultipartUploads failed.", nil)
        switch err.ToGoError().(type) {
        case fs.BucketNotFound:
            writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
            writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
        default:
            writeErrorResponse(w, req, InternalError, req.URL.Path)
            writeErrorResponse(w, r, InternalError, r.URL.Path)
        }
        return
    }
@@ -109,26 +125,31 @@ func (api CloudStorageAPI) ListMultipartUploadsHandler(w http.ResponseWriter, re
}

// ListObjectsHandler - GET Bucket (List Objects)
// -------------------------
// -- -----------------------
// This implementation of the GET operation returns some or all (up to 1000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
func (api CloudStorageAPI) ListObjectsHandler(w http.ResponseWriter, req *http.Request) {
    vars := mux.Vars(req)
func (api CloudStorageAPI) ListObjectsHandler(w http.ResponseWriter, r *http.Request) {
    vars := mux.Vars(r)
    bucket := vars["bucket"]

    if isRequestRequiresACLCheck(req) {
    if isRequestRequiresACLCheck(r) {
        if api.Filesystem.IsPrivateBucket(bucket) {
            writeErrorResponse(w, req, AccessDenied, req.URL.Path)
            writeErrorResponse(w, r, AccessDenied, r.URL.Path)
            return
        }
    }

    if !isSignV4ReqAuthenticated(api.Signature, r) {
        writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
        return
    }

    // TODO handle encoding type.
    prefix, marker, delimiter, maxkeys, _ := getBucketResources(req.URL.Query())
    prefix, marker, delimiter, maxkeys, _ := getBucketResources(r.URL.Query())
    if maxkeys < 0 {
        writeErrorResponse(w, req, InvalidMaxKeys, req.URL.Path)
        writeErrorResponse(w, r, InvalidMaxKeys, r.URL.Path)
        return
    }
    if maxkeys == 0 {

@@ -148,16 +169,16 @@ func (api CloudStorageAPI) ListObjectsHandler(w http.ResponseWriter, req *http.R
    }
    switch err.ToGoError().(type) {
    case fs.BucketNameInvalid:
        writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
        writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
    case fs.BucketNotFound:
        writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
        writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
    case fs.ObjectNotFound:
        writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
        writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
    case fs.ObjectNameInvalid:
        writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
        writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
    default:
        errorIf(err.Trace(), "ListObjects failed.", nil)
        writeErrorResponse(w, req, InternalError, req.URL.Path)
        writeErrorResponse(w, r, InternalError, r.URL.Path)
    }
}

@@ -165,9 +186,14 @@ func (api CloudStorageAPI) ListObjectsHandler(w http.ResponseWriter, req *http.R
// -----------
// This implementation of the GET operation returns a list of all buckets
// owned by the authenticated sender of the request.
func (api CloudStorageAPI) ListBucketsHandler(w http.ResponseWriter, req *http.Request) {
    if isRequestRequiresACLCheck(req) {
        writeErrorResponse(w, req, AccessDenied, req.URL.Path)
func (api CloudStorageAPI) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
    if isRequestRequiresACLCheck(r) {
        writeErrorResponse(w, r, AccessDenied, r.URL.Path)
        return
    }

    if !isSignV4ReqAuthenticated(api.Signature, r) {
        writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
        return
    }

@@ -183,90 +209,83 @@ func (api CloudStorageAPI) ListBucketsHandler(w http.ResponseWriter, req *http.R
        return
    }
    errorIf(err.Trace(), "ListBuckets failed.", nil)
    writeErrorResponse(w, req, InternalError, req.URL.Path)
    writeErrorResponse(w, r, InternalError, r.URL.Path)
}

// PutBucketHandler - PUT Bucket
// ----------
// This implementation of the PUT operation creates a new bucket for authenticated request
func (api CloudStorageAPI) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
    vars := mux.Vars(req)
func (api CloudStorageAPI) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
    vars := mux.Vars(r)
    bucket := vars["bucket"]

    if isRequestRequiresACLCheck(req) {
        writeErrorResponse(w, req, AccessDenied, req.URL.Path)
    if isRequestRequiresACLCheck(r) {
        writeErrorResponse(w, r, AccessDenied, r.URL.Path)
        return
    }

    // read from 'x-amz-acl'
    aclType := getACLType(req)
    aclType := getACLType(r)
    if aclType == unsupportedACLType {
        writeErrorResponse(w, req, NotImplemented, req.URL.Path)
        writeErrorResponse(w, r, NotImplemented, r.URL.Path)
        return
    }

    var signature *v4.Signature
    // Init signature V4 verification
    if isRequestSignatureV4(req) {
        var err *probe.Error
        signature, err = initSignatureV4(req)
        if err != nil {
            switch err.ToGoError() {
            case errInvalidRegion:
                errorIf(err.Trace(), "Unknown region in authorization header.", nil)
                writeErrorResponse(w, req, AuthorizationHeaderMalformed, req.URL.Path)
                return
            case errAccessKeyIDInvalid:
                errorIf(err.Trace(), "Invalid access key id.", nil)
                writeErrorResponse(w, req, InvalidAccessKeyID, req.URL.Path)
                return
            default:
                errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
                writeErrorResponse(w, req, InternalError, req.URL.Path)
                return
            }
        }
    }

    // if body of request is non-nil then check for validity of Content-Length
    if req.Body != nil {
    if r.Body != nil {
        /// if Content-Length is unknown/missing, deny the request
        if req.ContentLength == -1 && !contains(req.TransferEncoding, "chunked") {
            writeErrorResponse(w, req, MissingContentLength, req.URL.Path)
        if r.ContentLength == -1 && !contains(r.TransferEncoding, "chunked") {
            writeErrorResponse(w, r, MissingContentLength, r.URL.Path)
            return
        }
    if signature != nil {
        locationBytes, e := ioutil.ReadAll(req.Body)
        if e != nil {
            errorIf(probe.NewError(e), "MakeBucket failed.", nil)
            writeErrorResponse(w, req, InternalError, req.URL.Path)
            return
        }
        sh := sha256.New()
        sh.Write(locationBytes)
        ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
        if err != nil {
            errorIf(err.Trace(), "MakeBucket failed.", nil)
            writeErrorResponse(w, req, InternalError, req.URL.Path)
            return
        }
        if !ok {
            writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
            return
        }
    }

    // Set http request for signature.
    auth := api.Signature.SetHTTPRequestToVerify(r)
    if isRequestPresignedSignatureV4(r) {
        ok, err := auth.DoesPresignedSignatureMatch()
        if err != nil {
            errorIf(err.Trace(r.URL.String()), "Presigned signature verification failed.", nil)
            writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
            return
        }
        if !ok {
            writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
            return
        }
    } else if isRequestSignatureV4(r) {
        // Verify signature for the incoming body if any.
        locationBytes, e := ioutil.ReadAll(r.Body)
        if e != nil {
            errorIf(probe.NewError(e), "MakeBucket failed.", nil)
            writeErrorResponse(w, r, InternalError, r.URL.Path)
            return
        }
        sh := sha256.New()
        sh.Write(locationBytes)
        ok, err := auth.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
        if err != nil {
            errorIf(err.Trace(), "MakeBucket failed.", nil)
            writeErrorResponse(w, r, InternalError, r.URL.Path)
            return
        }
        if !ok {
            writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
            return
        }
    }

    // Make bucket.
    err := api.Filesystem.MakeBucket(bucket, getACLTypeString(aclType))
    if err != nil {
        errorIf(err.Trace(), "MakeBucket failed.", nil)
        switch err.ToGoError().(type) {
        case fs.BucketNameInvalid:
            writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
            writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
        case fs.BucketExists:
            writeErrorResponse(w, req, BucketAlreadyExists, req.URL.Path)
            writeErrorResponse(w, r, BucketAlreadyExists, r.URL.Path)
        default:
            writeErrorResponse(w, req, InternalError, req.URL.Path)
            writeErrorResponse(w, r, InternalError, r.URL.Path)
        }
        return
    }
@@ -275,16 +294,41 @@ func (api CloudStorageAPI) PutBucketHandler(w http.ResponseWriter, req *http.Req
    writeSuccessResponse(w, nil)
}

func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]string, *probe.Error) {
    /// HTML Form values
    formValues := make(map[string]string)
    filePart := new(bytes.Buffer)
    var e error
    for e == nil {
        var part *multipart.Part
        part, e = reader.NextPart()
        if part != nil {
            if part.FileName() == "" {
                buffer, e := ioutil.ReadAll(part)
                if e != nil {
                    return nil, nil, probe.NewError(e)
                }
                formValues[http.CanonicalHeaderKey(part.FormName())] = string(buffer)
            } else {
                if _, e := io.Copy(filePart, part); e != nil {
                    return nil, nil, probe.NewError(e)
                }
            }
        }
    }
    return filePart, formValues, nil
}

// PostPolicyBucketHandler - POST policy
// ----------
// This implementation of the POST operation handles object creation with a specified
// signature policy in multipart/form-data
func (api CloudStorageAPI) PostPolicyBucketHandler(w http.ResponseWriter, req *http.Request) {
func (api CloudStorageAPI) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
    // if body of request is non-nil then check for validity of Content-Length
    if req.Body != nil {
    if r.Body != nil {
        /// if Content-Length is unknown/missing, deny the request
        if req.ContentLength == -1 {
            writeErrorResponse(w, req, MissingContentLength, req.URL.Path)
        if r.ContentLength == -1 {
            writeErrorResponse(w, r, MissingContentLength, r.URL.Path)
            return
        }
    }
@@ -292,65 +336,61 @@ func (api CloudStorageAPI) PostPolicyBucketHandler(w http.ResponseWriter, req *h
    // Here the parameter is the size of the form data that should
    // be loaded in memory, the remaining being put in temporary
    // files
    reader, e := req.MultipartReader()
    reader, e := r.MultipartReader()
    if e != nil {
        errorIf(probe.NewError(e), "Unable to initialize multipart reader.", nil)
        writeErrorResponse(w, req, MalformedPOSTRequest, req.URL.Path)
        writeErrorResponse(w, r, MalformedPOSTRequest, r.URL.Path)
        return
    }

    fileBody, formValues, err := extractHTTPFormValues(reader)
    if err != nil {
        errorIf(err.Trace(), "Unable to parse form values.", nil)
        writeErrorResponse(w, req, MalformedPOSTRequest, req.URL.Path)
        writeErrorResponse(w, r, MalformedPOSTRequest, r.URL.Path)
        return
    }
    bucket := mux.Vars(req)["bucket"]
    bucket := mux.Vars(r)["bucket"]
    formValues["Bucket"] = bucket
    object := formValues["Key"]
    signature, err := initPostPresignedPolicyV4(formValues)
    if err != nil {
        errorIf(err.Trace(), "Unable to initialize post policy presigned.", nil)
        writeErrorResponse(w, req, MalformedPOSTRequest, req.URL.Path)
        return
    }
    var ok bool
    if ok, err = signature.DoesPolicySignatureMatch(formValues["X-Amz-Date"]); err != nil {

    // Set http request for signature.
    api.Signature.SetHTTPRequestToVerify(r)

    // Verify policy signature.
    ok, err = api.Signature.DoesPolicySignatureMatch(formValues)
    if err != nil {
        errorIf(err.Trace(), "Unable to verify signature.", nil)
        writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
        writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
        return
    }
    if ok == false {
        writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
    if !ok {
        writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
        return
    }
    if err = applyPolicy(formValues); err != nil {
    if err = signV4.ApplyPolicyCond(formValues); err != nil {
        errorIf(err.Trace(), "Invalid request, policy doesn't match with the endpoint.", nil)
        writeErrorResponse(w, req, MalformedPOSTRequest, req.URL.Path)
        writeErrorResponse(w, r, MalformedPOSTRequest, r.URL.Path)
        return
    }
    metadata, err := api.Filesystem.CreateObject(bucket, object, "", 0, fileBody, nil)
    metadata, err := api.Filesystem.CreateObject(bucket, object, "", -1, fileBody, nil)
    if err != nil {
        errorIf(err.Trace(), "CreateObject failed.", nil)
        switch err.ToGoError().(type) {
        case fs.RootPathFull:
            writeErrorResponse(w, req, RootPathFull, req.URL.Path)
            writeErrorResponse(w, r, RootPathFull, r.URL.Path)
        case fs.BucketNotFound:
            writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
            writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
        case fs.BucketNameInvalid:
            writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
            writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
        case fs.BadDigest:
            writeErrorResponse(w, req, BadDigest, req.URL.Path)
        case v4.SigDoesNotMatch:
            writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
            writeErrorResponse(w, r, BadDigest, r.URL.Path)
        case fs.IncompleteBody:
            writeErrorResponse(w, req, IncompleteBody, req.URL.Path)
        case fs.EntityTooLarge:
            writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
            writeErrorResponse(w, r, IncompleteBody, r.URL.Path)
        case fs.InvalidDigest:
            writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
            writeErrorResponse(w, r, InvalidDigest, r.URL.Path)
        default:
            writeErrorResponse(w, req, InternalError, req.URL.Path)
            writeErrorResponse(w, r, InternalError, r.URL.Path)
        }
        return
    }
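For orientation, the POST-policy path above reads a handful of multipart form fields before verifying the policy signature. The keys below are the ones referenced in this diff and in the deleted applyPolicy; the literal values are placeholders, not a signed example:

// Illustrative only: the form fields consumed by the POST-policy flow.
var examplePostPolicyForm = map[string]string{
    "X-Amz-Algorithm":  "AWS4-HMAC-SHA256",
    "X-Amz-Credential": "ACCESSKEY/20160101/us-east-1/s3/aws4_request",
    "X-Amz-Date":       "20160101T000000Z",
    "X-Amz-Signature":  "<hex signature>",
    "Policy":           "<base64-encoded policy document>",
    "Key":              "uploaded-object-name",
    "Bucket":           "example-bucket",
}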
@@ -363,19 +403,24 @@ func (api CloudStorageAPI) PostPolicyBucketHandler(w http.ResponseWriter, req *h
// PutBucketACLHandler - PUT Bucket ACL
// ----------
// This implementation of the PUT operation modifies the bucketACL for authenticated request
func (api CloudStorageAPI) PutBucketACLHandler(w http.ResponseWriter, req *http.Request) {
    vars := mux.Vars(req)
func (api CloudStorageAPI) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
    vars := mux.Vars(r)
    bucket := vars["bucket"]

    if isRequestRequiresACLCheck(req) {
        writeErrorResponse(w, req, AccessDenied, req.URL.Path)
    if isRequestRequiresACLCheck(r) {
        writeErrorResponse(w, r, AccessDenied, r.URL.Path)
        return
    }

    if !isSignV4ReqAuthenticated(api.Signature, r) {
        writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
        return
    }

    // read from 'x-amz-acl'
    aclType := getACLType(req)
    aclType := getACLType(r)
    if aclType == unsupportedACLType {
        writeErrorResponse(w, req, NotImplemented, req.URL.Path)
        writeErrorResponse(w, r, NotImplemented, r.URL.Path)
        return
    }
    err := api.Filesystem.SetBucketMetadata(bucket, map[string]string{"acl": getACLTypeString(aclType)})

@@ -383,11 +428,11 @@ func (api CloudStorageAPI) PutBucketACLHandler(w http.ResponseWriter, req *http.
    errorIf(err.Trace(), "PutBucketACL failed.", nil)
    switch err.ToGoError().(type) {
    case fs.BucketNameInvalid:
        writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
        writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
    case fs.BucketNotFound:
        writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
        writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
    default:
        writeErrorResponse(w, req, InternalError, req.URL.Path)
        writeErrorResponse(w, r, InternalError, r.URL.Path)
    }
    return
}
@@ -400,12 +445,17 @@ func (api CloudStorageAPI) PutBucketACLHandler(w http.ResponseWriter, req *http.
// of a bucket. One must have permission to access the bucket to
// know its ``acl``. This operation will return response of 404
// if bucket not found and 403 for invalid credentials.
func (api CloudStorageAPI) GetBucketACLHandler(w http.ResponseWriter, req *http.Request) {
    vars := mux.Vars(req)
func (api CloudStorageAPI) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
    vars := mux.Vars(r)
    bucket := vars["bucket"]

    if isRequestRequiresACLCheck(req) {
        writeErrorResponse(w, req, AccessDenied, req.URL.Path)
    if isRequestRequiresACLCheck(r) {
        writeErrorResponse(w, r, AccessDenied, r.URL.Path)
        return
    }

    if !isSignV4ReqAuthenticated(api.Signature, r) {
        writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
        return
    }

@@ -414,11 +464,11 @@ func (api CloudStorageAPI) GetBucketACLHandler(w http.ResponseWriter, req *http.
    errorIf(err.Trace(), "GetBucketMetadata failed.", nil)
    switch err.ToGoError().(type) {
    case fs.BucketNotFound:
        writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
        writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
    case fs.BucketNameInvalid:
        writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
        writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
    default:
        writeErrorResponse(w, req, InternalError, req.URL.Path)
        writeErrorResponse(w, r, InternalError, r.URL.Path)
    }
    return
}
@@ -437,27 +487,32 @@ func (api CloudStorageAPI) GetBucketACLHandler(w http.ResponseWriter, req *http.
// The operation returns a 200 OK if the bucket exists and you
// have permission to access it. Otherwise, the operation might
// return responses such as 404 Not Found and 403 Forbidden.
func (api CloudStorageAPI) HeadBucketHandler(w http.ResponseWriter, req *http.Request) {
    vars := mux.Vars(req)
func (api CloudStorageAPI) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
    vars := mux.Vars(r)
    bucket := vars["bucket"]

    if isRequestRequiresACLCheck(req) {
    if isRequestRequiresACLCheck(r) {
        if api.Filesystem.IsPrivateBucket(bucket) {
            writeErrorResponse(w, req, AccessDenied, req.URL.Path)
            writeErrorResponse(w, r, AccessDenied, r.URL.Path)
            return
        }
    }

    if !isSignV4ReqAuthenticated(api.Signature, r) {
        writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
        return
    }

    _, err := api.Filesystem.GetBucketMetadata(bucket)
    if err != nil {
        errorIf(err.Trace(), "GetBucketMetadata failed.", nil)
        switch err.ToGoError().(type) {
        case fs.BucketNotFound:
            writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
            writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
        case fs.BucketNameInvalid:
            writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
            writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
        default:
            writeErrorResponse(w, req, InternalError, req.URL.Path)
            writeErrorResponse(w, r, InternalError, r.URL.Path)
        }
        return
    }

@@ -465,12 +520,17 @@ func (api CloudStorageAPI) HeadBucketHandler(w http.ResponseWriter, req *http.Re
}

// DeleteBucketHandler - Delete bucket
func (api CloudStorageAPI) DeleteBucketHandler(w http.ResponseWriter, req *http.Request) {
    vars := mux.Vars(req)
func (api CloudStorageAPI) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
    vars := mux.Vars(r)
    bucket := vars["bucket"]

    if isRequestRequiresACLCheck(req) {
        writeErrorResponse(w, req, AccessDenied, req.URL.Path)
    if isRequestRequiresACLCheck(r) {
        writeErrorResponse(w, r, AccessDenied, r.URL.Path)
        return
    }

    if !isSignV4ReqAuthenticated(api.Signature, r) {
        writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
        return
    }

@@ -479,11 +539,11 @@ func (api CloudStorageAPI) DeleteBucketHandler(w http.ResponseWriter, req *http.
    errorIf(err.Trace(), "DeleteBucket failed.", nil)
    switch err.ToGoError().(type) {
    case fs.BucketNotFound:
        writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
        writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
    case fs.BucketNotEmpty:
        writeErrorResponse(w, req, BucketNotEmpty, req.URL.Path)
        writeErrorResponse(w, r, BucketNotEmpty, r.URL.Path)
    default:
        writeErrorResponse(w, req, InternalError, req.URL.Path)
        writeErrorResponse(w, r, InternalError, r.URL.Path)
    }
    return
}

@@ -26,6 +26,11 @@ import (
    "github.com/rs/cors"
)

const (
    iso8601Format = "20060102T150405Z"
    privateBucket = "/minio"
)

// HandlerFunc - useful to chain different middleware http.Handler
type HandlerFunc func(http.Handler) http.Handler

@@ -38,59 +43,81 @@ func registerHandlers(mux *router.Router, handlerFns ...HandlerFunc) http.Handle
    return f
}

type timeHandler struct {
    handler http.Handler
// Attempts to parse date string into known date layouts. Date layouts
// currently supported are ``time.RFC1123``, ``time.RFC1123Z`` and
// special ``iso8601Format``.
func parseKnownLayouts(date string) (time.Time, error) {
    parsedTime, e := time.Parse(time.RFC1123, date)
    if e == nil {
        return parsedTime, nil
    }
    parsedTime, e = time.Parse(time.RFC1123Z, date)
    if e == nil {
        return parsedTime, nil
    }
    parsedTime, e = time.Parse(iso8601Format, date)
    if e == nil {
        return parsedTime, nil
    }
    return time.Time{}, e
}

type resourceHandler struct {
    handler http.Handler
}

type ignoreSignatureV2RequestHandler struct {
    handler http.Handler
}

func parseDate(req *http.Request) (time.Time, error) {
// Parse date string from incoming header, currently supports and verifies
// the following HTTP headers.
//
// - X-Amz-Date
// - X-Minio-Date
// - Date
//
// In following time layouts ``time.RFC1123``, ``time.RFC1123Z`` and ``iso8601Format``.
func parseDateHeader(req *http.Request) (time.Time, error) {
    amzDate := req.Header.Get(http.CanonicalHeaderKey("x-amz-date"))
    switch {
    case amzDate != "":
        if _, err := time.Parse(time.RFC1123, amzDate); err == nil {
            return time.Parse(time.RFC1123, amzDate)
        }
        if _, err := time.Parse(time.RFC1123Z, amzDate); err == nil {
            return time.Parse(time.RFC1123Z, amzDate)
        }
        if _, err := time.Parse(iso8601Format, amzDate); err == nil {
            return time.Parse(iso8601Format, amzDate)
        }
    if amzDate != "" {
        return parseKnownLayouts(amzDate)
    }
    minioDate := req.Header.Get(http.CanonicalHeaderKey("x-minio-date"))
    switch {
    case minioDate != "":
        if _, err := time.Parse(time.RFC1123, minioDate); err == nil {
            return time.Parse(time.RFC1123, minioDate)
        }
        if _, err := time.Parse(time.RFC1123Z, minioDate); err == nil {
            return time.Parse(time.RFC1123Z, minioDate)
        }
        if _, err := time.Parse(iso8601Format, minioDate); err == nil {
            return time.Parse(iso8601Format, minioDate)
    if minioDate != "" {
        return parseKnownLayouts(minioDate)
    }
    genericDate := req.Header.Get("Date")
    if genericDate != "" {
        return parseKnownLayouts(genericDate)
    }
    return time.Time{}, errors.New("Date header missing, invalid request.")
}

// Adds redirect rules for incoming requests.
type redirectHandler struct {
    handler        http.Handler
    locationPrefix string
}

func setBrowserRedirectHandler(h http.Handler) http.Handler {
    return redirectHandler{handler: h, locationPrefix: privateBucket}
}

func (h redirectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    // Re-direction handled specifically for browsers.
    if strings.Contains(r.Header.Get("User-Agent"), "Mozilla") {
        switch r.URL.Path {
        case "/":
            // This could be the default route for browser, redirect
            // to 'locationPrefix/'.
            fallthrough
        case "/rpc":
            // This is '/rpc' API route for browser, redirect to
            // 'locationPrefix/rpc'.
            fallthrough
        case "/login":
            // This is '/login' route for browser, redirect to
            // 'locationPrefix/login'.
            location := h.locationPrefix + r.URL.Path
            // Redirect to new location.
            http.Redirect(w, r, location, http.StatusTemporaryRedirect)
            return
        }
    }
    date := req.Header.Get("Date")
    switch {
    case date != "":
        if _, err := time.Parse(time.RFC1123, date); err == nil {
            return time.Parse(time.RFC1123, date)
        }
        if _, err := time.Parse(time.RFC1123Z, date); err == nil {
            return time.Parse(time.RFC1123Z, date)
        }
        if _, err := time.Parse(iso8601Format, amzDate); err == nil {
            return time.Parse(iso8601Format, amzDate)
        }
    }
    return time.Time{}, errors.New("invalid request")
    h.handler.ServeHTTP(w, r)
}

// Adds Cache-Control header

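parseKnownLayouts (and through it parseDateHeader) accepts RFC1123, RFC1123Z, and the compact ISO 8601 layout used for X-Amz-Date. A small usage sketch, with arbitrary example dates and assuming the usual "fmt" import:

// Example inputs accepted by parseKnownLayouts added above.
func demoParseKnownLayouts() {
    for _, d := range []string{
        "Mon, 02 Jan 2006 15:04:05 UTC",   // time.RFC1123
        "Mon, 02 Jan 2006 15:04:05 -0700", // time.RFC1123Z
        "20060102T150405Z",                // iso8601Format
    } {
        if t, err := parseKnownLayouts(d); err == nil {
            fmt.Println(d, "->", t.UTC())
        }
    }
}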
@@ -98,18 +125,41 @@ type cacheControlHandler struct {
    handler http.Handler
}

func setCacheControlHandler(h http.Handler) http.Handler {
func setBrowserCacheControlHandler(h http.Handler) http.Handler {
    return cacheControlHandler{h}
}

func (h cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    if r.Method == "GET" {
        // expire the cache in one week
        w.Header().Set("Cache-Control", "public, max-age=604800")
    if r.Method == "GET" && strings.Contains(r.Header.Get("User-Agent"), "Mozilla") {
        // Expire cache in one hour for all browser requests.
        w.Header().Set("Cache-Control", "public, max-age=3600")
    }
    h.handler.ServeHTTP(w, r)
}

// Adds verification for incoming paths.
type minioPrivateBucketHandler struct {
    handler       http.Handler
    privateBucket string
}

func setPrivateBucketHandler(h http.Handler) http.Handler {
    return minioPrivateBucketHandler{handler: h, privateBucket: privateBucket}
}

func (h minioPrivateBucketHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    // For all non browser requests, reject access to 'privateBucket'.
    if !strings.Contains(r.Header.Get("User-Agent"), "Mozilla") && strings.HasPrefix(r.URL.Path, privateBucket) {
        writeErrorResponse(w, r, AllAccessDisabled, r.URL.Path)
        return
    }
    h.handler.ServeHTTP(w, r)
}

type timeHandler struct {
    handler http.Handler
}

// setTimeValidityHandler to validate parsable time over http header
func setTimeValidityHandler(h http.Handler) http.Handler {
    return timeHandler{h}
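A quick way to see the new private-bucket behaviour is an httptest-style check. This is a sketch, not a test from the PR: it assumes writeErrorResponse emits the HTTP status mapped for AllAccessDisabled (403) and that the usual "testing", "net/http", and "net/http/httptest" imports are in place.

// Sketch: non-browser clients are rejected on the reserved "/minio" prefix,
// browser user agents pass through to the wrapped handler.
func TestPrivateBucketHandlerSketch(t *testing.T) {
    h := setPrivateBucketHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusOK)
    }))

    // Non-browser user agent: expected to be rejected.
    rec := httptest.NewRecorder()
    req, _ := http.NewRequest("GET", "/minio/login", nil)
    req.Header.Set("User-Agent", "minio-go/1.0")
    h.ServeHTTP(rec, req)
    if rec.Code != http.StatusForbidden {
        t.Fatalf("expected 403, got %d", rec.Code)
    }

    // Browser user agent: allowed through.
    rec = httptest.NewRecorder()
    req, _ = http.NewRequest("GET", "/minio/login", nil)
    req.Header.Set("User-Agent", "Mozilla/5.0")
    h.ServeHTTP(rec, req)
    if rec.Code != http.StatusOK {
        t.Fatalf("expected 200, got %d", rec.Code)
    }
}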
@@ -118,19 +168,18 @@ func setTimeValidityHandler(h http.Handler) http.Handler {
func (h timeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    // Verify if date headers are set, if not reject the request
    if r.Header.Get("Authorization") != "" {
        if r.Header.Get(http.CanonicalHeaderKey("x-amz-date")) == "" && r.Header.Get(http.CanonicalHeaderKey("x-minio-date")) == "" && r.Header.Get("Date") == "" {
            // there is no way to knowing if this is a valid request, could be a attack reject such clients
            writeErrorResponse(w, r, RequestTimeTooSkewed, r.URL.Path)
            return
        }
        date, err := parseDate(r)
        if err != nil {
            // there is no way to knowing if this is a valid request, could be a attack reject such clients
        date, e := parseDateHeader(r)
        if e != nil {
            // All our internal APIs are sensitive towards Date
            // header, for all requests where Date header is not
            // present we will reject such clients.
            writeErrorResponse(w, r, RequestTimeTooSkewed, r.URL.Path)
            return
        }
        duration := time.Since(date)
        minutes := time.Duration(5) * time.Minute
        // Verify if the request date header is more than 5minutes
        // late, reject such clients.
        if duration.Minutes() > minutes.Minutes() {
            writeErrorResponse(w, r, RequestTimeTooSkewed, r.URL.Path)
            return
@@ -139,6 +188,10 @@ func (h timeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    h.handler.ServeHTTP(w, r)
}

type resourceHandler struct {
    handler http.Handler
}

// setCorsHandler handler for CORS (Cross Origin Resource Sharing)
func setCorsHandler(h http.Handler) http.Handler {
    c := cors.New(cors.Options{
@@ -149,27 +202,10 @@ func setCorsHandler(h http.Handler) http.Handler {
    return c.Handler(h)
}

// setIgnoreSignatureV2RequestHandler -
// Verify if authorization header has signature version '2', reject it cleanly.
func setIgnoreSignatureV2RequestHandler(h http.Handler) http.Handler {
    return ignoreSignatureV2RequestHandler{h}
}

// Ignore signature version '2' ServerHTTP() wrapper.
func (h ignoreSignatureV2RequestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    if _, ok := r.Header["Authorization"]; ok {
        if !strings.HasPrefix(r.Header.Get("Authorization"), authHeaderPrefix) {
            writeErrorResponse(w, r, SignatureVersionNotSupported, r.URL.Path)
            return
        }
    }
    h.handler.ServeHTTP(w, r)
}

// setIgnoreResourcesHandler -
// Ignore resources handler is wrapper handler used for API request resource validation
// Since we do not support all the S3 queries, it is necessary for us to throw back a
// valid error message indicating such a feature is not implemented.
// valid error message indicating that requested feature is not implemented.
func setIgnoreResourcesHandler(h http.Handler) http.Handler {
    return resourceHandler{h}
}

@@ -1,57 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
    "fmt"
    "net/http"

    jwtgo "github.com/dgrijalva/jwt-go"
)

type jwtAuthHandler struct {
    handler http.Handler
}

// setJWTAuthHandler -
// Verify if authorization header is of form JWT, reject it otherwise.
func setJWTAuthHandler(h http.Handler) http.Handler {
    return jwtAuthHandler{h}
}

// Ignore request if authorization header is not valid.
func (h jwtAuthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    // Let the top level caller handle if the requests should be
    // allowed, if there are no Authorization headers.
    if r.Header.Get("Authorization") == "" {
        h.handler.ServeHTTP(w, r)
        return
    }
    // Validate Authorization header to be valid.
    jwt := InitJWT()
    token, e := jwtgo.ParseFromRequest(r, func(token *jwtgo.Token) (interface{}, error) {
        if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok {
            return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
        }
        return jwt.secretAccessKey, nil
    })
    if e != nil || !token.Valid {
        w.WriteHeader(http.StatusUnauthorized)
        return
    }
    h.handler.ServeHTTP(w, r)
}
@@ -23,8 +23,6 @@ import (

    "github.com/gorilla/mux"
    "github.com/minio/minio/pkg/fs"
    "github.com/minio/minio/pkg/probe"
    v4 "github.com/minio/minio/pkg/signature"
)

const (
@@ -53,48 +51,53 @@ func setResponseHeaders(w http.ResponseWriter, reqParams url.Values) {
// ----------
// This implementation of the GET operation retrieves object. To use GET,
// you must have READ access to the object.
func (api CloudStorageAPI) GetObjectHandler(w http.ResponseWriter, req *http.Request) {
func (api CloudStorageAPI) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
    var object, bucket string
    vars := mux.Vars(req)
    vars := mux.Vars(r)
    bucket = vars["bucket"]
    object = vars["object"]

    if isRequestRequiresACLCheck(req) {
    if isRequestRequiresACLCheck(r) {
        if api.Filesystem.IsPrivateBucket(bucket) {
            writeErrorResponse(w, req, AccessDenied, req.URL.Path)
            writeErrorResponse(w, r, AccessDenied, r.URL.Path)
            return
        }
    }

    if !isSignV4ReqAuthenticated(api.Signature, r) {
        writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
        return
    }

    metadata, err := api.Filesystem.GetObjectMetadata(bucket, object)
    if err != nil {
        errorIf(err.Trace(), "GetObject failed.", nil)
        switch err.ToGoError().(type) {
        case fs.BucketNameInvalid:
            writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
            writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
        case fs.BucketNotFound:
            writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
            writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
        case fs.ObjectNotFound:
            writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
            writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
        case fs.ObjectNameInvalid:
            writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
            writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
        default:
            writeErrorResponse(w, req, InternalError, req.URL.Path)
            writeErrorResponse(w, r, InternalError, r.URL.Path)
        }
        return
    }
    var hrange *httpRange
    hrange, err = getRequestedRange(req.Header.Get("Range"), metadata.Size)
    hrange, err = getRequestedRange(r.Header.Get("Range"), metadata.Size)
    if err != nil {
        writeErrorResponse(w, req, InvalidRange, req.URL.Path)
        writeErrorResponse(w, r, InvalidRange, r.URL.Path)
        return
    }

    // Set standard object headers.
    setObjectHeaders(w, metadata, hrange)

    // Set any additional requested response headers.
    setResponseHeaders(w, req.URL.Query())
    // Set any additional requested response headers.
    setResponseHeaders(w, r.URL.Query())

    // Get the object.
    if _, err = api.Filesystem.GetObject(w, bucket, object, hrange.start, hrange.length); err != nil {
@@ -106,32 +109,37 @@ func (api CloudStorageAPI) GetObjectHandler(w http.ResponseWriter, req *http.Req
// HeadObjectHandler - HEAD Object
// -----------
// The HEAD operation retrieves metadata from an object without returning the object itself.
func (api CloudStorageAPI) HeadObjectHandler(w http.ResponseWriter, req *http.Request) {
func (api CloudStorageAPI) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
    var object, bucket string
    vars := mux.Vars(req)
    vars := mux.Vars(r)
    bucket = vars["bucket"]
    object = vars["object"]

    if isRequestRequiresACLCheck(req) {
    if isRequestRequiresACLCheck(r) {
        if api.Filesystem.IsPrivateBucket(bucket) {
            writeErrorResponse(w, req, AccessDenied, req.URL.Path)
            writeErrorResponse(w, r, AccessDenied, r.URL.Path)
            return
        }
    }

    if !isSignV4ReqAuthenticated(api.Signature, r) {
        writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
        return
    }

    metadata, err := api.Filesystem.GetObjectMetadata(bucket, object)
    if err != nil {
        switch err.ToGoError().(type) {
        case fs.BucketNameInvalid:
            writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
            writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
        case fs.BucketNotFound:
            writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
            writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
        case fs.ObjectNotFound:
            writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
            writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
        case fs.ObjectNameInvalid:
            writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
            writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
        default:
            writeErrorResponse(w, req, InternalError, req.URL.Path)
            writeErrorResponse(w, r, InternalError, r.URL.Path)
        }
        return
    }
@ -142,86 +150,78 @@ func (api CloudStorageAPI) HeadObjectHandler(w http.ResponseWriter, req *http.Re
|
|||
// PutObjectHandler - PUT Object
|
||||
// ----------
|
||||
// This implementation of the PUT operation adds an object to a bucket.
|
||||
func (api CloudStorageAPI) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
|
||||
func (api CloudStorageAPI) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||
var object, bucket string
|
||||
vars := mux.Vars(req)
|
||||
vars := mux.Vars(r)
|
||||
bucket = vars["bucket"]
|
||||
object = vars["object"]
|
||||
|
||||
if isRequestRequiresACLCheck(req) {
|
||||
if isRequestRequiresACLCheck(r) {
|
||||
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
|
||||
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
|
||||
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// get Content-MD5 sent by client and verify if valid
|
||||
md5 := req.Header.Get("Content-MD5")
|
||||
md5 := r.Header.Get("Content-MD5")
|
||||
if !isValidMD5(md5) {
|
||||
writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
|
||||
writeErrorResponse(w, r, InvalidDigest, r.URL.Path)
|
||||
return
|
||||
}
|
||||
/// if Content-Length is unknown/missing, deny the request
|
||||
size := req.ContentLength
|
||||
if size == -1 {
|
||||
writeErrorResponse(w, req, MissingContentLength, req.URL.Path)
|
||||
size := r.ContentLength
|
||||
if size == -1 && !contains(r.TransferEncoding, "chunked") {
|
||||
writeErrorResponse(w, r, MissingContentLength, r.URL.Path)
|
||||
return
|
||||
}
|
||||
/// maximum Upload size for objects in a single operation
|
||||
if isMaxObjectSize(size) {
|
||||
writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
|
||||
writeErrorResponse(w, r, EntityTooLarge, r.URL.Path)
|
||||
return
|
||||
}
|
||||
|
||||
var signature *v4.Signature
|
||||
if isRequestSignatureV4(req) {
|
||||
// Init signature V4 verification
|
||||
var err *probe.Error
|
||||
signature, err = initSignatureV4(req)
|
||||
// Set http request for signature.
|
||||
auth := api.Signature.SetHTTPRequestToVerify(r)
|
||||
|
||||
// For presigned requests verify them right here.
|
||||
if isRequestPresignedSignatureV4(r) {
|
||||
ok, err := auth.DoesPresignedSignatureMatch()
|
||||
if err != nil {
|
||||
switch err.ToGoError() {
|
||||
case errInvalidRegion:
|
||||
errorIf(err.Trace(), "Unknown region in authorization header.", nil)
|
||||
writeErrorResponse(w, req, AuthorizationHeaderMalformed, req.URL.Path)
|
||||
return
|
||||
case errAccessKeyIDInvalid:
|
||||
errorIf(err.Trace(), "Invalid access key id.", nil)
|
||||
writeErrorResponse(w, req, InvalidAccessKeyID, req.URL.Path)
|
||||
return
|
||||
default:
|
||||
errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
|
||||
writeErrorResponse(w, req, InternalError, req.URL.Path)
|
||||
return
|
||||
}
|
||||
errorIf(err.Trace(r.URL.String()), "Presigned signature verification failed.", nil)
|
||||
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
|
||||
return
|
||||
}
|
||||
if !ok {
|
||||
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
|
||||
return
|
||||
}
|
||||
auth = nil
|
||||
}
|
||||
|
||||
metadata, err := api.Filesystem.CreateObject(bucket, object, md5, size, req.Body, signature)
|
||||
// Create object.
|
||||
metadata, err := api.Filesystem.CreateObject(bucket, object, md5, size, r.Body, auth)
|
||||
if err != nil {
|
||||
errorIf(err.Trace(), "CreateObject failed.", nil)
|
||||
switch err.ToGoError().(type) {
|
||||
case fs.RootPathFull:
|
||||
writeErrorResponse(w, req, RootPathFull, req.URL.Path)
|
||||
writeErrorResponse(w, r, RootPathFull, r.URL.Path)
|
||||
case fs.BucketNotFound:
|
||||
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
|
||||
case fs.BucketNameInvalid:
|
||||
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
|
||||
writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
|
||||
case fs.BadDigest:
|
||||
writeErrorResponse(w, req, BadDigest, req.URL.Path)
|
||||
case fs.MissingDateHeader:
|
||||
writeErrorResponse(w, req, RequestTimeTooSkewed, req.URL.Path)
|
||||
case v4.SigDoesNotMatch:
|
||||
writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
|
||||
writeErrorResponse(w, r, BadDigest, r.URL.Path)
|
||||
case fs.SignDoesNotMatch:
|
||||
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
|
||||
case fs.IncompleteBody:
|
||||
writeErrorResponse(w, req, IncompleteBody, req.URL.Path)
|
||||
case fs.EntityTooLarge:
|
||||
writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
|
||||
writeErrorResponse(w, r, IncompleteBody, r.URL.Path)
|
||||
case fs.InvalidDigest:
|
||||
writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
|
||||
writeErrorResponse(w, r, InvalidDigest, r.URL.Path)
|
||||
case fs.ObjectExistsAsPrefix:
|
||||
writeErrorResponse(w, req, ObjectExistsAsPrefix, req.URL.Path)
|
||||
writeErrorResponse(w, r, ObjectExistsAsPrefix, r.URL.Path)
|
||||
default:
|
||||
writeErrorResponse(w, req, InternalError, req.URL.Path)
|
||||
writeErrorResponse(w, r, InternalError, r.URL.Path)
|
||||
}
|
||||
return
|
||||
}
|
||||
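PutObjectHandler now lets a missing Content-Length through when the transfer encoding is chunked. The contains helper it relies on is not shown in this change; it is presumably a small slice lookup along these lines:

// Assumed helper - reports whether the string slice holds the given value.
func contains(stringList []string, value string) bool {
	for _, s := range stringList {
		if s == value {
			return true
		}
	}
	return false
}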
|
@ -234,35 +234,40 @@ func (api CloudStorageAPI) PutObjectHandler(w http.ResponseWriter, req *http.Req
|
|||
/// Multipart CloudStorageAPI
|
||||
|
||||
// NewMultipartUploadHandler - New multipart upload
|
||||
func (api CloudStorageAPI) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
|
||||
func (api CloudStorageAPI) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
|
||||
var object, bucket string
|
||||
vars := mux.Vars(req)
|
||||
vars := mux.Vars(r)
|
||||
bucket = vars["bucket"]
|
||||
object = vars["object"]
|
||||
|
||||
if isRequestRequiresACLCheck(req) {
|
||||
if isRequestRequiresACLCheck(r) {
|
||||
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
|
||||
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
|
||||
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if !isSignV4ReqAuthenticated(api.Signature, r) {
|
||||
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
|
||||
return
|
||||
}
|
||||
|
||||
uploadID, err := api.Filesystem.NewMultipartUpload(bucket, object)
|
||||
if err != nil {
|
||||
errorIf(err.Trace(), "NewMultipartUpload failed.", nil)
|
||||
switch err.ToGoError().(type) {
|
||||
case fs.RootPathFull:
|
||||
writeErrorResponse(w, req, RootPathFull, req.URL.Path)
|
||||
writeErrorResponse(w, r, RootPathFull, r.URL.Path)
|
||||
case fs.BucketNameInvalid:
|
||||
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
|
||||
writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
|
||||
case fs.BucketNotFound:
|
||||
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
|
||||
case fs.ObjectNotFound:
|
||||
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
|
||||
case fs.ObjectNameInvalid:
|
||||
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
|
||||
default:
|
||||
writeErrorResponse(w, req, InternalError, req.URL.Path)
|
||||
writeErrorResponse(w, r, InternalError, r.URL.Path)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -276,94 +281,88 @@ func (api CloudStorageAPI) NewMultipartUploadHandler(w http.ResponseWriter, req
|
|||
}
|
||||
|
||||
// PutObjectPartHandler - Upload part
|
||||
func (api CloudStorageAPI) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) {
|
||||
vars := mux.Vars(req)
|
||||
func (api CloudStorageAPI) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
object := vars["object"]
|
||||
|
||||
if isRequestRequiresACLCheck(req) {
|
||||
if isRequestRequiresACLCheck(r) {
|
||||
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
|
||||
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
|
||||
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// get Content-MD5 sent by client and verify if valid
|
||||
md5 := req.Header.Get("Content-MD5")
|
||||
md5 := r.Header.Get("Content-MD5")
|
||||
if !isValidMD5(md5) {
|
||||
writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
|
||||
writeErrorResponse(w, r, InvalidDigest, r.URL.Path)
|
||||
return
|
||||
}
|
||||
|
||||
/// if Content-Length is unknown/missing, deny the request
|
||||
size := req.ContentLength
|
||||
size := r.ContentLength
|
||||
if size == -1 {
|
||||
writeErrorResponse(w, req, MissingContentLength, req.URL.Path)
|
||||
writeErrorResponse(w, r, MissingContentLength, r.URL.Path)
|
||||
return
|
||||
}
|
||||
|
||||
/// maximum Upload size for multipart objects in a single operation
|
||||
if isMaxObjectSize(size) {
|
||||
writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
|
||||
writeErrorResponse(w, r, EntityTooLarge, r.URL.Path)
|
||||
return
|
||||
}
|
||||
|
||||
uploadID := req.URL.Query().Get("uploadId")
|
||||
partIDString := req.URL.Query().Get("partNumber")
|
||||
uploadID := r.URL.Query().Get("uploadId")
|
||||
partIDString := r.URL.Query().Get("partNumber")
|
||||
|
||||
var partID int
|
||||
{
|
||||
var err error
|
||||
partID, err = strconv.Atoi(partIDString)
|
||||
if err != nil {
|
||||
writeErrorResponse(w, req, InvalidPart, req.URL.Path)
|
||||
writeErrorResponse(w, r, InvalidPart, r.URL.Path)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var signature *v4.Signature
|
||||
if isRequestSignatureV4(req) {
|
||||
// Init signature V4 verification
|
||||
var err *probe.Error
|
||||
signature, err = initSignatureV4(req)
|
||||
// Set http request for signature.
|
||||
auth := api.Signature.SetHTTPRequestToVerify(r)
|
||||
// For presigned requests verify them right here.
|
||||
if isRequestPresignedSignatureV4(r) {
|
||||
ok, err := auth.DoesPresignedSignatureMatch()
|
||||
if err != nil {
|
||||
switch err.ToGoError() {
|
||||
case errInvalidRegion:
|
||||
errorIf(err.Trace(), "Unknown region in authorization header.", nil)
|
||||
writeErrorResponse(w, req, AuthorizationHeaderMalformed, req.URL.Path)
|
||||
return
|
||||
case errAccessKeyIDInvalid:
|
||||
errorIf(err.Trace(), "Invalid access key id.", nil)
|
||||
writeErrorResponse(w, req, InvalidAccessKeyID, req.URL.Path)
|
||||
return
|
||||
default:
|
||||
errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
|
||||
writeErrorResponse(w, req, InternalError, req.URL.Path)
|
||||
return
|
||||
}
|
||||
errorIf(err.Trace(r.URL.String()), "Presigned signature verification failed.", nil)
|
||||
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
|
||||
return
|
||||
}
|
||||
if !ok {
|
||||
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
|
||||
return
|
||||
}
|
||||
// Signature verified, set this to nil since payload verification is
|
||||
// not necessary.
|
||||
auth = nil
|
||||
}
|
||||
|
||||
calculatedMD5, err := api.Filesystem.CreateObjectPart(bucket, object, uploadID, md5, partID, size, req.Body, signature)
|
||||
calculatedMD5, err := api.Filesystem.CreateObjectPart(bucket, object, uploadID, md5, partID, size, r.Body, auth)
|
||||
if err != nil {
|
||||
errorIf(err.Trace(), "CreateObjectPart failed.", nil)
|
||||
switch err.ToGoError().(type) {
|
||||
case fs.RootPathFull:
|
||||
writeErrorResponse(w, req, RootPathFull, req.URL.Path)
|
||||
writeErrorResponse(w, r, RootPathFull, r.URL.Path)
|
||||
case fs.InvalidUploadID:
|
||||
writeErrorResponse(w, req, NoSuchUpload, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchUpload, r.URL.Path)
|
||||
case fs.BadDigest:
|
||||
writeErrorResponse(w, req, BadDigest, req.URL.Path)
|
||||
case v4.SigDoesNotMatch:
|
||||
writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
|
||||
writeErrorResponse(w, r, BadDigest, r.URL.Path)
|
||||
case fs.SignDoesNotMatch:
|
||||
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
|
||||
case fs.IncompleteBody:
|
||||
writeErrorResponse(w, req, IncompleteBody, req.URL.Path)
|
||||
case fs.EntityTooLarge:
|
||||
writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
|
||||
writeErrorResponse(w, r, IncompleteBody, r.URL.Path)
|
||||
case fs.InvalidDigest:
|
||||
writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
|
||||
writeErrorResponse(w, r, InvalidDigest, r.URL.Path)
|
||||
default:
|
||||
writeErrorResponse(w, req, InternalError, req.URL.Path)
|
||||
writeErrorResponse(w, r, InternalError, r.URL.Path)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -374,35 +373,40 @@ func (api CloudStorageAPI) PutObjectPartHandler(w http.ResponseWriter, req *http
|
|||
}
|
||||
|
||||
// AbortMultipartUploadHandler - Abort multipart upload
|
||||
func (api CloudStorageAPI) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
|
||||
vars := mux.Vars(req)
|
||||
func (api CloudStorageAPI) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
object := vars["object"]
|
||||
|
||||
if isRequestRequiresACLCheck(req) {
|
||||
if isRequestRequiresACLCheck(r) {
|
||||
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
|
||||
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
|
||||
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
objectResourcesMetadata := getObjectResources(req.URL.Query())
|
||||
if !isSignV4ReqAuthenticated(api.Signature, r) {
|
||||
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
|
||||
return
|
||||
}
|
||||
|
||||
objectResourcesMetadata := getObjectResources(r.URL.Query())
|
||||
err := api.Filesystem.AbortMultipartUpload(bucket, object, objectResourcesMetadata.UploadID)
|
||||
if err != nil {
|
||||
errorIf(err.Trace(), "AbortMultipartUpload failed.", nil)
|
||||
switch err.ToGoError().(type) {
|
||||
case fs.BucketNameInvalid:
|
||||
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
|
||||
writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
|
||||
case fs.BucketNotFound:
|
||||
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
|
||||
case fs.ObjectNotFound:
|
||||
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
|
||||
case fs.ObjectNameInvalid:
|
||||
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
|
||||
case fs.InvalidUploadID:
|
||||
writeErrorResponse(w, req, NoSuchUpload, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchUpload, r.URL.Path)
|
||||
default:
|
||||
writeErrorResponse(w, req, InternalError, req.URL.Path)
|
||||
writeErrorResponse(w, r, InternalError, r.URL.Path)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -410,25 +414,30 @@ func (api CloudStorageAPI) AbortMultipartUploadHandler(w http.ResponseWriter, re
|
|||
}
|
||||
|
||||
// ListObjectPartsHandler - List object parts
|
||||
func (api CloudStorageAPI) ListObjectPartsHandler(w http.ResponseWriter, req *http.Request) {
|
||||
vars := mux.Vars(req)
|
||||
func (api CloudStorageAPI) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
object := vars["object"]
|
||||
|
||||
if isRequestRequiresACLCheck(req) {
|
||||
if isRequestRequiresACLCheck(r) {
|
||||
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
|
||||
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
|
||||
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
objectResourcesMetadata := getObjectResources(req.URL.Query())
|
||||
if !isSignV4ReqAuthenticated(api.Signature, r) {
|
||||
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
|
||||
return
|
||||
}
|
||||
|
||||
objectResourcesMetadata := getObjectResources(r.URL.Query())
|
||||
if objectResourcesMetadata.PartNumberMarker < 0 {
|
||||
writeErrorResponse(w, req, InvalidPartNumberMarker, req.URL.Path)
|
||||
writeErrorResponse(w, r, InvalidPartNumberMarker, r.URL.Path)
|
||||
return
|
||||
}
|
||||
if objectResourcesMetadata.MaxParts < 0 {
|
||||
writeErrorResponse(w, req, InvalidMaxParts, req.URL.Path)
|
||||
writeErrorResponse(w, r, InvalidMaxParts, r.URL.Path)
|
||||
return
|
||||
}
|
||||
if objectResourcesMetadata.MaxParts == 0 {
|
||||
|
@ -440,17 +449,17 @@ func (api CloudStorageAPI) ListObjectPartsHandler(w http.ResponseWriter, req *ht
|
|||
errorIf(err.Trace(), "ListObjectParts failed.", nil)
|
||||
switch err.ToGoError().(type) {
|
||||
case fs.BucketNameInvalid:
|
||||
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
|
||||
writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
|
||||
case fs.BucketNotFound:
|
||||
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
|
||||
case fs.ObjectNotFound:
|
||||
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
|
||||
case fs.ObjectNameInvalid:
|
||||
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
|
||||
case fs.InvalidUploadID:
|
||||
writeErrorResponse(w, req, NoSuchUpload, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchUpload, r.URL.Path)
|
||||
default:
|
||||
writeErrorResponse(w, req, InternalError, req.URL.Path)
|
||||
writeErrorResponse(w, r, InternalError, r.URL.Path)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -463,72 +472,69 @@ func (api CloudStorageAPI) ListObjectPartsHandler(w http.ResponseWriter, req *ht
|
|||
}
|
||||
|
||||
// CompleteMultipartUploadHandler - Complete multipart upload
|
||||
func (api CloudStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
|
||||
vars := mux.Vars(req)
|
||||
func (api CloudStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
object := vars["object"]
|
||||
|
||||
if isRequestRequiresACLCheck(req) {
|
||||
if isRequestRequiresACLCheck(r) {
|
||||
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
|
||||
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
|
||||
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
objectResourcesMetadata := getObjectResources(req.URL.Query())
|
||||
var signature *v4.Signature
|
||||
if isRequestSignatureV4(req) {
|
||||
// Init signature V4 verification
|
||||
var err *probe.Error
|
||||
signature, err = initSignatureV4(req)
|
||||
// Set http request for signature.
|
||||
auth := api.Signature.SetHTTPRequestToVerify(r)
|
||||
// For presigned requests verify them right here.
|
||||
if isRequestPresignedSignatureV4(r) {
|
||||
ok, err := auth.DoesPresignedSignatureMatch()
|
||||
if err != nil {
|
||||
switch err.ToGoError() {
|
||||
case errInvalidRegion:
|
||||
errorIf(err.Trace(), "Unknown region in authorization header.", nil)
|
||||
writeErrorResponse(w, req, AuthorizationHeaderMalformed, req.URL.Path)
|
||||
return
|
||||
case errAccessKeyIDInvalid:
|
||||
errorIf(err.Trace(), "Invalid access key id.", nil)
|
||||
writeErrorResponse(w, req, InvalidAccessKeyID, req.URL.Path)
|
||||
return
|
||||
default:
|
||||
errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
|
||||
writeErrorResponse(w, req, InternalError, req.URL.Path)
|
||||
return
|
||||
}
|
||||
errorIf(err.Trace(r.URL.String()), "Presigned signature verification failed.", nil)
|
||||
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
|
||||
return
|
||||
}
|
||||
if !ok {
|
||||
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
|
||||
return
|
||||
}
|
||||
auth = nil
|
||||
}
|
||||
|
||||
metadata, err := api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, req.Body, signature)
|
||||
// Extract object resources.
|
||||
objectResourcesMetadata := getObjectResources(r.URL.Query())
|
||||
|
||||
// Complete multipart upload.
|
||||
metadata, err := api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, r.Body, api.Signature)
|
||||
if err != nil {
|
||||
errorIf(err.Trace(), "CompleteMultipartUpload failed.", nil)
|
||||
switch err.ToGoError().(type) {
|
||||
case fs.BucketNameInvalid:
|
||||
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
|
||||
writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
|
||||
case fs.BucketNotFound:
|
||||
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
|
||||
case fs.ObjectNotFound:
|
||||
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
|
||||
case fs.ObjectNameInvalid:
|
||||
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
|
||||
case fs.InvalidUploadID:
|
||||
writeErrorResponse(w, req, NoSuchUpload, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchUpload, r.URL.Path)
|
||||
case fs.InvalidPart:
|
||||
writeErrorResponse(w, req, InvalidPart, req.URL.Path)
|
||||
writeErrorResponse(w, r, InvalidPart, r.URL.Path)
|
||||
case fs.InvalidPartOrder:
|
||||
writeErrorResponse(w, req, InvalidPartOrder, req.URL.Path)
|
||||
case v4.SigDoesNotMatch:
|
||||
writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
|
||||
writeErrorResponse(w, r, InvalidPartOrder, r.URL.Path)
|
||||
case fs.SignDoesNotMatch:
|
||||
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
|
||||
case fs.IncompleteBody:
|
||||
writeErrorResponse(w, req, IncompleteBody, req.URL.Path)
|
||||
writeErrorResponse(w, r, IncompleteBody, r.URL.Path)
|
||||
case fs.MalformedXML:
|
||||
writeErrorResponse(w, req, MalformedXML, req.URL.Path)
|
||||
writeErrorResponse(w, r, MalformedXML, r.URL.Path)
|
||||
default:
|
||||
writeErrorResponse(w, req, InternalError, req.URL.Path)
|
||||
writeErrorResponse(w, r, InternalError, r.URL.Path)
|
||||
}
|
||||
return
|
||||
}
|
||||
response := generateCompleteMultpartUploadResponse(bucket, object, req.URL.String(), metadata.MD5)
|
||||
response := generateCompleteMultpartUploadResponse(bucket, object, r.URL.String(), metadata.MD5)
|
||||
encodedSuccessResponse := encodeSuccessResponse(response)
|
||||
// write headers
|
||||
setCommonHeaders(w)
|
||||
|
@ -539,32 +545,37 @@ func (api CloudStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter,
|
|||
/// Delete CloudStorageAPI
|
||||
|
||||
// DeleteObjectHandler - Delete object
|
||||
func (api CloudStorageAPI) DeleteObjectHandler(w http.ResponseWriter, req *http.Request) {
|
||||
vars := mux.Vars(req)
|
||||
func (api CloudStorageAPI) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
object := vars["object"]
|
||||
|
||||
if isRequestRequiresACLCheck(req) {
|
||||
if isRequestRequiresACLCheck(r) {
|
||||
if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
|
||||
writeErrorResponse(w, req, AccessDenied, req.URL.Path)
|
||||
writeErrorResponse(w, r, AccessDenied, r.URL.Path)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if !isSignV4ReqAuthenticated(api.Signature, r) {
|
||||
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
|
||||
return
|
||||
}
|
||||
|
||||
err := api.Filesystem.DeleteObject(bucket, object)
|
||||
if err != nil {
|
||||
errorIf(err.Trace(), "DeleteObject failed.", nil)
|
||||
switch err.ToGoError().(type) {
|
||||
case fs.BucketNameInvalid:
|
||||
writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
|
||||
writeErrorResponse(w, r, InvalidBucketName, r.URL.Path)
|
||||
case fs.BucketNotFound:
|
||||
writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchBucket, r.URL.Path)
|
||||
case fs.ObjectNotFound:
|
||||
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
|
||||
case fs.ObjectNameInvalid:
|
||||
writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
|
||||
writeErrorResponse(w, r, NoSuchKey, r.URL.Path)
|
||||
default:
|
||||
writeErrorResponse(w, req, InternalError, req.URL.Path)
|
||||
writeErrorResponse(w, r, InternalError, r.URL.Path)
|
||||
}
|
||||
}
|
||||
writeSuccessNoContent(w)
|
||||
|
|
|
@ -23,7 +23,6 @@ import (
|
|||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// File container provided for atomic file writes
|
||||
|
@ -82,7 +81,6 @@ func FileCreateWithPrefix(filePath string, prefix string) (*File, error) {
|
|||
if err := os.MkdirAll(filepath.Dir(filePath), 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
prefix = strings.TrimSpace(prefix)
|
||||
f, err := ioutil.TempFile(filepath.Dir(filePath), prefix+filepath.Base(filePath))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
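The atomic package patched above follows a create-temporary-then-rename pattern. A sketch of the intended call sequence, assuming *atomic.File embeds *os.File and that Close promotes the temporary file into place while CloseAndPurge discards it (both methods are used elsewhere in this change):

// writeFileAtomically is illustrative; path, prefix and payload are arbitrary.
func writeFileAtomically(filePath string, payload []byte) error {
	file, e := atomic.FileCreateWithPrefix(filePath, "$tmpfile")
	if e != nil {
		return e
	}
	if _, e = file.Write(payload); e != nil {
		// Drop the temporary file and leave any existing target untouched.
		file.CloseAndPurge()
		return e
	}
	// Rename the fully written temporary file onto filePath.
	return file.Close()
}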
|
@ -19,7 +19,6 @@ package fs
|
|||
import (
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
@ -167,7 +166,7 @@ var validBucket = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
|
|||
// IsValidBucketName - verify bucket name in accordance with
|
||||
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
|
||||
func IsValidBucketName(bucket string) bool {
|
||||
if strings.TrimSpace(bucket) == "" {
|
||||
if bucket == "" {
|
||||
return false
|
||||
}
|
||||
if len(bucket) < 3 || len(bucket) > 63 {
|
||||
|
@ -182,7 +181,7 @@ func IsValidBucketName(bucket string) bool {
|
|||
// IsValidObjectName - verify object name in accordance with
|
||||
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
|
||||
func IsValidObjectName(object string) bool {
|
||||
if strings.TrimSpace(object) == "" {
|
||||
if object == "" {
|
||||
return true
|
||||
}
|
||||
if len(object) > 1024 || len(object) == 0 {
|
||||
|
|
|
@ -18,25 +18,11 @@ package fs
|
|||
|
||||
import "fmt"
|
||||
|
||||
// MissingDateHeader date header missing
|
||||
type MissingDateHeader struct{}
|
||||
// SignDoesNotMatch - signature does not match.
|
||||
type SignDoesNotMatch struct{}
|
||||
|
||||
func (e MissingDateHeader) Error() string {
|
||||
return "Missing date header"
|
||||
}
|
||||
|
||||
// MissingExpiresQuery expires query string missing
|
||||
type MissingExpiresQuery struct{}
|
||||
|
||||
func (e MissingExpiresQuery) Error() string {
|
||||
return "Missing expires query string"
|
||||
}
|
||||
|
||||
// ExpiredPresignedRequest request already expired
|
||||
type ExpiredPresignedRequest struct{}
|
||||
|
||||
func (e ExpiredPresignedRequest) Error() string {
|
||||
return "Presigned request already expired"
|
||||
func (e SignDoesNotMatch) Error() string {
|
||||
return "Signature does not match."
|
||||
}
|
||||
|
||||
// InvalidArgument invalid argument
|
||||
|
@ -156,30 +142,8 @@ func (e BadDigest) Error() string {
|
|||
return "Bad digest"
|
||||
}
|
||||
|
||||
// ParityOverflow parity over flow
|
||||
type ParityOverflow struct{}
|
||||
|
||||
func (e ParityOverflow) Error() string {
|
||||
return "Parity overflow"
|
||||
}
|
||||
|
||||
// ChecksumMismatch checksum mismatch
|
||||
type ChecksumMismatch struct{}
|
||||
|
||||
func (e ChecksumMismatch) Error() string {
|
||||
return "Checksum mismatch"
|
||||
}
|
||||
|
||||
// MissingPOSTPolicy missing post policy
|
||||
type MissingPOSTPolicy struct{}
|
||||
|
||||
func (e MissingPOSTPolicy) Error() string {
|
||||
return "Missing POST policy in multipart form"
|
||||
}
|
||||
|
||||
// InternalError - generic internal error
|
||||
type InternalError struct {
|
||||
}
|
||||
type InternalError struct{}
|
||||
|
||||
// BackendError - generic disk backend error
|
||||
type BackendError struct {
|
||||
|
@ -237,13 +201,6 @@ type BucketNameInvalid GenericBucketError
|
|||
|
||||
/// Object related errors
|
||||
|
||||
// EntityTooLarge - object size exceeds maximum limit
|
||||
type EntityTooLarge struct {
|
||||
GenericObjectError
|
||||
Size string
|
||||
MaxSize string
|
||||
}
|
||||
|
||||
// ObjectNameInvalid - object name provided is invalid
|
||||
type ObjectNameInvalid GenericObjectError
|
||||
|
||||
|
@ -292,11 +249,6 @@ func (e ObjectNameInvalid) Error() string {
|
|||
return "Object name invalid: " + e.Bucket + "#" + e.Object
|
||||
}
|
||||
|
||||
// Return string an error formatted as the given text
|
||||
func (e EntityTooLarge) Error() string {
|
||||
return e.Bucket + "#" + e.Object + "with " + e.Size + "reached maximum allowed size limit " + e.MaxSize
|
||||
}
|
||||
|
||||
// IncompleteBody You did not provide the number of bytes specified by the Content-Length HTTP header
|
||||
type IncompleteBody GenericObjectError
|
||||
|
||||
|
|
|
@ -68,9 +68,11 @@ func (fs Filesystem) listObjects(bucket, prefix, marker, delimiter string, maxKe
|
|||
// Bucket path prefix should always end with a separator.
|
||||
bucketPathPrefix := bucketPath + string(os.PathSeparator)
|
||||
prefixPath := bucketPathPrefix + prefix
|
||||
st, err := os.Stat(prefixPath)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
walkPath = bucketPath
|
||||
st, e := os.Stat(prefixPath)
|
||||
if e != nil {
|
||||
if os.IsNotExist(e) {
|
||||
walkPath = bucketPath
|
||||
}
|
||||
} else {
|
||||
if st.IsDir() && !strings.HasSuffix(prefix, delimiter) {
|
||||
walkPath = bucketPath
|
||||
|
|
|
@ -152,7 +152,7 @@ func (fs Filesystem) MakeBucket(bucket, acl string) *probe.Error {
|
|||
}
|
||||
return probe.NewError(e)
|
||||
}
|
||||
if strings.TrimSpace(acl) == "" {
|
||||
if acl == "" {
|
||||
acl = "private"
|
||||
}
|
||||
|
||||
|
@ -232,7 +232,7 @@ func (fs Filesystem) SetBucketMetadata(bucket string, metadata map[string]string
|
|||
if !IsValidBucketACL(acl) {
|
||||
return probe.NewError(InvalidACL{ACL: acl})
|
||||
}
|
||||
if strings.TrimSpace(acl) == "" {
|
||||
if acl == "" {
|
||||
acl = "private"
|
||||
}
|
||||
bucket = fs.denormalizeBucket(bucket)
|
||||
|
|
|
@ -174,7 +174,15 @@ func saveParts(partPathPrefix string, mw io.Writer, parts []CompletePart) *probe
|
|||
md5Sum = strings.TrimSuffix(md5Sum, "\"")
|
||||
partFile, e := os.OpenFile(partPathPrefix+md5Sum+fmt.Sprintf("$%d-$multiparts", part.PartNumber), os.O_RDONLY, 0600)
|
||||
if e != nil {
|
||||
return probe.NewError(e)
|
||||
if !os.IsNotExist(e) {
|
||||
return probe.NewError(e)
|
||||
}
|
||||
// Some clients do not set Content-MD5, so we would have
|
||||
// created part files without 'ETag' in them.
|
||||
partFile, e = os.OpenFile(partPathPrefix+fmt.Sprintf("$%d-$multiparts", part.PartNumber), os.O_RDONLY, 0600)
|
||||
if e != nil {
|
||||
return probe.NewError(e)
|
||||
}
|
||||
}
|
||||
partReaders = append(partReaders, partFile)
|
||||
partClosers = append(partClosers, partFile)
|
||||
|
@ -322,9 +330,9 @@ func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum s
|
|||
return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
|
||||
}
|
||||
|
||||
if strings.TrimSpace(expectedMD5Sum) != "" {
|
||||
if expectedMD5Sum != "" {
|
||||
var expectedMD5SumBytes []byte
|
||||
expectedMD5SumBytes, err = base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
|
||||
expectedMD5SumBytes, err = base64.StdEncoding.DecodeString(expectedMD5Sum)
|
||||
if err != nil {
|
||||
// Pro-actively close the connection
|
||||
return "", probe.NewError(InvalidDigest{MD5: expectedMD5Sum})
|
||||
|
@ -361,8 +369,8 @@ func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum s
|
|||
md5sum := hex.EncodeToString(md5Hasher.Sum(nil))
|
||||
// Verify if the written object is equal to what is expected, only
|
||||
// if it is requested as such.
|
||||
if strings.TrimSpace(expectedMD5Sum) != "" {
|
||||
if !isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5sum) {
|
||||
if expectedMD5Sum != "" {
|
||||
if !isMD5SumEqual(expectedMD5Sum, md5sum) {
|
||||
partFile.CloseAndPurge()
|
||||
return "", probe.NewError(BadDigest{MD5: expectedMD5Sum, Bucket: bucket, Object: object})
|
||||
}
|
||||
|
@ -375,7 +383,7 @@ func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum s
|
|||
}
|
||||
if !ok {
|
||||
partFile.CloseAndPurge()
|
||||
return "", probe.NewError(signV4.SigDoesNotMatch{})
|
||||
return "", probe.NewError(SignDoesNotMatch{})
|
||||
}
|
||||
}
|
||||
partFile.Close()
|
||||
|
@ -472,7 +480,7 @@ func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, da
|
|||
}
|
||||
if !ok {
|
||||
file.CloseAndPurge()
|
||||
return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{})
|
||||
return ObjectMetadata{}, probe.NewError(SignDoesNotMatch{})
|
||||
}
|
||||
}
|
||||
completeMultipartUpload := &CompleteMultipartUpload{}
|
||||
|
|
|
@ -178,7 +178,7 @@ func getMetadata(rootPath, bucket, object string) (ObjectMetadata, *probe.Error)
|
|||
// isMD5SumEqual - returns error if md5sum mismatches, success its `nil`
|
||||
func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) bool {
|
||||
// Verify the md5sum.
|
||||
if strings.TrimSpace(expectedMD5Sum) != "" && strings.TrimSpace(actualMD5Sum) != "" {
|
||||
if expectedMD5Sum != "" && actualMD5Sum != "" {
|
||||
// Decode md5sum to bytes from their hexadecimal
|
||||
// representations.
|
||||
expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum)
|
||||
|
@ -199,7 +199,7 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) bool {
|
|||
}
|
||||
|
||||
// CreateObject - create an object.
|
||||
func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size int64, data io.Reader, signature *signV4.Signature) (ObjectMetadata, *probe.Error) {
|
||||
func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size int64, data io.Reader, sig *signV4.Signature) (ObjectMetadata, *probe.Error) {
|
||||
di, e := disk.GetInfo(fs.path)
|
||||
if e != nil {
|
||||
return ObjectMetadata{}, probe.NewError(e)
|
||||
|
@ -233,9 +233,9 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
|
|||
|
||||
// Get object path.
|
||||
objectPath := filepath.Join(bucketPath, object)
|
||||
if strings.TrimSpace(expectedMD5Sum) != "" {
|
||||
if expectedMD5Sum != "" {
|
||||
var expectedMD5SumBytes []byte
|
||||
expectedMD5SumBytes, e = base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
|
||||
expectedMD5SumBytes, e = base64.StdEncoding.DecodeString(expectedMD5Sum)
|
||||
if e != nil {
|
||||
// Pro-actively close the connection.
|
||||
return ObjectMetadata{}, probe.NewError(InvalidDigest{MD5: expectedMD5Sum})
|
||||
|
@ -244,7 +244,7 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
|
|||
}
|
||||
|
||||
// Write object.
|
||||
file, e := atomic.FileCreateWithPrefix(objectPath, "$tmpobject")
|
||||
file, e := atomic.FileCreateWithPrefix(objectPath, expectedMD5Sum+"$tmpobject")
|
||||
if e != nil {
|
||||
switch e := e.(type) {
|
||||
case *os.PathError:
|
||||
|
@ -279,22 +279,22 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in
|
|||
md5Sum := hex.EncodeToString(md5Hasher.Sum(nil))
|
||||
// Verify if the written object is equal to what is expected, only
|
||||
// if it is requested as such.
|
||||
if strings.TrimSpace(expectedMD5Sum) != "" {
|
||||
if !isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum) {
|
||||
if expectedMD5Sum != "" {
|
||||
if !isMD5SumEqual(expectedMD5Sum, md5Sum) {
|
||||
file.CloseAndPurge()
|
||||
return ObjectMetadata{}, probe.NewError(BadDigest{MD5: expectedMD5Sum, Bucket: bucket, Object: object})
|
||||
}
|
||||
}
|
||||
sha256Sum := hex.EncodeToString(sha256Hasher.Sum(nil))
|
||||
if signature != nil {
|
||||
ok, err := signature.DoesSignatureMatch(sha256Sum)
|
||||
if sig != nil {
|
||||
ok, err := sig.DoesSignatureMatch(sha256Sum)
|
||||
if err != nil {
|
||||
file.CloseAndPurge()
|
||||
return ObjectMetadata{}, err.Trace()
|
||||
}
|
||||
if !ok {
|
||||
file.CloseAndPurge()
|
||||
return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{})
|
||||
return ObjectMetadata{}, signV4.ErrSignDoesNotMath("Signature does not match")
|
||||
}
|
||||
}
|
||||
file.Close()
|
||||
|
|
|
@ -16,33 +16,41 @@
|
|||
|
||||
package signature
|
||||
|
||||
// MissingDateHeader date header missing
|
||||
type MissingDateHeader struct{}
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
func (e MissingDateHeader) Error() string {
|
||||
return "Missing date header"
|
||||
"github.com/minio/minio/pkg/probe"
|
||||
)
|
||||
|
||||
type errFunc func(msg string, a ...string) *probe.Error
|
||||
|
||||
func errFactory() errFunc {
|
||||
return func(msg string, a ...string) *probe.Error {
|
||||
return probe.NewError(fmt.Errorf("%s, Args: %s", msg, a)).Untrace()
|
||||
}
|
||||
}
|
||||
|
||||
// MissingExpiresQuery expires query string missing
|
||||
type MissingExpiresQuery struct{}
|
||||
|
||||
func (e MissingExpiresQuery) Error() string {
|
||||
return "Missing expires query string"
|
||||
}
|
||||
|
||||
// ExpiredPresignedRequest request already expired
|
||||
type ExpiredPresignedRequest struct{}
|
||||
|
||||
func (e ExpiredPresignedRequest) Error() string {
|
||||
return "Presigned request already expired"
|
||||
}
|
||||
|
||||
// SigDoesNotMatch invalid signature
|
||||
type SigDoesNotMatch struct {
|
||||
SignatureSent string
|
||||
SignatureCalculated string
|
||||
}
|
||||
|
||||
func (e SigDoesNotMatch) Error() string {
|
||||
return "The request signature we calculated does not match the signature you provided"
|
||||
}
|
||||
// Various errors.
|
||||
var (
|
||||
ErrPolicyAlreadyExpired = errFactory()
|
||||
ErrInvalidRegion = errFactory()
|
||||
ErrInvalidDateFormat = errFactory()
|
||||
ErrInvalidService = errFactory()
|
||||
ErrInvalidRequestVersion = errFactory()
|
||||
ErrMissingFields = errFactory()
|
||||
ErrMissingCredTag = errFactory()
|
||||
ErrCredMalformed = errFactory()
|
||||
ErrMissingSignTag = errFactory()
|
||||
ErrMissingSignHeadersTag = errFactory()
|
||||
ErrMissingDateHeader = errFactory()
|
||||
ErrMalformedDate = errFactory()
|
||||
ErrMalformedExpires = errFactory()
|
||||
ErrAuthHeaderEmpty = errFactory()
|
||||
ErrUnsuppSignAlgo = errFactory()
|
||||
ErrMissingExpiresQuery = errFactory()
|
||||
ErrExpiredPresignRequest = errFactory()
|
||||
ErrSignDoesNotMath = errFactory()
|
||||
ErrInvalidAccessKeyID = errFactory()
|
||||
ErrInvalidSecretKey = errFactory()
|
||||
ErrRegionISEmpty = errFactory()
|
||||
)
|
||||
|
|
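The rewritten errors file replaces the per-error struct types with errFactory-generated constructors: each name in the var block above is a function that wraps a message and optional arguments into a *probe.Error. A usage sketch mirroring the call sites elsewhere in this change; the region values are placeholders:

func checkRegion(requestRegion, serverRegion string) *probe.Error {
	if requestRegion != serverRegion {
		// Returns a *probe.Error carrying the message plus the offending value.
		return ErrInvalidRegion("Requested region is not recognized.", requestRegion).Trace(requestRegion)
	}
	return nil
}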
|
@ -17,9 +17,11 @@
|
|||
package signature
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/pkg/probe"
|
||||
|
@ -67,8 +69,8 @@ type PostPolicyForm struct {
|
|||
}
|
||||
}
|
||||
|
||||
// ParsePostPolicyForm - Parse JSON policy string into typed POostPolicyForm structure.
|
||||
func ParsePostPolicyForm(policy string) (PostPolicyForm, *probe.Error) {
|
||||
// parsePostPolicyFormV4 - Parse JSON policy string into typed PostPolicyForm structure.
|
||||
func parsePostPolicyFormV4(policy string) (PostPolicyForm, *probe.Error) {
|
||||
// Convert policy into interfaces and
|
||||
// perform strict type conversion using reflection.
|
||||
var rawPolicy struct {
|
||||
|
@ -155,3 +157,53 @@ func ParsePostPolicyForm(policy string) (PostPolicyForm, *probe.Error) {
|
|||
}
|
||||
return parsedPolicy, nil
|
||||
}
|
||||
|
||||
// ApplyPolicyCond - apply policy conditions and validate input values.
|
||||
func ApplyPolicyCond(formValues map[string]string) *probe.Error {
|
||||
if formValues["X-Amz-Algorithm"] != signV4Algorithm {
|
||||
return ErrUnsuppSignAlgo("Unsupported signature algorithm in policy form data.", formValues["X-Amz-Algorithm"]).Trace(formValues["X-Amz-Algorithm"])
|
||||
}
|
||||
/// Decoding policy
|
||||
policyBytes, e := base64.StdEncoding.DecodeString(formValues["Policy"])
|
||||
if e != nil {
|
||||
return probe.NewError(e)
|
||||
}
|
||||
postPolicyForm, err := parsePostPolicyFormV4(string(policyBytes))
|
||||
if err != nil {
|
||||
return err.Trace()
|
||||
}
|
||||
if !postPolicyForm.Expiration.After(time.Now().UTC()) {
|
||||
return ErrPolicyAlreadyExpired("Policy has already expired, please generate a new one.")
|
||||
}
|
||||
if postPolicyForm.Conditions.Policies["$bucket"].Operator == "eq" {
|
||||
if formValues["Bucket"] != postPolicyForm.Conditions.Policies["$bucket"].Value {
|
||||
return ErrMissingFields("Policy bucket is missing.", formValues["Bucket"])
|
||||
}
|
||||
}
|
||||
if postPolicyForm.Conditions.Policies["$x-amz-date"].Operator == "eq" {
|
||||
if formValues["X-Amz-Date"] != postPolicyForm.Conditions.Policies["$x-amz-date"].Value {
|
||||
return ErrMissingFields("Policy date is missing.", formValues["X-Amz-Date"])
|
||||
}
|
||||
}
|
||||
if postPolicyForm.Conditions.Policies["$Content-Type"].Operator == "starts-with" {
|
||||
if !strings.HasPrefix(formValues["Content-Type"], postPolicyForm.Conditions.Policies["$Content-Type"].Value) {
|
||||
return ErrMissingFields("Policy content-type is missing or invalid.", formValues["Content-Type"])
|
||||
}
|
||||
}
|
||||
if postPolicyForm.Conditions.Policies["$Content-Type"].Operator == "eq" {
|
||||
if formValues["Content-Type"] != postPolicyForm.Conditions.Policies["$Content-Type"].Value {
|
||||
return ErrMissingFields("Policy content-Type is missing or invalid.", formValues["Content-Type"])
|
||||
}
|
||||
}
|
||||
if postPolicyForm.Conditions.Policies["$key"].Operator == "starts-with" {
|
||||
if !strings.HasPrefix(formValues["Key"], postPolicyForm.Conditions.Policies["$key"].Value) {
|
||||
return ErrMissingFields("Policy key is missing.", formValues["Key"])
|
||||
}
|
||||
}
|
||||
if postPolicyForm.Conditions.Policies["$key"].Operator == "eq" {
|
||||
if formValues["Key"] != postPolicyForm.Conditions.Policies["$key"].Value {
|
||||
return ErrMissingFields("Policy key is missing.", formValues["Key"])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
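ApplyPolicyCond above works on the flattened form fields of a browser POST upload. An illustration of the map shape it expects; every value here is fabricated:

// Policy carries the base64-encoded JSON policy document whose conditions the
// checks above are matched against; the signature fields are consumed later by
// DoesPolicySignatureMatch.
func examplePostFormValues() map[string]string {
	return map[string]string{
		"X-Amz-Algorithm":  "AWS4-HMAC-SHA256",
		"X-Amz-Credential": "ACCESSKEY/20160212/us-east-1/s3/aws4_request",
		"X-Amz-Date":       "20160212T083000Z",
		"X-Amz-Signature":  "<hex signature computed over the Policy field>",
		"Policy":           "<base64-encoded policy JSON>",
		"Bucket":           "testbucket",
		"Key":              "uploads/photo.jpg",
		"Content-Type":     "image/jpeg",
	}
}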
|
@ -18,16 +18,13 @@ package signature
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/hmac"
|
||||
"encoding/hex"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/minio/minio/pkg/crypto/sha256"
|
||||
"github.com/minio/minio/pkg/probe"
|
||||
|
@ -35,72 +32,52 @@ import (
|
|||
|
||||
// Signature - local variables
|
||||
type Signature struct {
|
||||
AccessKeyID string
|
||||
SecretAccessKey string
|
||||
Region string
|
||||
Presigned bool
|
||||
PresignedPolicy string
|
||||
SignedHeaders []string
|
||||
Signature string
|
||||
Request *http.Request
|
||||
accessKeyID string
|
||||
secretAccessKey string
|
||||
region string
|
||||
httpRequest *http.Request
|
||||
extractedSignedHeaders http.Header
|
||||
}
|
||||
|
||||
const (
|
||||
authHeaderPrefix = "AWS4-HMAC-SHA256"
|
||||
iso8601Format = "20060102T150405Z"
|
||||
yyyymmdd = "20060102"
|
||||
signV4Algorithm = "AWS4-HMAC-SHA256"
|
||||
iso8601Format = "20060102T150405Z"
|
||||
yyyymmdd = "20060102"
|
||||
)
|
||||
|
||||
// sumHMAC calculate hmac between two input byte array
|
||||
func sumHMAC(key []byte, data []byte) []byte {
|
||||
hash := hmac.New(sha256.New, key)
|
||||
hash.Write(data)
|
||||
return hash.Sum(nil)
|
||||
// New - initialize new authorization checks.
|
||||
func New(accessKeyID, secretAccessKey, region string) (*Signature, *probe.Error) {
|
||||
if !isValidAccessKey.MatchString(accessKeyID) {
|
||||
return nil, ErrInvalidAccessKeyID("Invalid access key id.", accessKeyID).Trace(accessKeyID)
|
||||
}
|
||||
if !isValidSecretKey.MatchString(secretAccessKey) {
|
||||
return nil, ErrInvalidAccessKeyID("Invalid secret key.", secretAccessKey).Trace(secretAccessKey)
|
||||
}
|
||||
if region == "" {
|
||||
return nil, ErrRegionISEmpty("Region is empty.").Trace()
|
||||
}
|
||||
signature := &Signature{
|
||||
accessKeyID: accessKeyID,
|
||||
secretAccessKey: secretAccessKey,
|
||||
region: region,
|
||||
}
|
||||
return signature, nil
|
||||
}
|
||||
|
||||
// getURLEncodedName encode the strings from UTF-8 byte representations to HTML hex escape sequences
|
||||
//
|
||||
// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
|
||||
// non english characters cannot be parsed due to the nature in which url.Encode() is written
|
||||
//
|
||||
// This function on the other hand is a direct replacement for url.Encode() technique to support
|
||||
// pretty much every UTF-8 character.
|
||||
func getURLEncodedName(name string) string {
|
||||
// if object matches reserved string, no need to encode them
|
||||
reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
|
||||
if reservedNames.MatchString(name) {
|
||||
return name
|
||||
// SetHTTPRequestToVerify - sets the http request which needs to be verified.
|
||||
func (s *Signature) SetHTTPRequestToVerify(r *http.Request) *Signature {
|
||||
// Do not set http request if it is 'nil'.
|
||||
if r == nil {
|
||||
return s
|
||||
}
|
||||
var encodedName string
|
||||
for _, s := range name {
|
||||
if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
|
||||
encodedName = encodedName + string(s)
|
||||
continue
|
||||
}
|
||||
switch s {
|
||||
case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
|
||||
encodedName = encodedName + string(s)
|
||||
continue
|
||||
default:
|
||||
len := utf8.RuneLen(s)
|
||||
if len < 0 {
|
||||
return name
|
||||
}
|
||||
u := make([]byte, len)
|
||||
utf8.EncodeRune(u, s)
|
||||
for _, r := range u {
|
||||
hex := hex.EncodeToString([]byte{r})
|
||||
encodedName = encodedName + "%" + strings.ToUpper(hex)
|
||||
}
|
||||
}
|
||||
}
|
||||
return encodedName
|
||||
s.httpRequest = r
|
||||
return s
|
||||
}
|
||||
|
||||
// getCanonicalHeaders generate a list of request headers with their values
|
||||
func (r Signature) getCanonicalHeaders(signedHeaders map[string][]string) string {
|
||||
func (s Signature) getCanonicalHeaders(signedHeaders http.Header) string {
|
||||
var headers []string
|
||||
vals := make(map[string][]string)
|
||||
vals := make(http.Header)
|
||||
for k, vv := range signedHeaders {
|
||||
headers = append(headers, strings.ToLower(k))
|
||||
vals[strings.ToLower(k)] = vv
|
||||
|
@ -114,7 +91,7 @@ func (r Signature) getCanonicalHeaders(signedHeaders map[string][]string) string
|
|||
buf.WriteByte(':')
|
||||
switch {
|
||||
case k == "host":
|
||||
buf.WriteString(r.Request.Host)
|
||||
buf.WriteString(s.httpRequest.Host)
|
||||
fallthrough
|
||||
default:
|
||||
for idx, v := range vals[k] {
|
||||
|
@ -130,7 +107,7 @@ func (r Signature) getCanonicalHeaders(signedHeaders map[string][]string) string
|
|||
}
|
||||
|
||||
// getSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names
|
||||
func (r Signature) getSignedHeaders(signedHeaders map[string][]string) string {
|
||||
func (s Signature) getSignedHeaders(signedHeaders http.Header) string {
|
||||
var headers []string
|
||||
for k := range signedHeaders {
|
||||
headers = append(headers, strings.ToLower(k))
|
||||
|
@ -140,41 +117,6 @@ func (r Signature) getSignedHeaders(signedHeaders map[string][]string) string {
|
|||
return strings.Join(headers, ";")
|
||||
}
|
||||
|
||||
// extractSignedHeaders extract signed headers from Authorization header
|
||||
func (r Signature) extractSignedHeaders() map[string][]string {
|
||||
extractedSignedHeadersMap := make(map[string][]string)
|
||||
for _, header := range r.SignedHeaders {
|
||||
val, ok := r.Request.Header[http.CanonicalHeaderKey(header)]
|
||||
if !ok {
|
||||
// Golang http server strips off 'Expect' header, if the
|
||||
// client sent this as part of signed headers we need to
|
||||
// handle otherwise we would see a signature mismatch.
|
||||
// `aws-cli` sets this as part of signed headers which is
|
||||
// a bad idea since servers trying to implement AWS
|
||||
// Signature version '4' will all encounter this issue.
|
||||
//
|
||||
// According to
|
||||
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20
|
||||
// Expect header is always of form:
|
||||
//
|
||||
// Expect = "Expect" ":" 1#expectation
|
||||
// expectation = "100-continue" | expectation-extension
|
||||
//
|
||||
// So it safe to assume that '100-continue' is what would
|
||||
// be sent, for the time being keep this work around.
|
||||
// Adding a *TODO* to remove this later when Golang server
|
||||
// doesn't filter out the 'Expect' header.
|
||||
if header == "expect" {
|
||||
extractedSignedHeadersMap[header] = []string{"100-continue"}
|
||||
}
|
||||
// if not found continue, we will fail later
|
||||
continue
|
||||
}
|
||||
extractedSignedHeadersMap[header] = val
|
||||
}
|
||||
return extractedSignedHeadersMap
|
||||
}
|
||||
|
||||
// getCanonicalRequest generate a canonical request of style
|
||||
//
|
||||
// canonicalRequest =
|
||||
|
@ -185,18 +127,18 @@ func (r Signature) extractSignedHeaders() map[string][]string {
|
|||
// <SignedHeaders>\n
|
||||
// <HashedPayload>
|
||||
//
|
||||
func (r *Signature) getCanonicalRequest() string {
|
||||
payload := r.Request.Header.Get(http.CanonicalHeaderKey("x-amz-content-sha256"))
|
||||
r.Request.URL.RawQuery = strings.Replace(r.Request.URL.Query().Encode(), "+", "%20", -1)
|
||||
encodedPath := getURLEncodedName(r.Request.URL.Path)
|
||||
// convert any space strings back to "+"
|
||||
func (s *Signature) getCanonicalRequest() string {
|
||||
payload := s.httpRequest.Header.Get(http.CanonicalHeaderKey("x-amz-content-sha256"))
|
||||
s.httpRequest.URL.RawQuery = strings.Replace(s.httpRequest.URL.Query().Encode(), "+", "%20", -1)
|
||||
encodedPath := getURLEncodedName(s.httpRequest.URL.Path)
|
||||
// Convert any space strings back to "+".
|
||||
encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
|
||||
canonicalRequest := strings.Join([]string{
|
||||
r.Request.Method,
|
||||
s.httpRequest.Method,
|
||||
encodedPath,
|
||||
r.Request.URL.RawQuery,
|
||||
r.getCanonicalHeaders(r.extractSignedHeaders()),
|
||||
r.getSignedHeaders(r.extractSignedHeaders()),
|
||||
s.httpRequest.URL.RawQuery,
|
||||
s.getCanonicalHeaders(s.extractedSignedHeaders),
|
||||
s.getSignedHeaders(s.extractedSignedHeaders),
|
||||
payload,
|
||||
}, "\n")
|
||||
return canonicalRequest
|
||||
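Assembled as above, the canonical request for a simple authenticated GET looks like the following; bucket, object and header values are fabricated, and the payload hash shown is the SHA-256 of an empty body:

// Illustrative only - query parameters are sorted and percent-encoded, each
// canonical header line ends in \n (hence the blank line before the signed
// header list), and the last line is the x-amz-content-sha256 payload hash.
const exampleCanonicalRequest = "GET\n" +
	"/testbucket/hello.txt\n" +
	"max-keys=10&prefix=logs%2F\n" +
	"host:localhost:9000\n" +
	"x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n" +
	"x-amz-date:20160212T083000Z\n" +
	"\n" +
	"host;x-amz-content-sha256;x-amz-date\n" +
	"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"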
|
@ -212,69 +154,89 @@ func (r *Signature) getCanonicalRequest() string {
|
|||
// <SignedHeaders>\n
|
||||
// <HashedPayload>
|
||||
//
|
||||
func (r Signature) getPresignedCanonicalRequest(presignedQuery string) string {
|
||||
func (s Signature) getPresignedCanonicalRequest(presignedQuery string) string {
|
||||
rawQuery := strings.Replace(presignedQuery, "+", "%20", -1)
|
||||
encodedPath := getURLEncodedName(r.Request.URL.Path)
|
||||
// convert any space strings back to "+"
|
||||
encodedPath := getURLEncodedName(s.httpRequest.URL.Path)
|
||||
// Convert any space strings back to "+".
|
||||
encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
|
||||
canonicalRequest := strings.Join([]string{
|
||||
r.Request.Method,
|
||||
s.httpRequest.Method,
|
||||
encodedPath,
|
||||
rawQuery,
|
||||
r.getCanonicalHeaders(r.extractSignedHeaders()),
|
||||
r.getSignedHeaders(r.extractSignedHeaders()),
|
||||
s.getCanonicalHeaders(s.extractedSignedHeaders),
|
||||
s.getSignedHeaders(s.extractedSignedHeaders),
|
||||
"UNSIGNED-PAYLOAD",
|
||||
}, "\n")
|
||||
return canonicalRequest
|
||||
}
|
||||
|
||||
// getScope generate a string of a specific date, an AWS region, and a service
|
||||
func (r Signature) getScope(t time.Time) string {
|
||||
// getScope generate a string of a specific date, an AWS region, and a service.
|
||||
func (s Signature) getScope(t time.Time) string {
|
||||
scope := strings.Join([]string{
|
||||
t.Format(yyyymmdd),
|
||||
r.Region,
|
||||
s.region,
|
||||
"s3",
|
||||
"aws4_request",
|
||||
}, "/")
|
||||
return scope
|
||||
}
|
||||
|
||||
// getStringToSign a string based on selected query values
|
||||
func (r Signature) getStringToSign(canonicalRequest string, t time.Time) string {
|
||||
stringToSign := authHeaderPrefix + "\n" + t.Format(iso8601Format) + "\n"
|
||||
stringToSign = stringToSign + r.getScope(t) + "\n"
|
||||
// getStringToSign a string based on selected query values.
|
||||
func (s Signature) getStringToSign(canonicalRequest string, t time.Time) string {
|
||||
stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n"
|
||||
stringToSign = stringToSign + s.getScope(t) + "\n"
|
||||
canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest))
|
||||
stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:])
|
||||
return stringToSign
|
||||
}
|
||||
|
||||
// getSigningKey hmac seed to calculate final signature
|
||||
func (r Signature) getSigningKey(t time.Time) []byte {
|
||||
secret := r.SecretAccessKey
|
||||
// getSigningKey hmac seed to calculate final signature.
|
||||
func (s Signature) getSigningKey(t time.Time) []byte {
|
||||
secret := s.secretAccessKey
|
||||
date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
|
||||
region := sumHMAC(date, []byte(r.Region))
|
||||
region := sumHMAC(date, []byte(s.region))
|
||||
service := sumHMAC(region, []byte("s3"))
|
||||
signingKey := sumHMAC(service, []byte("aws4_request"))
|
||||
return signingKey
|
||||
}
|
||||
|
||||
// getSignature final signature in hexadecimal form
|
||||
func (r Signature) getSignature(signingKey []byte, stringToSign string) string {
|
||||
// getSignature final signature in hexadecimal form.
|
||||
func (s Signature) getSignature(signingKey []byte, stringToSign string) string {
|
||||
return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
|
||||
}

 // DoesPolicySignatureMatch - Verify query headers with post policy
 // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
 // returns true if matches, false otherwise. if error is not nil then it is always false
-func (r *Signature) DoesPolicySignatureMatch(date string) (bool, *probe.Error) {
-	t, err := time.Parse(iso8601Format, date)
+func (s *Signature) DoesPolicySignatureMatch(formValues map[string]string) (bool, *probe.Error) {
+	// Parse credential tag.
+	creds, err := parseCredential("Credential=" + formValues["X-Amz-Credential"])
 	if err != nil {
-		return false, probe.NewError(err)
+		return false, err.Trace(formValues["X-Amz-Credential"])
 	}
-	signingKey := r.getSigningKey(t)
-	stringToSign := string(r.PresignedPolicy)
-	newSignature := r.getSignature(signingKey, stringToSign)
-	if newSignature != r.Signature {
+
+	// Verify if the access key id matches.
+	if creds.accessKeyID != s.accessKeyID {
+		return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", creds.accessKeyID).Trace(creds.accessKeyID)
+	}
+
+	// Verify if the region is valid.
+	reqRegion := creds.scope.region
+	if !isValidRegion(reqRegion, s.region) {
+		return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion)
+	}
+
+	// Save region.
+	s.region = reqRegion
+
+	// Parse date string.
+	t, e := time.Parse(iso8601Format, formValues["X-Amz-Date"])
+	if e != nil {
+		return false, probe.NewError(e)
+	}
+	signingKey := s.getSigningKey(t)
+	newSignature := s.getSignature(signingKey, formValues["Policy"])
+	if newSignature != formValues["X-Amz-Signature"] {
 		return false, nil
 	}
 	return true, nil
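The post-policy check above boils down to re-signing the raw base64 policy form field and comparing it with X-Amz-Signature. A rough standalone sketch, with a placeholder signing key standing in for the derived one (hypothetical helper names, not the minio API):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hmacSHA256 is a small local helper, same role as sumHMAC above.
func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	// Placeholder signing key; in the real flow it comes from the
	// date/region/service derivation chain shown earlier.
	signingKey := []byte("example-derived-signing-key")

	// What the browser form carries: the base64-encoded policy document and
	// the signature the client computed over it.
	policy := "eyJleHBpcmF0aW9uIjoiMjAxNi0wMS0wMVQwMDowMDowMFoifQ=="
	clientSignature := hex.EncodeToString(hmacSHA256(signingKey, []byte(policy)))

	formValues := map[string]string{
		"Policy":          policy,
		"X-Amz-Signature": clientSignature,
	}

	// Server side: recompute over the same policy string and compare.
	recomputed := hex.EncodeToString(hmacSHA256(signingKey, []byte(formValues["Policy"])))
	fmt.Println("match:", recomputed == formValues["X-Amz-Signature"])
}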
@ -283,35 +245,49 @@ func (r *Signature) DoesPolicySignatureMatch(date string) (bool, *probe.Error) {
|
|||
// DoesPresignedSignatureMatch - Verify query headers with presigned signature
|
||||
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
|
||||
// returns true if matches, false otherwise. if error is not nil then it is always false
|
||||
func (r *Signature) DoesPresignedSignatureMatch() (bool, *probe.Error) {
|
||||
query := make(url.Values)
|
||||
query.Set("X-Amz-Algorithm", authHeaderPrefix)
|
||||
func (s *Signature) DoesPresignedSignatureMatch() (bool, *probe.Error) {
|
||||
// Parse request query string.
|
||||
preSignV4Values, err := parsePreSignV4(s.httpRequest.URL.Query())
|
||||
if err != nil {
|
||||
return false, err.Trace(s.httpRequest.URL.String())
|
||||
}
|
||||
|
||||
var date string
|
||||
if date = r.Request.URL.Query().Get("X-Amz-Date"); date == "" {
|
||||
return false, probe.NewError(MissingDateHeader{})
|
||||
// Verify if the access key id matches.
|
||||
if preSignV4Values.Creds.accessKeyID != s.accessKeyID {
|
||||
return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", preSignV4Values.Creds.accessKeyID).Trace(preSignV4Values.Creds.accessKeyID)
|
||||
}
|
||||
t, err := time.Parse(iso8601Format, date)
|
||||
if err != nil {
|
||||
return false, probe.NewError(err)
|
||||
|
||||
// Verify if region is valid.
|
||||
reqRegion := preSignV4Values.Creds.scope.region
|
||||
if !isValidRegion(reqRegion, s.region) {
|
||||
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion)
|
||||
}
|
||||
if _, ok := r.Request.URL.Query()["X-Amz-Expires"]; !ok {
|
||||
return false, probe.NewError(MissingExpiresQuery{})
|
||||
}
|
||||
expireSeconds, err := strconv.Atoi(r.Request.URL.Query().Get("X-Amz-Expires"))
|
||||
if err != nil {
|
||||
return false, probe.NewError(err)
|
||||
}
|
||||
if time.Now().UTC().Sub(t) > time.Duration(expireSeconds)*time.Second {
|
||||
return false, probe.NewError(ExpiredPresignedRequest{})
|
||||
|
||||
// Save region.
|
||||
s.region = reqRegion
|
||||
|
||||
// Extract all the signed headers along with its values.
|
||||
s.extractedSignedHeaders = extractSignedHeaders(preSignV4Values.SignedHeaders, s.httpRequest.Header)
|
||||
|
||||
// Construct new query.
|
||||
query := make(url.Values)
|
||||
query.Set("X-Amz-Algorithm", signV4Algorithm)
|
||||
|
||||
if time.Now().UTC().Sub(preSignV4Values.Date) > time.Duration(preSignV4Values.Expires)/time.Second {
|
||||
return false, ErrExpiredPresignRequest("Presigned request already expired, please initiate a new request.")
|
||||
}
|
||||
|
||||
// Save the date and expires.
|
||||
t := preSignV4Values.Date
|
||||
expireSeconds := int(time.Duration(preSignV4Values.Expires) / time.Second)
|
||||
|
||||
query.Set("X-Amz-Date", t.Format(iso8601Format))
|
||||
query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds))
|
||||
query.Set("X-Amz-SignedHeaders", r.getSignedHeaders(r.extractSignedHeaders()))
|
||||
query.Set("X-Amz-Credential", r.AccessKeyID+"/"+r.getScope(t))
|
||||
query.Set("X-Amz-SignedHeaders", s.getSignedHeaders(s.extractedSignedHeaders))
|
||||
query.Set("X-Amz-Credential", s.accessKeyID+"/"+s.getScope(t))
|
||||
|
||||
// Save other headers available in the request parameters.
|
||||
for k, v := range r.Request.URL.Query() {
|
||||
for k, v := range s.httpRequest.URL.Query() {
|
||||
if strings.HasPrefix(strings.ToLower(k), "x-amz") {
|
||||
continue
|
||||
}
|
||||
|
@ -320,24 +296,24 @@ func (r *Signature) DoesPresignedSignatureMatch() (bool, *probe.Error) {
|
|||
encodedQuery := query.Encode()
|
||||
|
||||
// Verify if date query is same.
|
||||
if r.Request.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") {
|
||||
if s.httpRequest.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") {
|
||||
return false, nil
|
||||
}
|
||||
// Verify if expires query is same.
|
||||
if r.Request.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") {
|
||||
if s.httpRequest.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") {
|
||||
return false, nil
|
||||
}
|
||||
// Verify if signed headers query is same.
|
||||
if r.Request.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") {
|
||||
if s.httpRequest.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") {
|
||||
return false, nil
|
||||
}
|
||||
// Verify if credential query is same.
|
||||
if r.Request.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") {
|
||||
if s.httpRequest.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") {
|
||||
return false, nil
|
||||
}
|
||||
// Verify finally if signature is same.
|
||||
newSignature := r.getSignature(r.getSigningKey(t), r.getStringToSign(r.getPresignedCanonicalRequest(encodedQuery), t))
|
||||
if r.Request.URL.Query().Get("X-Amz-Signature") != newSignature {
|
||||
newSignature := s.getSignature(s.getSigningKey(t), s.getStringToSign(s.getPresignedCanonicalRequest(encodedQuery), t))
|
||||
if s.httpRequest.URL.Query().Get("X-Amz-Signature") != newSignature {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
|
@ -346,27 +322,57 @@ func (r *Signature) DoesPresignedSignatureMatch() (bool, *probe.Error) {
|
|||
// DoesSignatureMatch - Verify authorization header with calculated header in accordance with
|
||||
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
|
||||
// returns true if matches, false otherwise. if error is not nil then it is always false
|
||||
func (r *Signature) DoesSignatureMatch(hashedPayload string) (bool, *probe.Error) {
|
||||
// set new calculated payload
|
||||
r.Request.Header.Set("X-Amz-Content-Sha256", hashedPayload)
|
||||
func (s *Signature) DoesSignatureMatch(hashedPayload string) (bool, *probe.Error) {
|
||||
// Save authorization header.
|
||||
v4Auth := s.httpRequest.Header.Get("Authorization")
|
||||
|
||||
// Add date if not present throw error
|
||||
// Parse signature version '4' header.
|
||||
signV4Values, err := parseSignV4(v4Auth)
|
||||
if err != nil {
|
||||
return false, err.Trace(v4Auth)
|
||||
}
|
||||
|
||||
// Extract all the signed headers along with its values.
|
||||
s.extractedSignedHeaders = extractSignedHeaders(signV4Values.SignedHeaders, s.httpRequest.Header)
|
||||
|
||||
// Verify if the access key id matches.
|
||||
if signV4Values.Creds.accessKeyID != s.accessKeyID {
|
||||
return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", signV4Values.Creds.accessKeyID).Trace(signV4Values.Creds.accessKeyID)
|
||||
}
|
||||
|
||||
// Verify if region is valid.
|
||||
reqRegion := signV4Values.Creds.scope.region
|
||||
if !isValidRegion(reqRegion, s.region) {
|
||||
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion)
|
||||
}
|
||||
|
||||
// Save region.
|
||||
s.region = reqRegion
|
||||
|
||||
// Set input payload.
|
||||
s.httpRequest.Header.Set("X-Amz-Content-Sha256", hashedPayload)
|
||||
|
||||
// Extract date, if not present throw error.
|
||||
var date string
|
||||
if date = r.Request.Header.Get(http.CanonicalHeaderKey("x-amz-date")); date == "" {
|
||||
if date = r.Request.Header.Get("Date"); date == "" {
|
||||
return false, probe.NewError(MissingDateHeader{})
|
||||
if date = s.httpRequest.Header.Get(http.CanonicalHeaderKey("x-amz-date")); date == "" {
|
||||
if date = s.httpRequest.Header.Get("Date"); date == "" {
|
||||
return false, ErrMissingDateHeader("Date header is missing from the request.").Trace()
|
||||
}
|
||||
}
|
||||
t, err := time.Parse(iso8601Format, date)
|
||||
if err != nil {
|
||||
return false, probe.NewError(err)
|
||||
// Parse date header.
|
||||
t, e := time.Parse(iso8601Format, date)
|
||||
if e != nil {
|
||||
return false, probe.NewError(e)
|
||||
}
|
||||
canonicalRequest := r.getCanonicalRequest()
|
||||
stringToSign := r.getStringToSign(canonicalRequest, t)
|
||||
signingKey := r.getSigningKey(t)
|
||||
newSignature := r.getSignature(signingKey, stringToSign)
|
||||
|
||||
if newSignature != r.Signature {
|
||||
// Signature version '4'.
|
||||
canonicalRequest := s.getCanonicalRequest()
|
||||
stringToSign := s.getStringToSign(canonicalRequest, t)
|
||||
signingKey := s.getSigningKey(t)
|
||||
newSignature := s.getSignature(signingKey, stringToSign)
|
||||
|
||||
// Verify if signature match.
|
||||
if newSignature != signV4Values.Signature {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
|
|
|
@ -0,0 +1,118 @@
|
|||
package signature
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"encoding/hex"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/minio/minio/pkg/crypto/sha256"
|
||||
)
|
||||
|
||||
// AccessID and SecretID length in bytes
|
||||
const (
|
||||
MinioAccessID = 20
|
||||
MinioSecretID = 40
|
||||
)
|
||||
|
||||
/// helpers
|
||||
|
||||
// isValidSecretKey - validate secret key.
|
||||
var isValidSecretKey = regexp.MustCompile("^.{40}$")
|
||||
|
||||
// isValidAccessKey - validate access key.
|
||||
var isValidAccessKey = regexp.MustCompile("^[A-Z0-9\\-\\.\\_\\~]{20}$")
|
||||
|
||||
// isValidRegion - verify if incoming region value is valid with configured Region.
|
||||
func isValidRegion(reqRegion string, confRegion string) bool {
|
||||
if confRegion == "" || confRegion == "US" {
|
||||
confRegion = "us-east-1"
|
||||
}
|
||||
// Some older s3 clients set region as "US" instead of
|
||||
// "us-east-1", handle it.
|
||||
if reqRegion == "US" {
|
||||
reqRegion = "us-east-1"
|
||||
}
|
||||
return reqRegion == confRegion
|
||||
}
|
||||
|
||||
// sumHMAC calculate hmac between two input byte array.
|
||||
func sumHMAC(key []byte, data []byte) []byte {
|
||||
hash := hmac.New(sha256.New, key)
|
||||
hash.Write(data)
|
||||
return hash.Sum(nil)
|
||||
}
|
||||
|
||||
// getURLEncodedName encode the strings from UTF-8 byte representations to HTML hex escape sequences
|
||||
//
|
||||
// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
|
||||
// non english characters cannot be parsed due to the nature in which url.Encode() is written
|
||||
//
|
||||
// This function on the other hand is a direct replacement for url.Encode() technique to support
|
||||
// pretty much every UTF-8 character.
|
||||
func getURLEncodedName(name string) string {
|
||||
// if object matches reserved string, no need to encode them
|
||||
reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
|
||||
if reservedNames.MatchString(name) {
|
||||
return name
|
||||
}
|
||||
var encodedName string
|
||||
for _, s := range name {
|
||||
if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
|
||||
encodedName = encodedName + string(s)
|
||||
continue
|
||||
}
|
||||
switch s {
|
||||
case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
|
||||
encodedName = encodedName + string(s)
|
||||
continue
|
||||
default:
|
||||
len := utf8.RuneLen(s)
|
||||
if len < 0 {
|
||||
return name
|
||||
}
|
||||
u := make([]byte, len)
|
||||
utf8.EncodeRune(u, s)
|
||||
for _, r := range u {
|
||||
hex := hex.EncodeToString([]byte{r})
|
||||
encodedName = encodedName + "%" + strings.ToUpper(hex)
|
||||
}
|
||||
}
|
||||
}
|
||||
return encodedName
|
||||
}
|
||||
|
||||
// extractSignedHeaders extract signed headers from Authorization header
|
||||
func extractSignedHeaders(signedHeaders []string, reqHeaders http.Header) http.Header {
|
||||
extractedSignedHeaders := make(http.Header)
|
||||
for _, header := range signedHeaders {
|
||||
val, ok := reqHeaders[http.CanonicalHeaderKey(header)]
|
||||
if !ok {
|
||||
// Golang http server strips off 'Expect' header, if the
|
||||
// client sent this as part of signed headers we need to
|
||||
// handle otherwise we would see a signature mismatch.
|
||||
// `aws-cli` sets this as part of signed headers.
|
||||
//
|
||||
// According to
|
||||
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20
|
||||
// Expect header is always of form:
|
||||
//
|
||||
// Expect = "Expect" ":" 1#expectation
|
||||
// expectation = "100-continue" | expectation-extension
|
||||
//
|
||||
// So it is safe to assume that '100-continue' is what would
|
||||
// be sent, for the time being keep this work around.
|
||||
// Adding a *TODO* to remove this later when Golang server
|
||||
// doesn't filter out the 'Expect' header.
|
||||
if header == "expect" {
|
||||
extractedSignedHeaders[header] = []string{"100-continue"}
|
||||
}
|
||||
// If not found continue, we will fail later.
|
||||
continue
|
||||
}
|
||||
extractedSignedHeaders[header] = val
|
||||
}
|
||||
return extractedSignedHeaders
|
||||
}
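A simplified standalone stand-in for getURLEncodedName (the name encodePath below is hypothetical), showing the unreserved-character pass-through and the byte-wise percent-encoding of everything else:

package main

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

// encodePath keeps unreserved characters as-is and percent-encodes every
// other rune byte by byte, mirroring the behaviour described above.
func encodePath(name string) string {
	var b strings.Builder
	for _, r := range name {
		switch {
		case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z', '0' <= r && r <= '9',
			r == '-', r == '_', r == '.', r == '~', r == '/':
			b.WriteRune(r)
		default:
			buf := make([]byte, utf8.RuneLen(r))
			utf8.EncodeRune(buf, r)
			for _, c := range buf {
				fmt.Fprintf(&b, "%%%02X", c)
			}
		}
	}
	return b.String()
}

func main() {
	fmt.Println(encodePath("photos/2016/日本.jpg")) // photos/2016/%E6%97%A5%E6%9C%AC.jpg
}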
|
|
@ -0,0 +1,203 @@
|
|||
package signature
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/pkg/probe"
|
||||
)
|
||||
|
||||
type credScope struct {
|
||||
accessKeyID string
|
||||
scope struct {
|
||||
date time.Time
|
||||
region string
|
||||
service string
|
||||
request string
|
||||
}
|
||||
}
|
||||
|
||||
func parseCredential(credElement string) (credScope, *probe.Error) {
|
||||
creds := strings.Split(strings.TrimSpace(credElement), "=")
|
||||
if len(creds) != 2 {
|
||||
return credScope{}, ErrMissingFields("Credential tag has missing fields.", credElement).Trace(credElement)
|
||||
}
|
||||
if creds[0] != "Credential" {
|
||||
return credScope{}, ErrMissingCredTag("Missing credentials tag.", credElement).Trace(credElement)
|
||||
}
|
||||
credElements := strings.Split(strings.TrimSpace(creds[1]), "/")
|
||||
if len(credElements) != 5 {
|
||||
return credScope{}, ErrCredMalformed("Credential values malformed.", credElement).Trace(credElement)
|
||||
}
|
||||
if !isValidAccessKey.MatchString(credElements[0]) {
|
||||
return credScope{}, ErrInvalidAccessKeyID("Invalid access key id.", credElement).Trace(credElement)
|
||||
}
|
||||
cred := credScope{
|
||||
accessKeyID: credElements[0],
|
||||
}
|
||||
var e error
|
||||
cred.scope.date, e = time.Parse(yyyymmdd, credElements[1])
|
||||
if e != nil {
|
||||
return credScope{}, ErrInvalidDateFormat("Invalid date format.", credElement).Trace(credElement)
|
||||
}
|
||||
if credElements[2] == "" {
|
||||
return credScope{}, ErrRegionISEmpty("Region is empty.", credElement).Trace(credElement)
|
||||
}
|
||||
cred.scope.region = credElements[2]
|
||||
if credElements[3] != "s3" {
|
||||
return credScope{}, ErrInvalidService("Invalid service detected.", credElement).Trace(credElement)
|
||||
}
|
||||
cred.scope.service = credElements[3]
|
||||
if credElements[4] != "aws4_request" {
|
||||
return credScope{}, ErrInvalidRequestVersion("Invalid request version detected.", credElement).Trace(credElement)
|
||||
}
|
||||
cred.scope.request = credElements[4]
|
||||
return cred, nil
|
||||
}
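For reference, the credential element this parser consumes looks like the value below (the access key is the AWS documentation placeholder); a minimal sketch of the same split-on-"="-then-"/" shape:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A typical credential element, either from the Authorization header or
	// from the X-Amz-Credential query parameter.
	cred := "Credential=AKIAIOSFODNN7EXAMPLE/20160101/us-east-1/s3/aws4_request"

	// The parser splits on "=" first, then on "/" into exactly five parts:
	// access key, date (yyyymmdd), region, service ("s3"), request ("aws4_request").
	value := strings.SplitN(cred, "=", 2)[1]
	parts := strings.Split(value, "/")
	fmt.Println(len(parts), parts[0], parts[2]) // 5 AKIAIOSFODNN7EXAMPLE us-east-1
}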
|
||||
|
||||
// parse signature.
|
||||
func parseSignature(signElement string) (string, *probe.Error) {
|
||||
signFields := strings.Split(strings.TrimSpace(signElement), "=")
|
||||
if len(signFields) != 2 {
|
||||
return "", ErrMissingFields("Signature tag has missing fields.", signElement).Trace(signElement)
|
||||
}
|
||||
if signFields[0] != "Signature" {
|
||||
return "", ErrMissingSignTag("Signature tag is missing", signElement).Trace(signElement)
|
||||
}
|
||||
signature := signFields[1]
|
||||
return signature, nil
|
||||
}
|
||||
|
||||
// parse signed headers.
|
||||
func parseSignedHeaders(signedHdrElement string) ([]string, *probe.Error) {
|
||||
signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=")
|
||||
if len(signedHdrFields) != 2 {
|
||||
return nil, ErrMissingFields("Signed headers tag has missing fields.", signedHdrElement).Trace(signedHdrElement)
|
||||
}
|
||||
if signedHdrFields[0] != "SignedHeaders" {
|
||||
return nil, ErrMissingSignHeadersTag("Signed headers tag is missing.", signedHdrElement).Trace(signedHdrElement)
|
||||
}
|
||||
signedHeaders := strings.Split(signedHdrFields[1], ";")
|
||||
return signedHeaders, nil
|
||||
}
|
||||
|
||||
// structured version of AWS Signature V4 header.
|
||||
type signValues struct {
|
||||
Creds credScope
|
||||
SignedHeaders []string
|
||||
Signature string
|
||||
}
|
||||
|
||||
// structured version of AWS Signature V4 query string.
|
||||
type preSignValues struct {
|
||||
signValues
|
||||
Date time.Time
|
||||
Expires time.Duration
|
||||
}
|
||||
|
||||
// Parses signature version '4' query string of the following form.
|
||||
//
|
||||
// querystring = X-Amz-Algorithm=algorithm
|
||||
// querystring += &X-Amz-Credential= urlencode(access_key_ID + '/' + credential_scope)
|
||||
// querystring += &X-Amz-Date=date
|
||||
// querystring += &X-Amz-Expires=timeout interval
|
||||
// querystring += &X-Amz-SignedHeaders=signed_headers
|
||||
// querystring += &X-Amz-Signature=signature
|
||||
//
|
||||
func parsePreSignV4(query url.Values) (preSignValues, *probe.Error) {
|
||||
// Verify if the query algorithm is supported or not.
|
||||
if query.Get("X-Amz-Algorithm") != signV4Algorithm {
|
||||
return preSignValues{}, ErrUnsuppSignAlgo("Unsupported algorithm in query string.", query.Get("X-Amz-Algorithm"))
|
||||
}
|
||||
|
||||
// Initialize signature version '4' structured header.
|
||||
preSignV4Values := preSignValues{}
|
||||
|
||||
var err *probe.Error
|
||||
// Save credential values.
|
||||
preSignV4Values.Creds, err = parseCredential("Credential=" + query.Get("X-Amz-Credential"))
|
||||
if err != nil {
|
||||
return preSignValues{}, err.Trace(query.Get("X-Amz-Credential"))
|
||||
}
|
||||
|
||||
var e error
|
||||
// Save date in native time.Time.
|
||||
preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get("X-Amz-Date"))
|
||||
if e != nil {
|
||||
return preSignValues{}, ErrMalformedDate("Malformed date string.", query.Get("X-Amz-Date")).Trace(query.Get("X-Amz-Date"))
|
||||
}
|
||||
|
||||
// Save expires in native time.Duration.
|
||||
preSignV4Values.Expires, e = time.ParseDuration(query.Get("X-Amz-Expires") + "s")
|
||||
if e != nil {
|
||||
return preSignValues{}, ErrMalformedExpires("Malformed expires string.", query.Get("X-Amz-Expires")).Trace(query.Get("X-Amz-Expires"))
|
||||
}
|
||||
|
||||
// Save signed headers.
|
||||
preSignV4Values.SignedHeaders, err = parseSignedHeaders("SignedHeaders=" + query.Get("X-Amz-SignedHeaders"))
|
||||
if err != nil {
|
||||
return preSignValues{}, err.Trace(query.Get("X-Amz-SignedHeaders"))
|
||||
}
|
||||
|
||||
// Save signature.
|
||||
preSignV4Values.Signature, err = parseSignature("Signature=" + query.Get("X-Amz-Signature"))
|
||||
if err != nil {
|
||||
return preSignValues{}, err.Trace(query.Get("X-Amz-Signature"))
|
||||
}
|
||||
|
||||
// Return structured form of signature query string.
|
||||
return preSignV4Values, nil
|
||||
}
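A small standalone illustration of the query string this parser expects; the credential, date and signature values are placeholders. Note how X-Amz-Expires is turned into a time.Duration by appending "s":

package main

import (
	"fmt"
	"net/url"
	"time"
)

func main() {
	// The kind of query string parsePreSignV4 above expects (values are placeholders).
	rawQuery := "X-Amz-Algorithm=AWS4-HMAC-SHA256" +
		"&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20160101%2Fus-east-1%2Fs3%2Faws4_request" +
		"&X-Amz-Date=20160101T000000Z" +
		"&X-Amz-Expires=3600" +
		"&X-Amz-SignedHeaders=host" +
		"&X-Amz-Signature=deadbeef"

	query, _ := url.ParseQuery(rawQuery)

	// Date is parsed with the ISO8601 layout; expires becomes a duration.
	date, _ := time.Parse("20060102T150405Z", query.Get("X-Amz-Date"))
	expires, _ := time.ParseDuration(query.Get("X-Amz-Expires") + "s")
	fmt.Println(date.UTC(), expires) // 2016-01-01 00:00:00 +0000 UTC 1h0m0s
}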
|
||||
|
||||
// Parses signature version '4' header of the following form.
|
||||
//
|
||||
// Authorization: algorithm Credential=access key ID/credential scope, \
|
||||
// SignedHeaders=SignedHeaders, Signature=signature
|
||||
//
|
||||
func parseSignV4(v4Auth string) (signValues, *probe.Error) {
|
||||
// Replace all spaced strings, some clients can send spaced
|
||||
// parameters and some won't. So we pro-actively remove any spaces
|
||||
// to make parsing easier.
|
||||
v4Auth = strings.Replace(v4Auth, " ", "", -1)
|
||||
if v4Auth == "" {
|
||||
return signValues{}, ErrAuthHeaderEmpty("Auth header empty.").Trace(v4Auth)
|
||||
}
|
||||
|
||||
// Verify if the header algorithm is supported or not.
|
||||
if !strings.HasPrefix(v4Auth, signV4Algorithm) {
|
||||
return signValues{}, ErrUnsuppSignAlgo("Unsupported algorithm in authorization header.", v4Auth).Trace(v4Auth)
|
||||
}
|
||||
|
||||
// Strip off the Algorithm prefix.
|
||||
v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm)
|
||||
authFields := strings.Split(strings.TrimSpace(v4Auth), ",")
|
||||
if len(authFields) != 3 {
|
||||
return signValues{}, ErrMissingFields("Missing fields in authorization header.", v4Auth).Trace(v4Auth)
|
||||
}
|
||||
|
||||
// Initialize signature version '4' structured header.
|
||||
signV4Values := signValues{}
|
||||
|
||||
var err *probe.Error
|
||||
// Save credential values.
|
||||
signV4Values.Creds, err = parseCredential(authFields[0])
|
||||
if err != nil {
|
||||
return signValues{}, err.Trace(v4Auth)
|
||||
}
|
||||
|
||||
// Save signed headers.
|
||||
signV4Values.SignedHeaders, err = parseSignedHeaders(authFields[1])
|
||||
if err != nil {
|
||||
return signValues{}, err.Trace(v4Auth)
|
||||
}
|
||||
|
||||
// Save signature.
|
||||
signV4Values.Signature, err = parseSignature(authFields[2])
|
||||
if err != nil {
|
||||
return signValues{}, err.Trace(v4Auth)
|
||||
}
|
||||
|
||||
// Return the structure here.
|
||||
return signV4Values, nil
|
||||
}
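And the header form consumed here, again with placeholder values; the first steps mirror the parser: strip spaces, strip the algorithm prefix, split on commas into exactly three fields:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Shape of the Authorization value parseSignV4 above consumes.
	v4Auth := "AWS4-HMAC-SHA256 " +
		"Credential=AKIAIOSFODNN7EXAMPLE/20160101/us-east-1/s3/aws4_request, " +
		"SignedHeaders=host;x-amz-content-sha256;x-amz-date, " +
		"Signature=deadbeef"

	// Same first steps as the parser.
	v4Auth = strings.Replace(v4Auth, " ", "", -1)
	v4Auth = strings.TrimPrefix(v4Auth, "AWS4-HMAC-SHA256")
	fields := strings.Split(strings.TrimSpace(v4Auth), ",")
	fmt.Println(len(fields)) // 3: Credential=..., SignedHeaders=..., Signature=...
}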
|
|
@ -306,7 +306,7 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64,
|
|||
//
|
||||
// Signature mismatch occurred; all temp files are removed and all data purged.
|
||||
CleanupWritersOnError(writers)
|
||||
return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{})
|
||||
return ObjectMetadata{}, probe.NewError(SignDoesNotMatch{})
|
||||
}
|
||||
}
|
||||
objMetadata.MD5Sum = hex.EncodeToString(dataMD5sum)
|
||||
|
|
|
@ -18,6 +18,13 @@ package xl
|
|||
|
||||
import "fmt"
|
||||
|
||||
// SignDoesNotMatch - signature does not match.
|
||||
type SignDoesNotMatch struct{}
|
||||
|
||||
func (e SignDoesNotMatch) Error() string {
|
||||
return "Signature does not match."
|
||||
}
|
||||
|
||||
// InvalidArgument invalid argument
|
||||
type InvalidArgument struct{}
|
||||
|
||||
|
|
|
@ -226,7 +226,7 @@ func (xl API) createObjectPart(bucket, key, uploadID string, partID int, content
|
|||
return "", err.Trace()
|
||||
}
|
||||
if !ok {
|
||||
return "", probe.NewError(signV4.SigDoesNotMatch{})
|
||||
return "", probe.NewError(SignDoesNotMatch{})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -342,7 +342,7 @@ func (xl API) completeMultipartUploadV2(bucket, key, uploadID string, data io.Re
|
|||
return nil, err.Trace()
|
||||
}
|
||||
if !ok {
|
||||
return nil, probe.NewError(signV4.SigDoesNotMatch{})
|
||||
return nil, probe.NewError(SignDoesNotMatch{})
|
||||
}
|
||||
}
|
||||
parts := &CompleteMultipartUpload{}
|
||||
|
|
|
@ -376,7 +376,7 @@ func (xl API) completeMultipartUpload(bucket, object, uploadID string, data io.R
|
|||
return ObjectMetadata{}, err.Trace()
|
||||
}
|
||||
if !ok {
|
||||
return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{})
|
||||
return ObjectMetadata{}, probe.NewError(SignDoesNotMatch{})
|
||||
}
|
||||
}
|
||||
parts := &CompleteMultipartUpload{}
|
||||
|
|
|
@ -392,7 +392,7 @@ func (xl API) createObject(bucket, key, contentType, expectedMD5Sum string, size
|
|||
if !ok {
|
||||
// Delete the object; perhaps it is already saved, due to the nature of append()
|
||||
xl.objects.Delete(objectKey)
|
||||
return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{})
|
||||
return ObjectMetadata{}, probe.NewError(SignDoesNotMatch{})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -435,7 +435,7 @@ func (xl API) MakeBucket(bucketName, acl string, location io.Reader, signature *
|
|||
return err.Trace()
|
||||
}
|
||||
if !ok {
|
||||
return probe.NewError(signV4.SigDoesNotMatch{})
|
||||
return probe.NewError(SignDoesNotMatch{})
|
||||
}
|
||||
}
|
||||
|
||||
|
|
97 routers.go
|
@ -19,6 +19,7 @@ package main
|
|||
import (
|
||||
"net"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
|
||||
router "github.com/gorilla/mux"
|
||||
jsonrpc "github.com/gorilla/rpc/v2"
|
||||
|
@ -26,6 +27,7 @@ import (
|
|||
"github.com/minio/minio-go"
|
||||
"github.com/minio/minio/pkg/fs"
|
||||
"github.com/minio/minio/pkg/probe"
|
||||
signV4 "github.com/minio/minio/pkg/signature"
|
||||
)
|
||||
|
||||
// CloudStorageAPI container for S3 compatible API.
|
||||
|
@ -34,6 +36,10 @@ type CloudStorageAPI struct {
|
|||
AccessLog bool
|
||||
// Filesystem instance.
|
||||
Filesystem fs.Filesystem
|
||||
// Signature instance.
|
||||
Signature *signV4.Signature
|
||||
// Region instance.
|
||||
Region string
|
||||
}
|
||||
|
||||
// WebAPI container for Web API.
|
||||
|
@ -53,39 +59,30 @@ type WebAPI struct {
|
|||
secretAccessKey string
|
||||
}
|
||||
|
||||
func getWebAPIHandler(web *WebAPI) http.Handler {
|
||||
var handlerFns = []HandlerFunc{
|
||||
setCacheControlHandler, // Adds Cache-Control header
|
||||
setTimeValidityHandler, // Validate time.
|
||||
setJWTAuthHandler, // Authentication handler for verifying JWT's.
|
||||
setCorsHandler, // CORS added only for testing purposes.
|
||||
}
|
||||
if web.AccessLog {
|
||||
handlerFns = append(handlerFns, setAccessLogHandler)
|
||||
}
|
||||
|
||||
s := jsonrpc.NewServer()
|
||||
codec := json2.NewCodec()
|
||||
s.RegisterCodec(codec, "application/json")
|
||||
s.RegisterCodec(codec, "application/json; charset=UTF-8")
|
||||
s.RegisterService(web, "Web")
|
||||
mux := router.NewRouter()
|
||||
// Root router.
|
||||
root := mux.NewRoute().PathPrefix("/").Subrouter()
|
||||
root.Handle("/rpc", s)
|
||||
|
||||
// Enable this when we add assets.
|
||||
root.PathPrefix("/login").Handler(http.StripPrefix("/login", http.FileServer(assetFS())))
|
||||
root.Handle("/{file:.*}", http.FileServer(assetFS()))
|
||||
return registerHandlers(mux, handlerFns...)
|
||||
}
|
||||
|
||||
// registerCloudStorageAPI - register all the handlers to their respective paths
|
||||
func registerCloudStorageAPI(mux *router.Router, a CloudStorageAPI) {
|
||||
// root Router
|
||||
root := mux.NewRoute().PathPrefix("/").Subrouter()
|
||||
func registerCloudStorageAPI(mux *router.Router, a CloudStorageAPI, w *WebAPI) {
|
||||
// Minio rpc router
|
||||
minio := mux.NewRoute().PathPrefix(privateBucket).Subrouter()
|
||||
|
||||
// Initialize json rpc handlers.
|
||||
rpc := jsonrpc.NewServer()
|
||||
codec := json2.NewCodec()
|
||||
rpc.RegisterCodec(codec, "application/json")
|
||||
rpc.RegisterCodec(codec, "application/json; charset=UTF-8")
|
||||
rpc.RegisterService(w, "Web")
|
||||
|
||||
// RPC handler at URI - /minio/rpc
|
||||
minio.Path("/rpc").Handler(rpc)
|
||||
|
||||
// Web handler assets at URI - /minio/login
|
||||
minio.Path("/login").Handler(http.StripPrefix(filepath.Join(privateBucket, "login"), http.FileServer(assetFS())))
|
||||
minio.Path("/{file:.*}").Handler(http.StripPrefix(privateBucket, http.FileServer(assetFS())))
|
||||
|
||||
// API Router
|
||||
api := mux.NewRoute().PathPrefix("/").Subrouter()
|
||||
|
||||
// Bucket router
|
||||
bucket := root.PathPrefix("/{bucket}").Subrouter()
|
||||
bucket := api.PathPrefix("/{bucket}").Subrouter()
|
||||
|
||||
// Object operations
|
||||
bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(a.HeadObjectHandler)
|
||||
|
@ -110,7 +107,7 @@ func registerCloudStorageAPI(mux *router.Router, a CloudStorageAPI) {
|
|||
bucket.Methods("DELETE").HandlerFunc(a.DeleteBucketHandler)
|
||||
|
||||
// Root operation
|
||||
root.Methods("GET").HandlerFunc(a.ListBucketsHandler)
|
||||
api.Methods("GET").HandlerFunc(a.ListBucketsHandler)
|
||||
}
|
||||
|
||||
// getNewWebAPI instantiate a new WebAPI.
|
||||
|
@ -129,7 +126,7 @@ func getNewWebAPI(conf cloudServerConfig) *WebAPI {
|
|||
client, e := minio.NewV4(net.JoinHostPort(host, port), conf.AccessKeyID, conf.SecretAccessKey, inSecure)
|
||||
fatalIf(probe.NewError(e), "Unable to initialize minio client", nil)
|
||||
|
||||
web := &WebAPI{
|
||||
w := &WebAPI{
|
||||
FSPath: conf.Path,
|
||||
AccessLog: conf.AccessLog,
|
||||
Client: client,
|
||||
|
@ -138,7 +135,7 @@ func getNewWebAPI(conf cloudServerConfig) *WebAPI {
|
|||
accessKeyID: conf.AccessKeyID,
|
||||
secretAccessKey: conf.SecretAccessKey,
|
||||
}
|
||||
return web
|
||||
return w
|
||||
}
|
||||
|
||||
// getNewCloudStorageAPI instantiate a new CloudStorageAPI.
|
||||
|
@ -146,24 +143,40 @@ func getNewCloudStorageAPI(conf cloudServerConfig) CloudStorageAPI {
|
|||
fs, err := fs.New(conf.Path, conf.MinFreeDisk)
|
||||
fatalIf(err.Trace(), "Initializing filesystem failed.", nil)
|
||||
|
||||
sign, err := signV4.New(conf.AccessKeyID, conf.SecretAccessKey, conf.Region)
|
||||
fatalIf(err.Trace(conf.AccessKeyID, conf.SecretAccessKey, conf.Region), "Initializing signature version '4' failed.", nil)
|
||||
|
||||
return CloudStorageAPI{
|
||||
Filesystem: fs,
|
||||
AccessLog: conf.AccessLog,
|
||||
Filesystem: fs,
|
||||
Signature: sign,
|
||||
Region: conf.Region,
|
||||
}
|
||||
}
|
||||
|
||||
func getCloudStorageAPIHandler(api CloudStorageAPI) http.Handler {
|
||||
func getCloudStorageAPIHandler(api CloudStorageAPI, web *WebAPI) http.Handler {
|
||||
var handlerFns = []HandlerFunc{
|
||||
// Redirect some pre-defined browser request paths to a static
|
||||
// location prefix.
|
||||
setBrowserRedirectHandler,
|
||||
// Validates if incoming request is for restricted buckets.
|
||||
setPrivateBucketHandler,
|
||||
// Adds cache control for all browser requests.
|
||||
setBrowserCacheControlHandler,
|
||||
// Validates all incoming requests to have a valid date header.
|
||||
setTimeValidityHandler,
|
||||
// CORS setting for all browser API requests.
|
||||
setCorsHandler,
|
||||
// Validates all incoming URL resources, for invalid/unsupported
|
||||
// resources client receives a HTTP error.
|
||||
setIgnoreResourcesHandler,
|
||||
setIgnoreSignatureV2RequestHandler,
|
||||
setSignatureHandler,
|
||||
}
|
||||
if api.AccessLog {
|
||||
handlerFns = append(handlerFns, setAccessLogHandler)
|
||||
// Auth handler verifies incoming authorization headers and
|
||||
// routes them accordingly. Client receives a HTTP error for
|
||||
// invalid/unsupported signatures.
|
||||
setAuthHandler,
|
||||
}
|
||||
handlerFns = append(handlerFns, setCorsHandler)
|
||||
mux := router.NewRouter()
|
||||
registerCloudStorageAPI(mux, api)
|
||||
registerCloudStorageAPI(mux, api, web)
|
||||
return registerHandlers(mux, handlerFns...)
|
||||
}
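registerHandlers (not shown in this hunk) composes the router with the wrapper functions listed above. A generic standalone sketch of that pattern, assuming each wrapper takes and returns an http.Handler; the names chain and logRequests are hypothetical:

package main

import (
	"fmt"
	"net/http"
)

// middleware is the usual http.Handler wrapper shape.
type middleware func(http.Handler) http.Handler

// chain wraps the final handler with each middleware, so the first entry in
// the list ends up outermost and runs first on every request.
func chain(h http.Handler, mws ...middleware) http.Handler {
	for i := len(mws) - 1; i >= 0; i-- {
		h = mws[i](h)
	}
	return h
}

// logRequests is a trivial example wrapper.
func logRequests(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Println(r.Method, r.URL.Path)
		next.ServeHTTP(w, r)
	})
}

func main() {
	final := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})
	http.ListenAndServe(":9000", chain(final, logRequests))
}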
|
||||
|
|
|
@ -77,6 +77,7 @@ type cloudServerConfig struct {
|
|||
// Credentials.
|
||||
AccessKeyID string // Access key id.
|
||||
SecretAccessKey string // Secret access key.
|
||||
Region string // Region string.
|
||||
|
||||
/// FS options
|
||||
Path string // Path to export for cloud storage
|
||||
|
@ -89,45 +90,12 @@ type cloudServerConfig struct {
|
|||
KeyFile string // Domain key
|
||||
}
|
||||
|
||||
func configureWebServer(conf cloudServerConfig) (*http.Server, *probe.Error) {
|
||||
// Split the api address into host and port.
|
||||
host, port, e := net.SplitHostPort(conf.Address)
|
||||
if e != nil {
|
||||
return nil, probe.NewError(e)
|
||||
}
|
||||
webPort, e := strconv.Atoi(port)
|
||||
if e != nil {
|
||||
return nil, probe.NewError(e)
|
||||
}
|
||||
// Always choose the next port, based on the API address port.
|
||||
webPort = webPort + 1
|
||||
webAddress := net.JoinHostPort(host, strconv.Itoa(webPort))
|
||||
|
||||
// Minio server config
|
||||
webServer := &http.Server{
|
||||
Addr: webAddress,
|
||||
Handler: getWebAPIHandler(getNewWebAPI(conf)),
|
||||
MaxHeaderBytes: 1 << 20,
|
||||
}
|
||||
|
||||
if conf.TLS {
|
||||
var err error
|
||||
webServer.TLSConfig = &tls.Config{}
|
||||
webServer.TLSConfig.Certificates = make([]tls.Certificate, 1)
|
||||
webServer.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(conf.CertFile, conf.KeyFile)
|
||||
if err != nil {
|
||||
return nil, probe.NewError(err)
|
||||
}
|
||||
}
|
||||
return webServer, nil
|
||||
}
|
||||
|
||||
// configureAPIServer configure a new server instance
|
||||
func configureAPIServer(conf cloudServerConfig) (*http.Server, *probe.Error) {
|
||||
// Minio server config
|
||||
apiServer := &http.Server{
|
||||
Addr: conf.Address,
|
||||
Handler: getCloudStorageAPIHandler(getNewCloudStorageAPI(conf)),
|
||||
Handler: getCloudStorageAPIHandler(getNewCloudStorageAPI(conf), getNewWebAPI(conf)),
|
||||
MaxHeaderBytes: 1 << 20,
|
||||
}
|
||||
|
||||
|
@ -299,12 +267,17 @@ func serverMain(c *cli.Context) {
|
|||
if _, err := os.Stat(path); err != nil {
|
||||
fatalIf(probe.NewError(err), "Unable to validate the path", nil)
|
||||
}
|
||||
region := conf.Credentials.Region
|
||||
if region == "" {
|
||||
region = "us-east-1"
|
||||
}
|
||||
tls := (certFile != "" && keyFile != "")
|
||||
serverConfig := cloudServerConfig{
|
||||
Address: c.GlobalString("address"),
|
||||
AccessLog: c.GlobalBool("enable-accesslog"),
|
||||
AccessKeyID: conf.Credentials.AccessKeyID,
|
||||
SecretAccessKey: conf.Credentials.SecretAccessKey,
|
||||
Region: region,
|
||||
Path: path,
|
||||
MinFreeDisk: minFreeDisk,
|
||||
TLS: tls,
|
||||
|
@ -319,13 +292,6 @@ func serverMain(c *cli.Context) {
|
|||
Println("\nMinio Object Storage:")
|
||||
printServerMsg(apiServer)
|
||||
|
||||
// configure Web server.
|
||||
webServer, err := configureWebServer(serverConfig)
|
||||
errorIf(err.Trace(), "Failed to configure Web server.", nil)
|
||||
|
||||
Println("\nMinio Browser:")
|
||||
printServerMsg(webServer)
|
||||
|
||||
Println("\nTo configure Minio Client:")
|
||||
if runtime.GOOS == "windows" {
|
||||
Println(" Download \"mc\" from https://dl.minio.io/client/mc/release/" + runtime.GOOS + "-" + runtime.GOARCH + "/mc.exe")
|
||||
|
@ -337,6 +303,6 @@ func serverMain(c *cli.Context) {
|
|||
}
|
||||
|
||||
// Start server.
|
||||
err = minhttp.ListenAndServe(apiServer, webServer)
|
||||
err = minhttp.ListenAndServe(apiServer)
|
||||
errorIf(err.Trace(), "Failed to start the minio server.", nil)
|
||||
}
|
||||
|
|
|
@ -21,8 +21,10 @@ import (
|
|||
"crypto/md5"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
@ -52,6 +54,21 @@ var _ = Suite(&MyAPIFSCacheSuite{})
|
|||
|
||||
var testAPIFSCacheServer *httptest.Server
|
||||
|
||||
// Ask the kernel for a free open port.
|
||||
func getFreePort() int {
|
||||
addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
l, err := net.ListenTCP("tcp", addr)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer l.Close()
|
||||
return l.Addr().(*net.TCPAddr).Port
|
||||
}
|
||||
|
||||
func (s *MyAPIFSCacheSuite) SetUpSuite(c *C) {
|
||||
root, e := ioutil.TempDir(os.TempDir(), "api-")
|
||||
c.Assert(e, IsNil)
|
||||
|
@ -77,11 +94,16 @@ func (s *MyAPIFSCacheSuite) SetUpSuite(c *C) {
|
|||
c.Assert(saveConfig(conf), IsNil)
|
||||
|
||||
cloudServer := cloudServerConfig{
|
||||
Path: fsroot,
|
||||
MinFreeDisk: 0,
|
||||
Address: ":" + strconv.Itoa(getFreePort()),
|
||||
Path: fsroot,
|
||||
MinFreeDisk: 0,
|
||||
AccessKeyID: s.accessKeyID,
|
||||
SecretAccessKey: s.secretAccessKey,
|
||||
Region: "us-east-1",
|
||||
}
|
||||
cloudStorageAPI := getNewCloudStorageAPI(cloudServer)
|
||||
httpHandler := getCloudStorageAPIHandler(cloudStorageAPI)
|
||||
webAPI := getNewWebAPI(cloudServer)
|
||||
httpHandler := getCloudStorageAPIHandler(cloudStorageAPI, webAPI)
|
||||
testAPIFSCacheServer = httptest.NewServer(httpHandler)
|
||||
}
|
||||
|
||||
|
@ -225,7 +247,7 @@ func (s *MyAPIFSCacheSuite) newRequest(method, urlStr string, contentLength int6
|
|||
"aws4_request",
|
||||
}, "/")
|
||||
|
||||
stringToSign := authHeaderPrefix + "\n" + t.Format(iso8601Format) + "\n"
|
||||
stringToSign := "AWS4-HMAC-SHA256" + "\n" + t.Format(iso8601Format) + "\n"
|
||||
stringToSign = stringToSign + scope + "\n"
|
||||
stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
|
||||
|
||||
|
@ -238,7 +260,7 @@ func (s *MyAPIFSCacheSuite) newRequest(method, urlStr string, contentLength int6
|
|||
|
||||
// final Authorization header
|
||||
parts := []string{
|
||||
authHeaderPrefix + " Credential=" + s.accessKeyID + "/" + scope,
|
||||
"AWS4-HMAC-SHA256" + " Credential=" + s.accessKeyID + "/" + scope,
|
||||
"SignedHeaders=" + signedHeaders,
|
||||
"Signature=" + signature,
|
||||
}
|
||||
|
|
|
@ -1,144 +0,0 @@
|
|||
/*
|
||||
* Minio Cloud Storage, (C) 2015 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio/pkg/crypto/sha256"
|
||||
"github.com/minio/minio/pkg/probe"
|
||||
v4 "github.com/minio/minio/pkg/signature"
|
||||
)
|
||||
|
||||
type signatureHandler struct {
|
||||
handler http.Handler
|
||||
}
|
||||
|
||||
// setSignatureHandler to validate authorization header for the incoming request.
|
||||
func setSignatureHandler(h http.Handler) http.Handler {
|
||||
return signatureHandler{h}
|
||||
}
|
||||
|
||||
func isRequestSignatureV4(req *http.Request) bool {
|
||||
if _, ok := req.Header["Authorization"]; ok {
|
||||
if strings.HasPrefix(req.Header.Get("Authorization"), authHeaderPrefix) {
|
||||
return ok
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isRequestRequiresACLCheck(req *http.Request) bool {
|
||||
if isRequestSignatureV4(req) || isRequestPresignedSignatureV4(req) || isRequestPostPolicySignatureV4(req) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func isRequestPresignedSignatureV4(req *http.Request) bool {
|
||||
if _, ok := req.URL.Query()["X-Amz-Credential"]; ok {
|
||||
return ok
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isRequestPostPolicySignatureV4(req *http.Request) bool {
|
||||
if _, ok := req.Header["Content-Type"]; ok {
|
||||
if strings.Contains(req.Header.Get("Content-Type"), "multipart/form-data") {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s signatureHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
if isRequestPostPolicySignatureV4(r) && r.Method == "POST" {
|
||||
s.handler.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
var signature *v4.Signature
|
||||
if isRequestSignatureV4(r) {
|
||||
// For PUT and POST requests with payload, send the call upwards for verification.
|
||||
// Or PUT and POST requests without payload, verify here.
|
||||
if (r.Body == nil && (r.Method == "PUT" || r.Method == "POST")) || (r.Method != "PUT" && r.Method != "POST") {
|
||||
// Init signature V4 verification
|
||||
var err *probe.Error
|
||||
signature, err = initSignatureV4(r)
|
||||
if err != nil {
|
||||
switch err.ToGoError() {
|
||||
case errInvalidRegion:
|
||||
errorIf(err.Trace(), "Unknown region in authorization header.", nil)
|
||||
writeErrorResponse(w, r, AuthorizationHeaderMalformed, r.URL.Path)
|
||||
return
|
||||
case errAccessKeyIDInvalid:
|
||||
errorIf(err.Trace(), "Invalid access key id.", nil)
|
||||
writeErrorResponse(w, r, InvalidAccessKeyID, r.URL.Path)
|
||||
return
|
||||
default:
|
||||
errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
|
||||
writeErrorResponse(w, r, InternalError, r.URL.Path)
|
||||
return
|
||||
}
|
||||
}
|
||||
dummySha256Bytes := sha256.Sum256([]byte(""))
|
||||
ok, err := signature.DoesSignatureMatch(hex.EncodeToString(dummySha256Bytes[:]))
|
||||
if err != nil {
|
||||
errorIf(err.Trace(), "Unable to verify signature.", nil)
|
||||
writeErrorResponse(w, r, InternalError, r.URL.Path)
|
||||
return
|
||||
}
|
||||
if !ok {
|
||||
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
|
||||
return
|
||||
}
|
||||
}
|
||||
s.handler.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
if isRequestPresignedSignatureV4(r) {
|
||||
var err *probe.Error
|
||||
signature, err = initPresignedSignatureV4(r)
|
||||
if err != nil {
|
||||
switch err.ToGoError() {
|
||||
case errAccessKeyIDInvalid:
|
||||
errorIf(err.Trace(), "Invalid access key id requested.", nil)
|
||||
writeErrorResponse(w, r, InvalidAccessKeyID, r.URL.Path)
|
||||
return
|
||||
default:
|
||||
errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
|
||||
writeErrorResponse(w, r, InternalError, r.URL.Path)
|
||||
return
|
||||
}
|
||||
}
|
||||
ok, err := signature.DoesPresignedSignatureMatch()
|
||||
if err != nil {
|
||||
errorIf(err.Trace(), "Unable to verify signature.", nil)
|
||||
writeErrorResponse(w, r, InternalError, r.URL.Path)
|
||||
return
|
||||
}
|
||||
if !ok {
|
||||
writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
|
||||
return
|
||||
}
|
||||
s.handler.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
// call goes up from here, let ACL's verify the validity of the request
|
||||
s.handler.ServeHTTP(w, r)
|
||||
}
|
|
@ -0,0 +1,72 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
signV4 "github.com/minio/minio/pkg/signature"
|
||||
)
|
||||
|
||||
func isRequestJWT(r *http.Request) bool {
|
||||
if _, ok := r.Header["Authorization"]; ok {
|
||||
if strings.HasPrefix(r.Header.Get("Authorization"), jwtAlgorithm) {
|
||||
return ok
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isRequestSignatureV4(r *http.Request) bool {
|
||||
if _, ok := r.Header["Authorization"]; ok {
|
||||
if strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) {
|
||||
return ok
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isRequestPresignedSignatureV4(r *http.Request) bool {
|
||||
if _, ok := r.URL.Query()["X-Amz-Credential"]; ok {
|
||||
return ok
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isRequestPostPolicySignatureV4(r *http.Request) bool {
|
||||
if _, ok := r.Header["Content-Type"]; ok {
|
||||
if strings.Contains(r.Header.Get("Content-Type"), "multipart/form-data") {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isRequestRequiresACLCheck(r *http.Request) bool {
|
||||
if isRequestSignatureV4(r) || isRequestPresignedSignatureV4(r) || isRequestPostPolicySignatureV4(r) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func isSignV4ReqAuthenticated(sign *signV4.Signature, r *http.Request) bool {
|
||||
auth := sign.SetHTTPRequestToVerify(r)
|
||||
if isRequestSignatureV4(r) {
|
||||
dummyPayload := sha256.Sum256([]byte(""))
|
||||
ok, err := auth.DoesSignatureMatch(hex.EncodeToString(dummyPayload[:]))
|
||||
if err != nil {
|
||||
errorIf(err.Trace(), "Signature verification failed.", nil)
|
||||
return false
|
||||
}
|
||||
return ok
|
||||
} else if isRequestPresignedSignatureV4(r) {
|
||||
ok, err := auth.DoesPresignedSignatureMatch()
|
||||
if err != nil {
|
||||
errorIf(err.Trace(), "Presigned signature verification failed.", nil)
|
||||
return false
|
||||
}
|
||||
return ok
|
||||
}
|
||||
return false
|
||||
}
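A minimal sketch of how a wrapper like setAuthHandler might consult a check of this shape before passing the request on; the authHandler type and verify function below are hypothetical, not the handlers in this commit:

package main

import (
	"net/http"
)

// authHandler verifies requests that carry a recognizable signature before
// handing them to the next handler; verify stands in for isSignV4ReqAuthenticated.
type authHandler struct {
	next   http.Handler
	verify func(*http.Request) bool
}

func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if !a.verify(r) {
		http.Error(w, "SignatureDoesNotMatch", http.StatusForbidden)
		return
	}
	a.next.ServeHTTP(w, r)
}

func main() {
	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	verify := func(r *http.Request) bool { return r.Header.Get("Authorization") != "" }
	http.ListenAndServe(":9000", authHandler{next: api, verify: verify})
}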
|
|
@ -36,9 +36,9 @@ import (
|
|||
"github.com/minio/minio/pkg/probe"
|
||||
)
|
||||
|
||||
// isAuthenticated validates if any incoming request to be a valid JWT
|
||||
// isJWTReqAuthencatied validates if any incoming request to be a valid JWT
|
||||
// authenticated request.
|
||||
func isAuthenticated(req *http.Request) bool {
|
||||
func isJWTReqAuthencatied(req *http.Request) bool {
|
||||
jwt := InitJWT()
|
||||
tokenRequest, e := jwtgo.ParseFromRequest(req, func(token *jwtgo.Token) (interface{}, error) {
|
||||
if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok {
|
||||
|
@ -60,7 +60,7 @@ func (web WebAPI) GetUIVersion(r *http.Request, args *GenericArgs, reply *Generi
|
|||
|
||||
// ServerInfo - get server info.
|
||||
func (web *WebAPI) ServerInfo(r *http.Request, args *ServerInfoArgs, reply *ServerInfoRep) error {
|
||||
if !isAuthenticated(r) {
|
||||
if !isJWTReqAuthencatied(r) {
|
||||
return &json2.Error{Message: "Unauthorized request"}
|
||||
}
|
||||
host, err := os.Hostname()
|
||||
|
@ -89,7 +89,7 @@ func (web *WebAPI) ServerInfo(r *http.Request, args *ServerInfoArgs, reply *Serv
|
|||
|
||||
// DiskInfo - get disk statistics.
|
||||
func (web *WebAPI) DiskInfo(r *http.Request, args *DiskInfoArgs, reply *DiskInfoRep) error {
|
||||
if !isAuthenticated(r) {
|
||||
if !isJWTReqAuthencatied(r) {
|
||||
return &json2.Error{Message: "Unauthorized request"}
|
||||
}
|
||||
info, e := disk.GetInfo(web.FSPath)
|
||||
|
@ -103,7 +103,7 @@ func (web *WebAPI) DiskInfo(r *http.Request, args *DiskInfoArgs, reply *DiskInfo
|
|||
|
||||
// MakeBucket - make a bucket.
|
||||
func (web *WebAPI) MakeBucket(r *http.Request, args *MakeBucketArgs, reply *GenericRep) error {
|
||||
if !isAuthenticated(r) {
|
||||
if !isJWTReqAuthencatied(r) {
|
||||
return &json2.Error{Message: "Unauthorized request"}
|
||||
}
|
||||
reply.UIVersion = uiVersion
|
||||
|
@ -116,7 +116,7 @@ func (web *WebAPI) MakeBucket(r *http.Request, args *MakeBucketArgs, reply *Gene
|
|||
|
||||
// ListBuckets - list buckets api.
|
||||
func (web *WebAPI) ListBuckets(r *http.Request, args *ListBucketsArgs, reply *ListBucketsRep) error {
|
||||
if !isAuthenticated(r) {
|
||||
if !isJWTReqAuthencatied(r) {
|
||||
return &json2.Error{Message: "Unauthorized request"}
|
||||
}
|
||||
buckets, e := web.Client.ListBuckets()
|
||||
|
@ -135,7 +135,7 @@ func (web *WebAPI) ListBuckets(r *http.Request, args *ListBucketsArgs, reply *Li
|
|||
|
||||
// ListObjects - list objects api.
|
||||
func (web *WebAPI) ListObjects(r *http.Request, args *ListObjectsArgs, reply *ListObjectsRep) error {
|
||||
if !isAuthenticated(r) {
|
||||
if !isJWTReqAuthencatied(r) {
|
||||
return &json2.Error{Message: "Unauthorized request"}
|
||||
}
|
||||
doneCh := make(chan struct{})
|
||||
|
@ -183,7 +183,7 @@ func getTargetHost(apiAddress, targetHost string) (string, *probe.Error) {
|
|||
|
||||
// PutObjectURL - generates url for upload access.
|
||||
func (web *WebAPI) PutObjectURL(r *http.Request, args *PutObjectURLArgs, reply *PutObjectURLRep) error {
|
||||
if !isAuthenticated(r) {
|
||||
if !isJWTReqAuthencatied(r) {
|
||||
return &json2.Error{Message: "Unauthorized request"}
|
||||
}
|
||||
targetHost, err := getTargetHost(web.apiAddress, args.TargetHost)
|
||||
|
@ -205,7 +205,7 @@ func (web *WebAPI) PutObjectURL(r *http.Request, args *PutObjectURLArgs, reply *
|
|||
|
||||
// GetObjectURL - generates url for download access.
|
||||
func (web *WebAPI) GetObjectURL(r *http.Request, args *GetObjectURLArgs, reply *GetObjectURLRep) error {
|
||||
if !isAuthenticated(r) {
|
||||
if !isJWTReqAuthencatied(r) {
|
||||
return &json2.Error{Message: "Unauthorized request"}
|
||||
}
|
||||
|
||||
|
@ -237,7 +237,7 @@ func (web *WebAPI) GetObjectURL(r *http.Request, args *GetObjectURLArgs, reply *
|
|||
|
||||
// RemoveObject - removes an object.
|
||||
func (web *WebAPI) RemoveObject(r *http.Request, args *RemoveObjectArgs, reply *GenericRep) error {
|
||||
if !isAuthenticated(r) {
|
||||
if !isJWTReqAuthencatied(r) {
|
||||
return &json2.Error{Message: "Unauthorized request"}
|
||||
}
|
||||
reply.UIVersion = uiVersion
|
||||
|