Implement presigned policy

Harshavardhana
2015-10-01 23:51:17 -07:00
parent 09dc360e06
commit c8de5bad2f
18 changed files with 560 additions and 240 deletions

View File

@@ -35,6 +35,7 @@ import (
"github.com/minio/minio/pkg/crypto/sha512"
"github.com/minio/minio/pkg/donut/disk"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
)
const (
@@ -235,7 +236,7 @@ func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64,
}
// WriteObject - write a new object into bucket
func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, expectedMD5Sum string, metadata map[string]string, signature *Signature) (ObjectMetadata, *probe.Error) {
func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, expectedMD5Sum string, metadata map[string]string, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
b.lock.Lock()
defer b.lock.Unlock()
if objectName == "" || objectData == nil {
@@ -306,7 +307,7 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64,
//
// Signature mismatch occurred; all temp files are removed and all data is purged.
CleanupWritersOnError(writers)
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
objMetadata.MD5Sum = hex.EncodeToString(dataMD5sum)
@@ -429,27 +430,25 @@ func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err *probe
}
// writeObjectData -
func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, size int64, writer io.Writer) (int, int, *probe.Error) {
func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, size int64, hashWriter io.Writer) (int, int, *probe.Error) {
encoder, err := newEncoder(k, m, "Cauchy")
chunkSize := int64(10 * 1024 * 1024)
if err != nil {
return 0, 0, err.Trace()
}
chunkSize := int64(10 * 1024 * 1024)
chunkCount := 0
totalLength := 0
remaining := size
for remaining > 0 {
readSize := chunkSize
if remaining < chunkSize {
readSize = remaining
}
remaining = remaining - readSize
totalLength = totalLength + int(readSize)
encodedBlocks, inputData, err := encoder.EncodeStream(objectData, readSize)
var e error
for e == nil {
var length int
inputData := make([]byte, chunkSize)
length, e = objectData.Read(inputData)
encodedBlocks, err := encoder.Encode(inputData)
if err != nil {
return 0, 0, err.Trace()
}
if _, err := writer.Write(inputData); err != nil {
if _, err := hashWriter.Write(inputData[0:length]); err != nil {
return 0, 0, probe.NewError(err)
}
for blockIndex, block := range encodedBlocks {
@@ -464,8 +463,12 @@ func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData
return 0, 0, probe.NewError(err)
}
}
totalLength += length
chunkCount = chunkCount + 1
}
if e != io.EOF {
return 0, 0, probe.NewError(e)
}
return chunkCount, totalLength, nil
}
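
The rewrite above drops EncodeStream in favour of a plain fixed-size read loop that runs until io.EOF, hashing and erasure-encoding each chunk as it is read. A minimal standalone sketch of that control flow, with a hypothetical process callback standing in for encoder.Encode plus the per-block and hash writes:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// readInChunks reads fixed-size chunks from r until io.EOF, handing each
// chunk to process and returning how many chunks and bytes were consumed.
// process is only a stand-in for the erasure encode + block writes above.
func readInChunks(r io.Reader, chunkSize int64, process func([]byte) error) (int, int, error) {
	chunkCount := 0
	totalLength := 0
	var e error
	for e == nil {
		inputData := make([]byte, chunkSize)
		var length int
		length, e = r.Read(inputData)
		if length > 0 {
			if perr := process(inputData[0:length]); perr != nil {
				return 0, 0, perr
			}
			totalLength += length
			chunkCount++
		}
	}
	if e != io.EOF {
		return 0, 0, e
	}
	return chunkCount, totalLength, nil
}

func main() {
	data := bytes.Repeat([]byte("a"), 25)
	chunks, total, err := readInChunks(bytes.NewReader(data), 10, func(chunk []byte) error {
		fmt.Printf("chunk of %d bytes\n", len(chunk))
		return nil
	})
	fmt.Println(chunks, total, err) // 3 25 <nil>
}

Treating io.EOF as the loop's normal exit is what lets callers compare the returned total against the declared size afterwards.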

View File

@@ -36,6 +36,7 @@ import (
"github.com/minio/minio/pkg/crypto/sha512"
"github.com/minio/minio/pkg/donut/disk"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
)
// config files used inside Donut
@@ -127,7 +128,7 @@ func (donut API) listObjects(bucket, prefix, marker, delimiter string, maxkeys i
}
// putObject - put object
func (donut API) putObject(bucket, object, expectedMD5Sum string, reader io.Reader, size int64, metadata map[string]string, signature *Signature) (ObjectMetadata, *probe.Error) {
func (donut API) putObject(bucket, object, expectedMD5Sum string, reader io.Reader, size int64, metadata map[string]string, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
@@ -159,7 +160,7 @@ func (donut API) putObject(bucket, object, expectedMD5Sum string, reader io.Read
}
// putObjectPart - put object part
func (donut API) putObjectPart(bucket, object, expectedMD5Sum, uploadID string, partID int, reader io.Reader, size int64, metadata map[string]string, signature *Signature) (PartMetadata, *probe.Error) {
func (donut API) putObjectPart(bucket, object, expectedMD5Sum, uploadID string, partID int, reader io.Reader, size int64, metadata map[string]string, signature *signv4.Signature) (PartMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return PartMetadata{}, probe.NewError(InvalidArgument{})
}
@@ -336,7 +337,7 @@ func (donut API) listObjectParts(bucket, object string, resources ObjectResource
}
// completeMultipartUpload complete an incomplete multipart upload
func (donut API) completeMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
func (donut API) completeMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
@@ -374,7 +375,7 @@ func (donut API) completeMultipartUpload(bucket, object, uploadID string, data i
return ObjectMetadata{}, err.Trace()
}
if !ok {
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
parts := &CompleteMultipartUpload{}
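
Throughout this file the signature parameter, now *signv4.Signature, is verified against a hex-encoded SHA-256 of the request payload via DoesSignatureMatch. A standalone sketch, using the standard library rather than minio's pkg/crypto hashes, of producing that digest while the body is being written out; the final verification call appears only as a comment because building a signv4.Signature needs request state not reproduced here:

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
)

// hashWhileCopying stores body into dst while computing the SHA-256 of
// everything copied, returning the hex digest that DoesSignatureMatch expects.
func hashWhileCopying(dst io.Writer, body io.Reader) (string, error) {
	h := sha256.New()
	if _, err := io.Copy(io.MultiWriter(dst, h), body); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	var stored bytes.Buffer
	digest, err := hashWhileCopying(&stored, bytes.NewReader([]byte("hello world")))
	if err != nil {
		panic(err)
	}
	fmt.Println(digest)
	// ok, perr := signature.DoesSignatureMatch(digest) // as putObject does after writing
}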

View File

@@ -36,6 +36,7 @@ import (
"github.com/minio/minio/pkg/donut/cache/metadata"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/quick"
signv4 "github.com/minio/minio/pkg/signature"
)
// total Number of buckets allowed
@@ -204,7 +205,7 @@ func (donut API) GetObject(w io.Writer, bucket string, object string, start, len
}
// GetBucketMetadata -
func (donut API) GetBucketMetadata(bucket string, signature *Signature) (BucketMetadata, *probe.Error) {
func (donut API) GetBucketMetadata(bucket string, signature *signv4.Signature) (BucketMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -214,7 +215,7 @@ func (donut API) GetBucketMetadata(bucket string, signature *Signature) (BucketM
return BucketMetadata{}, err.Trace()
}
if !ok {
return BucketMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return BucketMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
@@ -237,7 +238,7 @@ func (donut API) GetBucketMetadata(bucket string, signature *Signature) (BucketM
}
// SetBucketMetadata -
func (donut API) SetBucketMetadata(bucket string, metadata map[string]string, signature *Signature) *probe.Error {
func (donut API) SetBucketMetadata(bucket string, metadata map[string]string, signature *signv4.Signature) *probe.Error {
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -247,7 +248,7 @@ func (donut API) SetBucketMetadata(bucket string, metadata map[string]string, si
return err.Trace()
}
if !ok {
return probe.NewError(SignatureDoesNotMatch{})
return probe.NewError(signv4.DoesNotMatch{})
}
}
@@ -288,7 +289,7 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) *probe.Error {
}
// CreateObject - create an object
func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, data io.Reader, metadata map[string]string, signature *Signature) (ObjectMetadata, *probe.Error) {
func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, data io.Reader, metadata map[string]string, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -301,7 +302,7 @@ func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, da
}
// createObject - PUT object to cache buffer
func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
if len(donut.config.NodeDiskMap) == 0 {
if size > int64(donut.config.MaxSize) {
generic := GenericObjectError{Bucket: bucket, Object: key}
@@ -381,10 +382,12 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
totalLength += int64(length)
go debug.FreeOSMemory()
}
if totalLength != size {
// Delete the object; it may already have been partially saved due to the nature of append()
donut.objects.Delete(objectKey)
return ObjectMetadata{}, probe.NewError(IncompleteBody{Bucket: bucket, Object: key})
if size != 0 {
if totalLength != size {
// Delete the object; it may already have been partially saved due to the nature of append()
donut.objects.Delete(objectKey)
return ObjectMetadata{}, probe.NewError(IncompleteBody{Bucket: bucket, Object: key})
}
}
if err != io.EOF {
return ObjectMetadata{}, probe.NewError(err)
@@ -403,7 +406,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
return ObjectMetadata{}, err.Trace()
}
if !ok {
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
@@ -425,7 +428,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
}
// MakeBucket - create bucket in cache
func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signature *Signature) *probe.Error {
func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signature *signv4.Signature) *probe.Error {
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -445,7 +448,7 @@ func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signatur
return err.Trace()
}
if !ok {
return probe.NewError(SignatureDoesNotMatch{})
return probe.NewError(signv4.DoesNotMatch{})
}
}
@@ -484,7 +487,7 @@ func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signatur
}
// ListObjects - list objects from cache
func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata, signature *Signature) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error) {
func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata, signature *signv4.Signature) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -494,7 +497,7 @@ func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata, s
return nil, BucketResourcesMetadata{}, err.Trace()
}
if !ok {
return nil, BucketResourcesMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return nil, BucketResourcesMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
@@ -587,7 +590,7 @@ func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }
// ListBuckets - List buckets from cache
func (donut API) ListBuckets(signature *Signature) ([]BucketMetadata, *probe.Error) {
func (donut API) ListBuckets(signature *signv4.Signature) ([]BucketMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -597,7 +600,7 @@ func (donut API) ListBuckets(signature *Signature) ([]BucketMetadata, *probe.Err
return nil, err.Trace()
}
if !ok {
return nil, probe.NewError(SignatureDoesNotMatch{})
return nil, probe.NewError(signv4.DoesNotMatch{})
}
}
@@ -621,7 +624,7 @@ func (donut API) ListBuckets(signature *Signature) ([]BucketMetadata, *probe.Err
}
// GetObjectMetadata - get object metadata from cache
func (donut API) GetObjectMetadata(bucket, key string, signature *Signature) (ObjectMetadata, *probe.Error) {
func (donut API) GetObjectMetadata(bucket, key string, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -632,7 +635,7 @@ func (donut API) GetObjectMetadata(bucket, key string, signature *Signature) (Ob
return ObjectMetadata{}, err.Trace()
}
if !ok {
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
} else {
ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
@@ -640,7 +643,7 @@ func (donut API) GetObjectMetadata(bucket, key string, signature *Signature) (Ob
return ObjectMetadata{}, err.Trace()
}
if !ok {
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
}
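
The string literal passed to DoesSignatureMatch in the else branch above is the SHA-256 of an empty payload, used when the request carries no body. It can be reproduced with the standard library:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	// SHA-256 over zero bytes: the empty-payload hash used by GetObjectMetadata.
	sum := sha256.Sum256(nil)
	fmt.Println(hex.EncodeToString(sum[:]))
	// Output: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}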

View File

@@ -17,8 +17,6 @@
package donut
import (
"io"
encoding "github.com/minio/minio/pkg/erasure"
"github.com/minio/minio/pkg/probe"
)
@@ -83,14 +81,6 @@ func (e encoder) Encode(data []byte) ([][]byte, *probe.Error) {
return encodedData, nil
}
func (e encoder) EncodeStream(data io.Reader, size int64) ([][]byte, []byte, *probe.Error) {
encodedData, inputData, err := e.encoder.EncodeStream(data, size)
if err != nil {
return nil, nil, probe.NewError(err)
}
return encodedData, inputData, nil
}
// Decode - erasure decode input encoded bytes
func (e encoder) Decode(encodedData [][]byte, dataLength int) ([]byte, *probe.Error) {
decodedData, err := e.encoder.Decode(encodedData, dataLength)

View File

@@ -125,6 +125,13 @@ func (e ChecksumMismatch) Error() string {
return "Checksum mismatch"
}
// MissingPOSTPolicy missing post policy
type MissingPOSTPolicy struct{}
func (e MissingPOSTPolicy) Error() string {
return "Missing POST policy in multipart form"
}
// MissingErasureTechnique missing erasure technique
type MissingErasureTechnique struct{}
@@ -318,37 +325,6 @@ func (e InvalidUploadID) Error() string {
return "Invalid upload id " + e.UploadID
}
// SignatureDoesNotMatch invalid signature
type SignatureDoesNotMatch struct {
SignatureSent string
SignatureCalculated string
}
func (e SignatureDoesNotMatch) Error() string {
return "The request signature we calculated does not match the signature you provided"
}
// ExpiredPresignedRequest request already expired
type ExpiredPresignedRequest struct{}
func (e ExpiredPresignedRequest) Error() string {
return "Presigned request already expired"
}
// MissingExpiresQuery expires query string missing
type MissingExpiresQuery struct{}
func (e MissingExpiresQuery) Error() string {
return "Missing expires query string"
}
// MissingDateHeader date header missing
type MissingDateHeader struct{}
func (e MissingDateHeader) Error() string {
return "Missing date header"
}
// InvalidPart One or more of the specified parts could not be found
type InvalidPart struct{}
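
MissingPOSTPolicy follows the same shape as the other donut errors: an empty struct with a fixed Error() message, wrapped in *probe.Error at the call site (while SignatureDoesNotMatch and the presigned-request errors move to the signature package as signv4.DoesNotMatch and friends). A reduced, self-contained sketch of the pattern and of branching on the concrete type with a type switch, using a plain error instead of probe.Error:

package main

import "fmt"

// MissingPOSTPolicy mirrors the donut error type added above.
type MissingPOSTPolicy struct{}

func (e MissingPOSTPolicy) Error() string {
	return "Missing POST policy in multipart form"
}

// parsePolicy is a hypothetical caller that reports the error when the
// multipart form has no "policy" field.
func parsePolicy(form map[string]string) error {
	if _, ok := form["policy"]; !ok {
		return MissingPOSTPolicy{}
	}
	return nil
}

func main() {
	err := parsePolicy(map[string]string{})
	switch err.(type) {
	case MissingPOSTPolicy:
		fmt.Println("client error:", err)
	case nil:
		fmt.Println("policy present")
	default:
		fmt.Println("other error:", err)
	}
}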

View File

@@ -20,6 +20,7 @@ import (
"io"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
)
// Collection of Donut specification interfaces
@@ -33,31 +34,31 @@ type Interface interface {
// CloudStorage is a donut cloud storage interface
type CloudStorage interface {
// Storage service operations
GetBucketMetadata(bucket string, signature *Signature) (BucketMetadata, *probe.Error)
SetBucketMetadata(bucket string, metadata map[string]string, signature *Signature) *probe.Error
ListBuckets(signature *Signature) ([]BucketMetadata, *probe.Error)
MakeBucket(bucket string, ACL string, location io.Reader, signature *Signature) *probe.Error
GetBucketMetadata(bucket string, signature *signv4.Signature) (BucketMetadata, *probe.Error)
SetBucketMetadata(bucket string, metadata map[string]string, signature *signv4.Signature) *probe.Error
ListBuckets(signature *signv4.Signature) ([]BucketMetadata, *probe.Error)
MakeBucket(bucket string, ACL string, location io.Reader, signature *signv4.Signature) *probe.Error
// Bucket operations
ListObjects(string, BucketResourcesMetadata, *Signature) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error)
ListObjects(string, BucketResourcesMetadata, *signv4.Signature) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error)
// Object operations
GetObject(w io.Writer, bucket, object string, start, length int64) (int64, *probe.Error)
GetObjectMetadata(bucket, object string, signature *Signature) (ObjectMetadata, *probe.Error)
GetObjectMetadata(bucket, object string, signature *signv4.Signature) (ObjectMetadata, *probe.Error)
// bucket, object, expectedMD5Sum, size, reader, metadata, signature
CreateObject(string, string, string, int64, io.Reader, map[string]string, *Signature) (ObjectMetadata, *probe.Error)
CreateObject(string, string, string, int64, io.Reader, map[string]string, *signv4.Signature) (ObjectMetadata, *probe.Error)
Multipart
}
// Multipart API
type Multipart interface {
NewMultipartUpload(bucket, key, contentType string, signature *Signature) (string, *probe.Error)
AbortMultipartUpload(bucket, key, uploadID string, signature *Signature) *probe.Error
CreateObjectPart(string, string, string, int, string, string, int64, io.Reader, *Signature) (string, *probe.Error)
CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error)
ListMultipartUploads(string, BucketMultipartResourcesMetadata, *Signature) (BucketMultipartResourcesMetadata, *probe.Error)
ListObjectParts(string, string, ObjectResourcesMetadata, *Signature) (ObjectResourcesMetadata, *probe.Error)
NewMultipartUpload(bucket, key, contentType string, signature *signv4.Signature) (string, *probe.Error)
AbortMultipartUpload(bucket, key, uploadID string, signature *signv4.Signature) *probe.Error
CreateObjectPart(string, string, string, int, string, string, int64, io.Reader, *signv4.Signature) (string, *probe.Error)
CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *signv4.Signature) (ObjectMetadata, *probe.Error)
ListMultipartUploads(string, BucketMultipartResourcesMetadata, *signv4.Signature) (BucketMultipartResourcesMetadata, *probe.Error)
ListObjectParts(string, string, ObjectResourcesMetadata, *signv4.Signature) (ObjectResourcesMetadata, *probe.Error)
}
// Management is a donut management system interface
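
Because every method in CloudStorage and Multipart changes from *Signature to *signv4.Signature, an implementation that is not updated in lockstep only fails to compile at scattered call sites unless there is an explicit check. A reduced illustration of the usual Go compile-time assertion idiom (toy types here, not the real donut/signv4 ones):

package main

// Toy stand-ins for the real donut API and signv4.Signature types.
type Signature struct{}

type CloudStorage interface {
	ListBuckets(signature *Signature) error
}

type API struct{}

func (a API) ListBuckets(signature *Signature) error { return nil }

// Compile-time assertion: the build breaks here, in one obvious place,
// if API's method set drifts from the interface.
var _ CloudStorage = API{}

func main() {}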

View File

@@ -35,12 +35,13 @@ import (
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/donut/cache/data"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
)
/// V2 API functions
// NewMultipartUpload - initiate a new multipart session
func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *Signature) (string, *probe.Error) {
func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *signv4.Signature) (string, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -56,7 +57,7 @@ func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *
return "", err.Trace()
}
if !ok {
return "", probe.NewError(SignatureDoesNotMatch{})
return "", probe.NewError(signv4.DoesNotMatch{})
}
}
// if len(donut.config.NodeDiskMap) > 0 {
@@ -88,7 +89,7 @@ func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *
}
// AbortMultipartUpload - abort an incomplete multipart session
func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *Signature) *probe.Error {
func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *signv4.Signature) *probe.Error {
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -104,7 +105,7 @@ func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *S
return err.Trace()
}
if !ok {
return probe.NewError(SignatureDoesNotMatch{})
return probe.NewError(signv4.DoesNotMatch{})
}
}
// TODO: multipart support for donut is broken, since we haven't finalized the format in which
@@ -125,7 +126,7 @@ func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *S
}
// CreateObjectPart - create a part in a multipart session
func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (string, *probe.Error) {
func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signv4.Signature) (string, *probe.Error) {
donut.lock.Lock()
etag, err := donut.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data, signature)
donut.lock.Unlock()
@@ -136,7 +137,7 @@ func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, cont
}
// createObjectPart - internal wrapper function called by CreateObjectPart
func (donut API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (string, *probe.Error) {
func (donut API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signv4.Signature) (string, *probe.Error) {
if !IsValidBucket(bucket) {
return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
}
@@ -240,7 +241,7 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont
return "", err.Trace()
}
if !ok {
return "", probe.NewError(SignatureDoesNotMatch{})
return "", probe.NewError(signv4.DoesNotMatch{})
}
}
}
@@ -303,7 +304,7 @@ func (donut API) mergeMultipart(parts *CompleteMultipartUpload, uploadID string,
}
// CompleteMultipartUpload - complete a multipart upload and persist the data
func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
size := int64(donut.multiPartObjects[uploadID].Stats().Bytes)
@@ -321,7 +322,7 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.R
return objectMetadata, nil
}
func (donut API) completeMultipartUploadV2(bucket, key, uploadID string, data io.Reader, signature *Signature) (io.Reader, *probe.Error) {
func (donut API) completeMultipartUploadV2(bucket, key, uploadID string, data io.Reader, signature *signv4.Signature) (io.Reader, *probe.Error) {
if !IsValidBucket(bucket) {
return nil, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
@@ -355,7 +356,7 @@ func (donut API) completeMultipartUploadV2(bucket, key, uploadID string, data io
return nil, err.Trace()
}
if !ok {
return nil, probe.NewError(SignatureDoesNotMatch{})
return nil, probe.NewError(signv4.DoesNotMatch{})
}
}
parts := &CompleteMultipartUpload{}
@@ -380,7 +381,7 @@ func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byKey) Less(i, j int) bool { return a[i].Key < a[j].Key }
// ListMultipartUploads - list incomplete multipart sessions for a given bucket
func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata, signature *Signature) (BucketMultipartResourcesMetadata, *probe.Error) {
func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata, signature *signv4.Signature) (BucketMultipartResourcesMetadata, *probe.Error) {
// TODO handle delimiter, low priority
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -391,7 +392,7 @@ func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartRe
return BucketMultipartResourcesMetadata{}, err.Trace()
}
if !ok {
return BucketMultipartResourcesMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return BucketMultipartResourcesMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
@@ -465,7 +466,7 @@ func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
// ListObjectParts - list parts from incomplete multipart session for a given object
func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata, signature *Signature) (ObjectResourcesMetadata, *probe.Error) {
func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata, signature *signv4.Signature) (ObjectResourcesMetadata, *probe.Error) {
// Verify upload id
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -476,7 +477,7 @@ func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMe
return ObjectResourcesMetadata{}, err.Trace()
}
if !ok {
return ObjectResourcesMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return ObjectResourcesMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}

View File

@@ -1,318 +0,0 @@
/*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package donut
import (
"bytes"
"crypto/hmac"
"encoding/hex"
"net/http"
"net/url"
"regexp"
"sort"
"strconv"
"strings"
"time"
"unicode/utf8"
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/probe"
)
// Signature - local variables
type Signature struct {
AccessKeyID string
SecretAccessKey string
Presigned bool
PresignedPolicy bool
SignedHeaders []string
Signature string
Request *http.Request
}
const (
authHeaderPrefix = "AWS4-HMAC-SHA256"
iso8601Format = "20060102T150405Z"
yyyymmdd = "20060102"
)
// sumHMAC calculates the HMAC of data using the given key
func sumHMAC(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
// urlEncodeName encodes a string from its UTF-8 byte representation to URL hex escape sequences
//
// This is necessary since the regular url.Parse() and url.Encode() functions do not support UTF-8;
// non-English characters cannot be parsed due to the way url.Encode() is written
//
// This function, on the other hand, is a direct replacement for url.Encode() and supports
// pretty much every UTF-8 character.
func urlEncodeName(name string) (string, *probe.Error) {
// if object matches reserved string, no need to encode them
reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
if reservedNames.MatchString(name) {
return name, nil
}
var encodedName string
for _, s := range name {
if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
encodedName = encodedName + string(s)
continue
}
switch s {
case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
encodedName = encodedName + string(s)
continue
default:
len := utf8.RuneLen(s)
if len < 0 {
return "", probe.NewError(InvalidArgument{})
}
u := make([]byte, len)
utf8.EncodeRune(u, s)
for _, r := range u {
hex := hex.EncodeToString([]byte{r})
encodedName = encodedName + "%" + strings.ToUpper(hex)
}
}
}
return encodedName, nil
}
// getCanonicalHeaders generate a list of request headers with their values
func (r *Signature) getCanonicalHeaders(signedHeaders map[string][]string) string {
var headers []string
vals := make(map[string][]string)
for k, vv := range signedHeaders {
headers = append(headers, strings.ToLower(k))
vals[strings.ToLower(k)] = vv
}
headers = append(headers, "host")
sort.Strings(headers)
var buf bytes.Buffer
for _, k := range headers {
buf.WriteString(k)
buf.WriteByte(':')
switch {
case k == "host":
buf.WriteString(r.Request.Host)
fallthrough
default:
for idx, v := range vals[k] {
if idx > 0 {
buf.WriteByte(',')
}
buf.WriteString(v)
}
buf.WriteByte('\n')
}
}
return buf.String()
}
// getSignedHeaders generate a string, i.e. an alphabetically sorted, semicolon-separated list of lowercase request header names
func (r *Signature) getSignedHeaders(signedHeaders map[string][]string) string {
var headers []string
for k := range signedHeaders {
headers = append(headers, strings.ToLower(k))
}
headers = append(headers, "host")
sort.Strings(headers)
return strings.Join(headers, ";")
}
// extractSignedHeaders extract signed headers from Authorization header
func (r Signature) extractSignedHeaders() map[string][]string {
extractedSignedHeadersMap := make(map[string][]string)
for _, header := range r.SignedHeaders {
val, ok := r.Request.Header[http.CanonicalHeaderKey(header)]
if !ok {
// if not found continue, we will fail later
continue
}
extractedSignedHeadersMap[header] = val
}
return extractedSignedHeadersMap
}
// getCanonicalRequest generate a canonical request of style
//
// canonicalRequest =
// <HTTPMethod>\n
// <CanonicalURI>\n
// <CanonicalQueryString>\n
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
//
func (r *Signature) getCanonicalRequest() string {
payload := r.Request.Header.Get(http.CanonicalHeaderKey("x-amz-content-sha256"))
r.Request.URL.RawQuery = strings.Replace(r.Request.URL.Query().Encode(), "+", "%20", -1)
encodedPath, _ := urlEncodeName(r.Request.URL.Path)
// convert any space strings back to "+"
encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
canonicalRequest := strings.Join([]string{
r.Request.Method,
encodedPath,
r.Request.URL.RawQuery,
r.getCanonicalHeaders(r.extractSignedHeaders()),
r.getSignedHeaders(r.extractSignedHeaders()),
payload,
}, "\n")
return canonicalRequest
}
// getPresignedCanonicalRequest generate a canonical request of style
//
// canonicalRequest =
// <HTTPMethod>\n
// <CanonicalURI>\n
// <CanonicalQueryString>\n
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
//
func (r *Signature) getPresignedCanonicalRequest(presignedQuery string) string {
rawQuery := strings.Replace(presignedQuery, "+", "%20", -1)
encodedPath, _ := urlEncodeName(r.Request.URL.Path)
// convert any space strings back to "+"
encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
canonicalRequest := strings.Join([]string{
r.Request.Method,
encodedPath,
rawQuery,
r.getCanonicalHeaders(r.extractSignedHeaders()),
r.getSignedHeaders(r.extractSignedHeaders()),
"UNSIGNED-PAYLOAD",
}, "\n")
return canonicalRequest
}
// getScope generate a string of a specific date, an AWS region, and a service
func (r *Signature) getScope(t time.Time) string {
scope := strings.Join([]string{
t.Format(yyyymmdd),
"milkyway",
"s3",
"aws4_request",
}, "/")
return scope
}
// getStringToSign generate a string to sign from the canonical request and the request time
func (r *Signature) getStringToSign(canonicalRequest string, t time.Time) string {
stringToSign := authHeaderPrefix + "\n" + t.Format(iso8601Format) + "\n"
stringToSign = stringToSign + r.getScope(t) + "\n"
stringToSign = stringToSign + hex.EncodeToString(sha256.Sum256([]byte(canonicalRequest)))
return stringToSign
}
// getSigningKey hmac seed to calculate final signature
func (r *Signature) getSigningKey(t time.Time) []byte {
secret := r.SecretAccessKey
date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
region := sumHMAC(date, []byte("milkyway"))
service := sumHMAC(region, []byte("s3"))
signingKey := sumHMAC(service, []byte("aws4_request"))
return signingKey
}
// getSignature final signature in hexadecimal form
func (r *Signature) getSignature(signingKey []byte, stringToSign string) string {
return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
}
// DoesPolicySignatureMatch - Verify query headers with post policy
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
// returns true if matches, false otherwise. if error is not nil then it is always false
func (r *Signature) DoesPolicySignatureMatch() (bool, *probe.Error) {
// FIXME: Implement this
return true, nil
}
// DoesPresignedSignatureMatch - Verify query headers with presigned signature
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
// returns true if matches, false otherwise. if error is not nil then it is always false
func (r *Signature) DoesPresignedSignatureMatch() (bool, *probe.Error) {
query := make(url.Values)
query.Set("X-Amz-Algorithm", authHeaderPrefix)
var date string
if date = r.Request.URL.Query().Get("X-Amz-Date"); date == "" {
return false, probe.NewError(MissingDateHeader{})
}
t, err := time.Parse(iso8601Format, date)
if err != nil {
return false, probe.NewError(err)
}
if _, ok := r.Request.URL.Query()["X-Amz-Expires"]; !ok {
return false, probe.NewError(MissingExpiresQuery{})
}
expireSeconds, err := strconv.Atoi(r.Request.URL.Query().Get("X-Amz-Expires"))
if err != nil {
return false, probe.NewError(err)
}
if time.Now().UTC().Sub(t) > time.Duration(expireSeconds)*time.Second {
return false, probe.NewError(ExpiredPresignedRequest{})
}
query.Set("X-Amz-Date", t.Format(iso8601Format))
query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds))
query.Set("X-Amz-SignedHeaders", r.getSignedHeaders(r.extractSignedHeaders()))
query.Set("X-Amz-Credential", r.AccessKeyID+"/"+r.getScope(t))
encodedQuery := query.Encode()
newSignature := r.getSignature(r.getSigningKey(t), r.getStringToSign(r.getPresignedCanonicalRequest(encodedQuery), t))
encodedQuery += "&X-Amz-Signature=" + newSignature
if encodedQuery != r.Request.URL.RawQuery {
return false, nil
}
return true, nil
}
// DoesSignatureMatch - Verify authorization header with calculated header in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
// returns true if matches, false otherwise. if error is not nil then it is always false
func (r *Signature) DoesSignatureMatch(hashedPayload string) (bool, *probe.Error) {
// set the newly calculated payload
r.Request.Header.Set("X-Amz-Content-Sha256", hashedPayload)
// Add date; if not present, return an error
var date string
if date = r.Request.Header.Get(http.CanonicalHeaderKey("x-amz-date")); date == "" {
if date = r.Request.Header.Get("Date"); date == "" {
return false, probe.NewError(MissingDateHeader{})
}
}
t, err := time.Parse(iso8601Format, date)
if err != nil {
return false, probe.NewError(err)
}
canonicalRequest := r.getCanonicalRequest()
stringToSign := r.getStringToSign(canonicalRequest, t)
signingKey := r.getSigningKey(t)
newSignature := r.getSignature(signingKey, stringToSign)
if newSignature != r.Signature {
return false, nil
}
return true, nil
}
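
The file deleted above is the old in-package signer; this commit moves signature verification into pkg/signature (imported as signv4 everywhere else in the diff) and, per the commit title, fills in the POST-policy path that is still a FIXME here. The sketch below reproduces the key-derivation chain from getSigningKey with the standard library and shows how a policy signature would then be computed, assuming the new package follows the standard SigV4 POST rule that the base64-encoded policy document is itself the string to sign. The secret, region, and policy are placeholders:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"time"
)

// sumHMAC mirrors the helper above: HMAC-SHA256 of data keyed with key.
func sumHMAC(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

// signingKey reproduces the derivation chain in getSigningKey: the secret is
// chained through the date, region, service and the literal "aws4_request".
func signingKey(secret, region, service string, t time.Time) []byte {
	date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format("20060102")))
	regionKey := sumHMAC(date, []byte(region))
	serviceKey := sumHMAC(regionKey, []byte(service))
	return sumHMAC(serviceKey, []byte("aws4_request"))
}

func main() {
	// Placeholder credentials and policy document.
	secret := "EXAMPLESECRETKEY"
	policy := []byte(`{"expiration": "2015-12-31T12:00:00.000Z", "conditions": [["eq", "$bucket", "mybucket"]]}`)

	// For SigV4 POST uploads, the base64-encoded policy is the string to sign.
	base64Policy := base64.StdEncoding.EncodeToString(policy)

	// "milkyway" and "s3" match the values hard-coded in getScope/getSigningKey above.
	key := signingKey(secret, "milkyway", "s3", time.Now().UTC())
	fmt.Println("X-Amz-Signature:", hex.EncodeToString(sumHMAC(key, []byte(base64Policy))))
}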