Implement presigned policy

This commit is contained in:
Harshavardhana
2015-10-01 23:51:17 -07:00
parent 09dc360e06
commit c8de5bad2f
18 changed files with 560 additions and 240 deletions

View File

@@ -35,6 +35,7 @@ import (
"github.com/minio/minio/pkg/crypto/sha512"
"github.com/minio/minio/pkg/donut/disk"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
)
const (
@@ -235,7 +236,7 @@ func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64,
}
// WriteObject - write a new object into bucket
func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, expectedMD5Sum string, metadata map[string]string, signature *Signature) (ObjectMetadata, *probe.Error) {
func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, expectedMD5Sum string, metadata map[string]string, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
b.lock.Lock()
defer b.lock.Unlock()
if objectName == "" || objectData == nil {
@@ -306,7 +307,7 @@ func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64,
//
// Signature mismatch occurred all temp files to be removed and all data purged.
CleanupWritersOnError(writers)
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
objMetadata.MD5Sum = hex.EncodeToString(dataMD5sum)
@@ -429,27 +430,25 @@ func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err *probe
}
// writeObjectData -
func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, size int64, writer io.Writer) (int, int, *probe.Error) {
func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, size int64, hashWriter io.Writer) (int, int, *probe.Error) {
encoder, err := newEncoder(k, m, "Cauchy")
chunkSize := int64(10 * 1024 * 1024)
if err != nil {
return 0, 0, err.Trace()
}
chunkSize := int64(10 * 1024 * 1024)
chunkCount := 0
totalLength := 0
remaining := size
for remaining > 0 {
readSize := chunkSize
if remaining < chunkSize {
readSize = remaining
}
remaining = remaining - readSize
totalLength = totalLength + int(readSize)
encodedBlocks, inputData, err := encoder.EncodeStream(objectData, readSize)
var e error
for e == nil {
var length int
inputData := make([]byte, chunkSize)
length, e = objectData.Read(inputData)
encodedBlocks, err := encoder.Encode(inputData)
if err != nil {
return 0, 0, err.Trace()
}
if _, err := writer.Write(inputData); err != nil {
if _, err := hashWriter.Write(inputData[0:length]); err != nil {
return 0, 0, probe.NewError(err)
}
for blockIndex, block := range encodedBlocks {
@@ -464,8 +463,12 @@ func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData
return 0, 0, probe.NewError(err)
}
}
totalLength += length
chunkCount = chunkCount + 1
}
if e != io.EOF {
return 0, 0, probe.NewError(e)
}
return chunkCount, totalLength, nil
}

View File

@@ -36,6 +36,7 @@ import (
"github.com/minio/minio/pkg/crypto/sha512"
"github.com/minio/minio/pkg/donut/disk"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
)
// config files used inside Donut
@@ -127,7 +128,7 @@ func (donut API) listObjects(bucket, prefix, marker, delimiter string, maxkeys i
}
// putObject - put object
func (donut API) putObject(bucket, object, expectedMD5Sum string, reader io.Reader, size int64, metadata map[string]string, signature *Signature) (ObjectMetadata, *probe.Error) {
func (donut API) putObject(bucket, object, expectedMD5Sum string, reader io.Reader, size int64, metadata map[string]string, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
@@ -159,7 +160,7 @@ func (donut API) putObject(bucket, object, expectedMD5Sum string, reader io.Read
}
// putObject - put object
func (donut API) putObjectPart(bucket, object, expectedMD5Sum, uploadID string, partID int, reader io.Reader, size int64, metadata map[string]string, signature *Signature) (PartMetadata, *probe.Error) {
func (donut API) putObjectPart(bucket, object, expectedMD5Sum, uploadID string, partID int, reader io.Reader, size int64, metadata map[string]string, signature *signv4.Signature) (PartMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return PartMetadata{}, probe.NewError(InvalidArgument{})
}
@@ -336,7 +337,7 @@ func (donut API) listObjectParts(bucket, object string, resources ObjectResource
}
// completeMultipartUpload complete an incomplete multipart upload
func (donut API) completeMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
func (donut API) completeMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
if bucket == "" || strings.TrimSpace(bucket) == "" {
return ObjectMetadata{}, probe.NewError(InvalidArgument{})
}
@@ -374,7 +375,7 @@ func (donut API) completeMultipartUpload(bucket, object, uploadID string, data i
return ObjectMetadata{}, err.Trace()
}
if !ok {
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
parts := &CompleteMultipartUpload{}

View File

@@ -36,6 +36,7 @@ import (
"github.com/minio/minio/pkg/donut/cache/metadata"
"github.com/minio/minio/pkg/probe"
"github.com/minio/minio/pkg/quick"
signv4 "github.com/minio/minio/pkg/signature"
)
// total Number of buckets allowed
@@ -204,7 +205,7 @@ func (donut API) GetObject(w io.Writer, bucket string, object string, start, len
}
// GetBucketMetadata -
func (donut API) GetBucketMetadata(bucket string, signature *Signature) (BucketMetadata, *probe.Error) {
func (donut API) GetBucketMetadata(bucket string, signature *signv4.Signature) (BucketMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -214,7 +215,7 @@ func (donut API) GetBucketMetadata(bucket string, signature *Signature) (BucketM
return BucketMetadata{}, err.Trace()
}
if !ok {
return BucketMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return BucketMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
@@ -237,7 +238,7 @@ func (donut API) GetBucketMetadata(bucket string, signature *Signature) (BucketM
}
// SetBucketMetadata -
func (donut API) SetBucketMetadata(bucket string, metadata map[string]string, signature *Signature) *probe.Error {
func (donut API) SetBucketMetadata(bucket string, metadata map[string]string, signature *signv4.Signature) *probe.Error {
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -247,7 +248,7 @@ func (donut API) SetBucketMetadata(bucket string, metadata map[string]string, si
return err.Trace()
}
if !ok {
return probe.NewError(SignatureDoesNotMatch{})
return probe.NewError(signv4.DoesNotMatch{})
}
}
@@ -288,7 +289,7 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) *probe.Error {
}
// CreateObject - create an object
func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, data io.Reader, metadata map[string]string, signature *Signature) (ObjectMetadata, *probe.Error) {
func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, data io.Reader, metadata map[string]string, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -301,7 +302,7 @@ func (donut API) CreateObject(bucket, key, expectedMD5Sum string, size int64, da
}
// createObject - PUT object to cache buffer
func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
if len(donut.config.NodeDiskMap) == 0 {
if size > int64(donut.config.MaxSize) {
generic := GenericObjectError{Bucket: bucket, Object: key}
@@ -381,10 +382,12 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
totalLength += int64(length)
go debug.FreeOSMemory()
}
if totalLength != size {
// Delete perhaps the object is already saved, due to the nature of append()
donut.objects.Delete(objectKey)
return ObjectMetadata{}, probe.NewError(IncompleteBody{Bucket: bucket, Object: key})
if size != 0 {
if totalLength != size {
// Delete perhaps the object is already saved, due to the nature of append()
donut.objects.Delete(objectKey)
return ObjectMetadata{}, probe.NewError(IncompleteBody{Bucket: bucket, Object: key})
}
}
if err != io.EOF {
return ObjectMetadata{}, probe.NewError(err)
@@ -403,7 +406,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
return ObjectMetadata{}, err.Trace()
}
if !ok {
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
@@ -425,7 +428,7 @@ func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, s
}
// MakeBucket - create bucket in cache
func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signature *Signature) *probe.Error {
func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signature *signv4.Signature) *probe.Error {
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -445,7 +448,7 @@ func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signatur
return err.Trace()
}
if !ok {
return probe.NewError(SignatureDoesNotMatch{})
return probe.NewError(signv4.DoesNotMatch{})
}
}
@@ -484,7 +487,7 @@ func (donut API) MakeBucket(bucketName, acl string, location io.Reader, signatur
}
// ListObjects - list objects from cache
func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata, signature *Signature) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error) {
func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata, signature *signv4.Signature) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -494,7 +497,7 @@ func (donut API) ListObjects(bucket string, resources BucketResourcesMetadata, s
return nil, BucketResourcesMetadata{}, err.Trace()
}
if !ok {
return nil, BucketResourcesMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return nil, BucketResourcesMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
@@ -587,7 +590,7 @@ func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name }
// ListBuckets - List buckets from cache
func (donut API) ListBuckets(signature *Signature) ([]BucketMetadata, *probe.Error) {
func (donut API) ListBuckets(signature *signv4.Signature) ([]BucketMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -597,7 +600,7 @@ func (donut API) ListBuckets(signature *Signature) ([]BucketMetadata, *probe.Err
return nil, err.Trace()
}
if !ok {
return nil, probe.NewError(SignatureDoesNotMatch{})
return nil, probe.NewError(signv4.DoesNotMatch{})
}
}
@@ -621,7 +624,7 @@ func (donut API) ListBuckets(signature *Signature) ([]BucketMetadata, *probe.Err
}
// GetObjectMetadata - get object metadata from cache
func (donut API) GetObjectMetadata(bucket, key string, signature *Signature) (ObjectMetadata, *probe.Error) {
func (donut API) GetObjectMetadata(bucket, key string, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -632,7 +635,7 @@ func (donut API) GetObjectMetadata(bucket, key string, signature *Signature) (Ob
return ObjectMetadata{}, err.Trace()
}
if !ok {
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
} else {
ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
@@ -640,7 +643,7 @@ func (donut API) GetObjectMetadata(bucket, key string, signature *Signature) (Ob
return ObjectMetadata{}, err.Trace()
}
if !ok {
return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
}

View File

@@ -17,8 +17,6 @@
package donut
import (
"io"
encoding "github.com/minio/minio/pkg/erasure"
"github.com/minio/minio/pkg/probe"
)
@@ -83,14 +81,6 @@ func (e encoder) Encode(data []byte) ([][]byte, *probe.Error) {
return encodedData, nil
}
func (e encoder) EncodeStream(data io.Reader, size int64) ([][]byte, []byte, *probe.Error) {
encodedData, inputData, err := e.encoder.EncodeStream(data, size)
if err != nil {
return nil, nil, probe.NewError(err)
}
return encodedData, inputData, nil
}
// Decode - erasure decode input encoded bytes
func (e encoder) Decode(encodedData [][]byte, dataLength int) ([]byte, *probe.Error) {
decodedData, err := e.encoder.Decode(encodedData, dataLength)

View File

@@ -125,6 +125,13 @@ func (e ChecksumMismatch) Error() string {
return "Checksum mismatch"
}
// MissingPOSTPolicy missing post policy
type MissingPOSTPolicy struct{}
func (e MissingPOSTPolicy) Error() string {
return "Missing POST policy in multipart form"
}
// MissingErasureTechnique missing erasure technique
type MissingErasureTechnique struct{}
@@ -318,37 +325,6 @@ func (e InvalidUploadID) Error() string {
return "Invalid upload id " + e.UploadID
}
// SignatureDoesNotMatch invalid signature
type SignatureDoesNotMatch struct {
SignatureSent string
SignatureCalculated string
}
func (e SignatureDoesNotMatch) Error() string {
return "The request signature we calculated does not match the signature you provided"
}
// ExpiredPresignedRequest request already expired
type ExpiredPresignedRequest struct{}
func (e ExpiredPresignedRequest) Error() string {
return "Presigned request already expired"
}
// MissingExpiresQuery expires query string missing
type MissingExpiresQuery struct{}
func (e MissingExpiresQuery) Error() string {
return "Missing expires query string"
}
// MissingDateHeader date header missing
type MissingDateHeader struct{}
func (e MissingDateHeader) Error() string {
return "Missing date header"
}
// InvalidPart One or more of the specified parts could not be found
type InvalidPart struct{}

View File

@@ -20,6 +20,7 @@ import (
"io"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
)
// Collection of Donut specification interfaces
@@ -33,31 +34,31 @@ type Interface interface {
// CloudStorage is a donut cloud storage interface
type CloudStorage interface {
// Storage service operations
GetBucketMetadata(bucket string, signature *Signature) (BucketMetadata, *probe.Error)
SetBucketMetadata(bucket string, metadata map[string]string, signature *Signature) *probe.Error
ListBuckets(signature *Signature) ([]BucketMetadata, *probe.Error)
MakeBucket(bucket string, ACL string, location io.Reader, signature *Signature) *probe.Error
GetBucketMetadata(bucket string, signature *signv4.Signature) (BucketMetadata, *probe.Error)
SetBucketMetadata(bucket string, metadata map[string]string, signature *signv4.Signature) *probe.Error
ListBuckets(signature *signv4.Signature) ([]BucketMetadata, *probe.Error)
MakeBucket(bucket string, ACL string, location io.Reader, signature *signv4.Signature) *probe.Error
// Bucket operations
ListObjects(string, BucketResourcesMetadata, *Signature) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error)
ListObjects(string, BucketResourcesMetadata, *signv4.Signature) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error)
// Object operations
GetObject(w io.Writer, bucket, object string, start, length int64) (int64, *probe.Error)
GetObjectMetadata(bucket, object string, signature *Signature) (ObjectMetadata, *probe.Error)
GetObjectMetadata(bucket, object string, signature *signv4.Signature) (ObjectMetadata, *probe.Error)
// bucket, object, expectedMD5Sum, size, reader, metadata, signature
CreateObject(string, string, string, int64, io.Reader, map[string]string, *Signature) (ObjectMetadata, *probe.Error)
CreateObject(string, string, string, int64, io.Reader, map[string]string, *signv4.Signature) (ObjectMetadata, *probe.Error)
Multipart
}
// Multipart API
type Multipart interface {
NewMultipartUpload(bucket, key, contentType string, signature *Signature) (string, *probe.Error)
AbortMultipartUpload(bucket, key, uploadID string, signature *Signature) *probe.Error
CreateObjectPart(string, string, string, int, string, string, int64, io.Reader, *Signature) (string, *probe.Error)
CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error)
ListMultipartUploads(string, BucketMultipartResourcesMetadata, *Signature) (BucketMultipartResourcesMetadata, *probe.Error)
ListObjectParts(string, string, ObjectResourcesMetadata, *Signature) (ObjectResourcesMetadata, *probe.Error)
NewMultipartUpload(bucket, key, contentType string, signature *signv4.Signature) (string, *probe.Error)
AbortMultipartUpload(bucket, key, uploadID string, signature *signv4.Signature) *probe.Error
CreateObjectPart(string, string, string, int, string, string, int64, io.Reader, *signv4.Signature) (string, *probe.Error)
CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *signv4.Signature) (ObjectMetadata, *probe.Error)
ListMultipartUploads(string, BucketMultipartResourcesMetadata, *signv4.Signature) (BucketMultipartResourcesMetadata, *probe.Error)
ListObjectParts(string, string, ObjectResourcesMetadata, *signv4.Signature) (ObjectResourcesMetadata, *probe.Error)
}
// Management is a donut management system interface

View File

@@ -35,12 +35,13 @@ import (
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/donut/cache/data"
"github.com/minio/minio/pkg/probe"
signv4 "github.com/minio/minio/pkg/signature"
)
/// V2 API functions
// NewMultipartUpload - initiate a new multipart session
func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *Signature) (string, *probe.Error) {
func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *signv4.Signature) (string, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -56,7 +57,7 @@ func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *
return "", err.Trace()
}
if !ok {
return "", probe.NewError(SignatureDoesNotMatch{})
return "", probe.NewError(signv4.DoesNotMatch{})
}
}
// if len(donut.config.NodeDiskMap) > 0 {
@@ -88,7 +89,7 @@ func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *
}
// AbortMultipartUpload - abort an incomplete multipart session
func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *Signature) *probe.Error {
func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *signv4.Signature) *probe.Error {
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -104,7 +105,7 @@ func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *S
return err.Trace()
}
if !ok {
return probe.NewError(SignatureDoesNotMatch{})
return probe.NewError(signv4.DoesNotMatch{})
}
}
// TODO: multipart support for donut is broken, since we haven't finalized the format in which
@@ -125,7 +126,7 @@ func (donut API) AbortMultipartUpload(bucket, key, uploadID string, signature *S
}
// CreateObjectPart - create a part in a multipart session
func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (string, *probe.Error) {
func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signv4.Signature) (string, *probe.Error) {
donut.lock.Lock()
etag, err := donut.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data, signature)
donut.lock.Unlock()
@@ -136,7 +137,7 @@ func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, cont
}
// createObject - internal wrapper function called by CreateObjectPart
func (donut API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (string, *probe.Error) {
func (donut API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signv4.Signature) (string, *probe.Error) {
if !IsValidBucket(bucket) {
return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
}
@@ -240,7 +241,7 @@ func (donut API) createObjectPart(bucket, key, uploadID string, partID int, cont
return "", err.Trace()
}
if !ok {
return "", probe.NewError(SignatureDoesNotMatch{})
return "", probe.NewError(signv4.DoesNotMatch{})
}
}
}
@@ -303,7 +304,7 @@ func (donut API) mergeMultipart(parts *CompleteMultipartUpload, uploadID string,
}
// CompleteMultipartUpload - complete a multipart upload and persist the data
func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
donut.lock.Lock()
defer donut.lock.Unlock()
size := int64(donut.multiPartObjects[uploadID].Stats().Bytes)
@@ -321,7 +322,7 @@ func (donut API) CompleteMultipartUpload(bucket, key, uploadID string, data io.R
return objectMetadata, nil
}
func (donut API) completeMultipartUploadV2(bucket, key, uploadID string, data io.Reader, signature *Signature) (io.Reader, *probe.Error) {
func (donut API) completeMultipartUploadV2(bucket, key, uploadID string, data io.Reader, signature *signv4.Signature) (io.Reader, *probe.Error) {
if !IsValidBucket(bucket) {
return nil, probe.NewError(BucketNameInvalid{Bucket: bucket})
}
@@ -355,7 +356,7 @@ func (donut API) completeMultipartUploadV2(bucket, key, uploadID string, data io
return nil, err.Trace()
}
if !ok {
return nil, probe.NewError(SignatureDoesNotMatch{})
return nil, probe.NewError(signv4.DoesNotMatch{})
}
}
parts := &CompleteMultipartUpload{}
@@ -380,7 +381,7 @@ func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byKey) Less(i, j int) bool { return a[i].Key < a[j].Key }
// ListMultipartUploads - list incomplete multipart sessions for a given bucket
func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata, signature *Signature) (BucketMultipartResourcesMetadata, *probe.Error) {
func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata, signature *signv4.Signature) (BucketMultipartResourcesMetadata, *probe.Error) {
// TODO handle delimiter, low priority
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -391,7 +392,7 @@ func (donut API) ListMultipartUploads(bucket string, resources BucketMultipartRe
return BucketMultipartResourcesMetadata{}, err.Trace()
}
if !ok {
return BucketMultipartResourcesMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return BucketMultipartResourcesMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}
@@ -465,7 +466,7 @@ func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
// ListObjectParts - list parts from incomplete multipart session for a given object
func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata, signature *Signature) (ObjectResourcesMetadata, *probe.Error) {
func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata, signature *signv4.Signature) (ObjectResourcesMetadata, *probe.Error) {
// Verify upload id
donut.lock.Lock()
defer donut.lock.Unlock()
@@ -476,7 +477,7 @@ func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMe
return ObjectResourcesMetadata{}, err.Trace()
}
if !ok {
return ObjectResourcesMetadata{}, probe.NewError(SignatureDoesNotMatch{})
return ObjectResourcesMetadata{}, probe.NewError(signv4.DoesNotMatch{})
}
}

View File

@@ -22,7 +22,6 @@ package erasure
import "C"
import (
"errors"
"io"
"unsafe"
)
@@ -195,53 +194,3 @@ func (e *Erasure) Encode(inputData []byte) (encodedBlocks [][]byte, err error) {
return encodedBlocks, nil
}
// EncodeStream erasure codes a block of data in "k" data blocks and "m" parity blocks.
// Output is [k+m][]blocks of data and parity slices.
func (e *Erasure) EncodeStream(data io.Reader, size int64) ([][]byte, []byte, error) {
k := int(e.params.K) // "k" data blocks
m := int(e.params.M) // "m" parity blocks
n := k + m // "n" total encoded blocks
// Length of a single encoded chunk.
// Total number of encoded chunks = "k" data + "m" parity blocks
encodedBlockLen := GetEncodedBlockLen(int(size), uint8(k))
// Length of total number of "n" data chunks
encodedDataBlocksLen := encodedBlockLen * n
// allocate byte array for encodedBlock length
inputData := make([]byte, size, encodedDataBlocksLen)
_, err := io.ReadFull(data, inputData)
if err != nil {
// do not check for io.ErrUnexpectedEOF, we know the right amount of size
// to be read if its a short read we need to throw error since reader could
// have been prematurely closed.
if err != io.EOF {
return nil, nil, err
}
}
// Allocate memory to the "encoded blocks" return buffer
encodedBlocks := make([][]byte, n) // Return buffer
// Neccessary to bridge Go to the C world. C requires 2D arry of pointers to
// byte array. "encodedBlocks" is a 2D slice.
pointersToEncodedBlock := make([]*byte, n) // Pointers to encoded blocks.
// Copy data block slices to encoded block buffer
for i := 0; i < n; i++ {
encodedBlocks[i] = inputData[i*encodedBlockLen : (i+1)*encodedBlockLen]
pointersToEncodedBlock[i] = &encodedBlocks[i][0]
}
// Erasure code the data into K data blocks and M parity
// blocks. Only the parity blocks are filled. Data blocks remain
// intact.
C.ec_encode_data(C.int(encodedBlockLen), C.int(k), C.int(m), e.encodeTbls,
(**C.uchar)(unsafe.Pointer(&pointersToEncodedBlock[:k][0])), // Pointers to data blocks
(**C.uchar)(unsafe.Pointer(&pointersToEncodedBlock[k:][0]))) // Pointers to parity blocks
return encodedBlocks, inputData[0:size], nil
}

32
pkg/signature/errors.go Normal file
View File

@@ -0,0 +1,32 @@
package signature

// MissingDateHeader indicates that the request carries no date header,
// so there is nothing to anchor signature validation against.
type MissingDateHeader struct{}

// Error returns the fixed message for a missing date header.
func (e MissingDateHeader) Error() string {
	return "Missing date header"
}

// MissingExpiresQuery indicates that a presigned request is missing its
// expires query string parameter.
type MissingExpiresQuery struct{}

// Error returns the fixed message for a missing expires query string.
func (e MissingExpiresQuery) Error() string {
	return "Missing expires query string"
}

// ExpiredPresignedRequest indicates that the presigned request's expiry
// time has already passed.
type ExpiredPresignedRequest struct{}

// Error returns the fixed message for an expired presigned request.
func (e ExpiredPresignedRequest) Error() string {
	return "Presigned request already expired"
}

// DoesNotMatch indicates that the signature computed by the server does
// not match the signature the client sent.
type DoesNotMatch struct {
	// SignatureSent is the signature provided by the client.
	SignatureSent string
	// SignatureCalculated is the signature the server computed.
	SignatureCalculated string
}

// Error returns the fixed mismatch message; the two signatures are kept
// in the struct fields rather than exposed in the message.
func (e DoesNotMatch) Error() string {
	return "The request signature we calculated does not match the signature you provided"
}

View File

@@ -0,0 +1,141 @@
package signature
import (
"encoding/json"
"fmt"
"reflect"
"time"
"github.com/minio/minio/pkg/probe"
)
// toString returns val when it holds a string; for any other dynamic
// type it returns the empty string instead of panicking.
func toString(val interface{}) string {
	if s, ok := val.(string); ok {
		return s
	}
	return ""
}
// toInteger safely converts val to an int without causing a panic.
//
// JSON numbers decoded into an interface{} arrive as float64 (see the
// encoding/json Unmarshal documentation), so both int and float64 are
// accepted; any other dynamic type yields 0.
func toInteger(val interface{}) int {
	switch v := val.(type) {
	case int:
		return v
	case float64:
		// json.Unmarshal stores every JSON number as float64 when the
		// target is interface{}. Without this case, content-length-range
		// bounds parsed from a POST policy would always come out as 0.
		return int(v)
	}
	return 0
}
// isString reports whether val's dynamic type is string, without
// risking a panic on other types.
func isString(val interface{}) bool {
	_, ok := val.(string)
	return ok
}
// PostPolicyForm provides strict static type conversion and validation for Amazon S3's POST policy JSON string.
type PostPolicyForm struct {
	Expiration time.Time // Expiration date and time of the POST policy.
	Conditions struct { // Conditional policy structure.
		// Policies maps a condition field name (e.g. "$acl", "$key") to
		// its matching operator ("eq" or "starts-with") and expected value.
		Policies map[string]struct {
			Operator string
			Value string
		}
		// ContentLengthRange carries the bounds from a
		// "content-length-range" condition; presumably object size in
		// bytes — confirm against the S3 POST policy specification.
		ContentLengthRange struct {
			Min int
			Max int
		}
	}
}
// ParsePostPolicyForm - Parse JSON policy string into typed PostPolicyForm structure.
//
// The policy is first decoded loosely (conditions as []interface{}),
// then each condition is converted into the strict PostPolicyForm
// representation. Any condition of an unexpected shape or type aborts
// parsing with an error.
func ParsePostPolicyForm(policy string) (PostPolicyForm, *probe.Error) {
	// Decode the policy JSON into interfaces and
	// perform strict type conversion using reflection.
	var rawPolicy struct {
		Expiration string `json:"expiration"`
		Conditions []interface{} `json:"conditions"`
	}
	e := json.Unmarshal([]byte(policy), &rawPolicy)
	if e != nil {
		return PostPolicyForm{}, probe.NewError(e)
	}
	parsedPolicy := PostPolicyForm{}
	// Parse expiry time.
	parsedPolicy.Expiration, e = time.Parse(time.RFC3339Nano, rawPolicy.Expiration)
	if e != nil {
		return PostPolicyForm{}, probe.NewError(e)
	}
	parsedPolicy.Conditions.Policies = make(map[string]struct {
		Operator string
		Value string
	})
	// Parse conditions. Each entry is either a {"field": "value"} map
	// (shorthand for an "eq" match) or a 3-element ["op", "$field",
	// "value"] array.
	for _, val := range rawPolicy.Conditions {
		switch condt := val.(type) {
		case map[string]interface{}: // Handle key:value map types.
			for k, v := range condt {
				if !isString(v) { // Pre-check value type.
					// All values must be of type string.
					return parsedPolicy, probe.NewError(fmt.Errorf("Unknown type ‘%s’ of conditional field value ‘%s’ found in POST policy form.",
						reflect.TypeOf(condt).String(), condt))
				}
				// {"acl": "public-read" } is an alternate way to indicate - [ "eq", "$acl", "public-read" ]
				// In this case we will just collapse this into "eq" for all use cases.
				parsedPolicy.Conditions.Policies["$"+k] = struct {
					Operator string
					Value string
				}{
					Operator: "eq",
					Value: toString(v),
				}
			}
		case []interface{}: // Handle array types.
			if len(condt) != 3 { // Return error if we have insufficient elements.
				return parsedPolicy, probe.NewError(fmt.Errorf("Malformed conditional fields ‘%s’ of type ‘%s’ found in POST policy form.",
					condt, reflect.TypeOf(condt).String()))
			}
			switch toString(condt[0]) {
			case "eq", "starts-with":
				for _, v := range condt { // Pre-check all values for type.
					if !isString(v) {
						// All values must be of type string.
						return parsedPolicy, probe.NewError(fmt.Errorf("Unknown type ‘%s’ of conditional field value ‘%s’ found in POST policy form.",
							reflect.TypeOf(condt).String(), condt))
					}
				}
				// ["eq", "$key", "user/uploads/file"] — the matchType keeps
				// its leading "$", mirroring the map-shorthand keys above.
				operator, matchType, value := toString(condt[0]), toString(condt[1]), toString(condt[2])
				parsedPolicy.Conditions.Policies[matchType] = struct {
					Operator string
					Value string
				}{
					Operator: operator,
					Value: value,
				}
			case "content-length-range":
				// NOTE(review): toInteger only accepts int; JSON numbers
				// decode as float64, so these bounds may parse as 0 —
				// verify toInteger handles float64.
				parsedPolicy.Conditions.ContentLengthRange = struct {
					Min int
					Max int
				}{
					Min: toInteger(condt[1]),
					Max: toInteger(condt[2]),
				}
			default:
				// Condition should be valid.
				return parsedPolicy, probe.NewError(fmt.Errorf("Unknown type ‘%s’ of conditional field value ‘%s’ found in POST policy form.",
					reflect.TypeOf(condt).String(), condt))
			}
		default:
			return parsedPolicy, probe.NewError(fmt.Errorf("Unknown field ‘%s’ of type ‘%s’ found in POST policy form.",
				condt, reflect.TypeOf(condt).String()))
		}
	}
	return parsedPolicy, nil
}

View File

@@ -14,7 +14,7 @@
* limitations under the License.
*/
package donut
package signature
import (
"bytes"
@@ -38,7 +38,7 @@ type Signature struct {
AccessKeyID string
SecretAccessKey string
Presigned bool
PresignedPolicy bool
PresignedPolicy []byte
SignedHeaders []string
Signature string
Request *http.Request
@@ -57,18 +57,18 @@ func sumHMAC(key []byte, data []byte) []byte {
return hash.Sum(nil)
}
// urlEncodedName encode the strings from UTF-8 byte representations to HTML hex escape sequences
// getURLEncodedName encode the strings from UTF-8 byte representations to HTML hex escape sequences
//
// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
// non english characters cannot be parsed due to the nature in which url.Encode() is written
//
// This function on the other hand is a direct replacement for url.Encode() technique to support
// pretty much every UTF-8 character.
func urlEncodeName(name string) (string, *probe.Error) {
func getURLEncodedName(name string) string {
// if object matches reserved string, no need to encode them
reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
if reservedNames.MatchString(name) {
return name, nil
return name
}
var encodedName string
for _, s := range name {
@@ -83,7 +83,7 @@ func urlEncodeName(name string) (string, *probe.Error) {
default:
len := utf8.RuneLen(s)
if len < 0 {
return "", probe.NewError(InvalidArgument{})
return name
}
u := make([]byte, len)
utf8.EncodeRune(u, s)
@@ -93,7 +93,7 @@ func urlEncodeName(name string) (string, *probe.Error) {
}
}
}
return encodedName, nil
return encodedName
}
// getCanonicalHeaders generate a list of request headers with their values
@@ -166,7 +166,7 @@ func (r Signature) extractSignedHeaders() map[string][]string {
func (r *Signature) getCanonicalRequest() string {
payload := r.Request.Header.Get(http.CanonicalHeaderKey("x-amz-content-sha256"))
r.Request.URL.RawQuery = strings.Replace(r.Request.URL.Query().Encode(), "+", "%20", -1)
encodedPath, _ := urlEncodeName(r.Request.URL.Path)
encodedPath := getURLEncodedName(r.Request.URL.Path)
// convert any space strings back to "+"
encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
canonicalRequest := strings.Join([]string{
@@ -192,7 +192,7 @@ func (r *Signature) getCanonicalRequest() string {
//
func (r *Signature) getPresignedCanonicalRequest(presignedQuery string) string {
rawQuery := strings.Replace(presignedQuery, "+", "%20", -1)
encodedPath, _ := urlEncodeName(r.Request.URL.Path)
encodedPath := getURLEncodedName(r.Request.URL.Path)
// convert any space strings back to "+"
encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
canonicalRequest := strings.Join([]string{
@@ -243,8 +243,17 @@ func (r *Signature) getSignature(signingKey []byte, stringToSign string) string
// DoesPolicySignatureMatch - Verify query headers with post policy
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
// returns true if matches, false otherwise. if error is not nil then it is always false
func (r *Signature) DoesPolicySignatureMatch() (bool, *probe.Error) {
// FIXME: Implement this
func (r *Signature) DoesPolicySignatureMatch(date string) (bool, *probe.Error) {
	// The supplied date must parse in ISO8601 form; it is presumably the
	// X-Amz-Date form field and anchors the date-scoped signing key —
	// confirm the expected layout against iso8601Format's definition.
	t, err := time.Parse(iso8601Format, date)
	if err != nil {
		return false, probe.NewError(err)
	}
	signingKey := r.getSigningKey(t)
	// For POST policy requests the string-to-sign is the policy document
	// bytes themselves (r.PresignedPolicy), not a canonical request.
	// NOTE(review): AWS SigV4 expects the base64-encoded policy here —
	// verify the caller stores it base64-encoded.
	stringToSign := string(r.PresignedPolicy)
	newSignature := r.getSignature(signingKey, stringToSign)
	// A mismatch is a normal negative result, not an error.
	if newSignature != r.Signature {
		return false, nil
	}
	return true, nil
}