Implement gateway S3 support (#3940)

Remco Verhoef
2017-04-27 20:26:00 +02:00
committed by Harshavardhana
parent 57c5c75611
commit 3a539ce660
61 changed files with 9607 additions and 199 deletions

vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go generated vendored Normal file (284 lines added)

@@ -0,0 +1,284 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package encrypt
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/base64"
"errors"
"io"
)
// Crypt mode - encryption or decryption
type cryptMode int
const (
encryptMode cryptMode = iota
decryptMode
)
// CBCSecureMaterials encrypts/decrypts data using AES CBC algorithm
type CBCSecureMaterials struct {
// Data stream to encrypt/decrypt
stream io.Reader
// Last internal error
err error
// End of file reached
eof bool
// Holds initial data
srcBuf *bytes.Buffer
// Holds transformed data (encrypted or decrypted)
dstBuf *bytes.Buffer
// Key used to encrypt/decrypt the content key
encryptionKey Key
// Content key that encrypts/decrypts the object data
contentKey []byte
// Encrypted form of contentKey
cryptedKey []byte
// Initialization vector
iv []byte
// matDesc - currently unused
matDesc []byte
// Indicate if we are going to encrypt or decrypt
cryptMode cryptMode
// Helper that encrypts/decrypts data
blockMode cipher.BlockMode
}
// NewCBCSecureMaterials builds new CBC crypter module with
// the specified encryption key (symmetric or asymmetric)
func NewCBCSecureMaterials(key Key) (*CBCSecureMaterials, error) {
if key == nil {
return nil, errors.New("Unable to recognize empty encryption properties")
}
return &CBCSecureMaterials{
srcBuf: bytes.NewBuffer([]byte{}),
dstBuf: bytes.NewBuffer([]byte{}),
encryptionKey: key,
matDesc: []byte("{}"),
}, nil
}
// SetupEncryptMode - tells CBC that we are going to encrypt data
func (s *CBCSecureMaterials) SetupEncryptMode(stream io.Reader) error {
// Set mode to encrypt
s.cryptMode = encryptMode
// Set underlying reader
s.stream = stream
s.eof = false
s.srcBuf.Reset()
s.dstBuf.Reset()
var err error
// Generate random content key
s.contentKey = make([]byte, aes.BlockSize*2)
if _, err := rand.Read(s.contentKey); err != nil {
return err
}
// Encrypt content key
s.cryptedKey, err = s.encryptionKey.Encrypt(s.contentKey)
if err != nil {
return err
}
// Generate random IV
s.iv = make([]byte, aes.BlockSize)
if _, err = rand.Read(s.iv); err != nil {
return err
}
// New cipher
encryptContentBlock, err := aes.NewCipher(s.contentKey)
if err != nil {
return err
}
s.blockMode = cipher.NewCBCEncrypter(encryptContentBlock, s.iv)
return nil
}
// SetupDecryptMode - tells CBC that we are going to decrypt data
func (s *CBCSecureMaterials) SetupDecryptMode(stream io.Reader, iv string, key string) error {
// Set mode to decrypt
s.cryptMode = decryptMode
// Set underlying reader
s.stream = stream
// Reset
s.eof = false
s.srcBuf.Reset()
s.dstBuf.Reset()
var err error
// Get IV
s.iv, err = base64.StdEncoding.DecodeString(iv)
if err != nil {
return err
}
// Get encrypted content key
s.cryptedKey, err = base64.StdEncoding.DecodeString(key)
if err != nil {
return err
}
// Decrypt content key
s.contentKey, err = s.encryptionKey.Decrypt(s.cryptedKey)
if err != nil {
return err
}
// New cipher
decryptContentBlock, err := aes.NewCipher(s.contentKey)
if err != nil {
return err
}
s.blockMode = cipher.NewCBCDecrypter(decryptContentBlock, s.iv)
return nil
}
// GetIV - return randomly generated IV (per S3 object), base64 encoded.
func (s *CBCSecureMaterials) GetIV() string {
return base64.StdEncoding.EncodeToString(s.iv)
}
// GetKey - return content encrypting key (cek) in encrypted form, base64 encoded.
func (s *CBCSecureMaterials) GetKey() string {
return base64.StdEncoding.EncodeToString(s.cryptedKey)
}
// GetDesc - user provided encryption material description in JSON (UTF8) format.
func (s *CBCSecureMaterials) GetDesc() string {
return string(s.matDesc)
}
// Fill buf with encrypted/decrypted data
func (s *CBCSecureMaterials) Read(buf []byte) (n int, err error) {
// Always fill buf from dstBuf at the end of this function
defer func() {
if s.err != nil {
n, err = 0, s.err
} else {
n, err = s.dstBuf.Read(buf)
}
}()
// Nothing more to transform once EOF is reached; the deferred read drains dstBuf
if s.eof {
return
}
// Fill dest buffer if its length is less than buf
for !s.eof && s.dstBuf.Len() < len(buf) {
srcPart := make([]byte, aes.BlockSize)
dstPart := make([]byte, aes.BlockSize)
// Fill src buffer
for s.srcBuf.Len() < aes.BlockSize*2 {
_, err = io.CopyN(s.srcBuf, s.stream, aes.BlockSize)
if err != nil {
break
}
}
// Quit immediately for errors other than io.EOF
if err != nil && err != io.EOF {
s.err = err
return
}
// Mark current encrypting/decrypting as finished
s.eof = (err == io.EOF)
if s.eof && s.cryptMode == encryptMode {
if srcPart, err = pkcs5Pad(s.srcBuf.Bytes(), aes.BlockSize); err != nil {
s.err = err
return
}
} else {
_, _ = s.srcBuf.Read(srcPart)
}
// Crypt srcPart content
for len(srcPart) > 0 {
// Crypt current part
s.blockMode.CryptBlocks(dstPart, srcPart[:aes.BlockSize])
// Unpad when this is the last part and we are decrypting
if s.eof && s.cryptMode == decryptMode {
dstPart, err = pkcs5Unpad(dstPart, aes.BlockSize)
if err != nil {
s.err = err
return
}
}
// Send crypted data to dstBuf
if _, wErr := s.dstBuf.Write(dstPart); wErr != nil {
s.err = wErr
return
}
// Move to the next part
srcPart = srcPart[aes.BlockSize:]
}
}
return
}
// Unpad a set of bytes following the PKCS5 algorithm
func pkcs5Unpad(buf []byte, blockSize int) ([]byte, error) {
bufLen := len(buf)
if bufLen == 0 {
return nil, errors.New("buffer is empty")
}
pad := int(buf[bufLen-1])
if pad > bufLen || pad > blockSize {
return nil, errors.New("invalid padding size")
}
return buf[:bufLen-pad], nil
}
// Pad a set of bytes following the PKCS5 algorithm
func pkcs5Pad(buf []byte, blockSize int) ([]byte, error) {
bufLen := len(buf)
pad := blockSize - (bufLen % blockSize)
padText := bytes.Repeat([]byte{byte(pad)}, pad)
return append(buf, padText...), nil
}
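
A minimal round-trip sketch against the API above; the 32-byte master key and the sample plaintext are illustrative, not part of the commit:

package main

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "strings"

    "github.com/minio/minio-go/pkg/encrypt"
)

func main() {
    // The master key must be a valid AES key size (16, 24 or 32 bytes),
    // since SymmetricKey.Encrypt calls aes.NewCipher with it.
    masterKey := []byte("32byteslongsecretkeymustbegiven!")
    materials, err := encrypt.NewCBCSecureMaterials(encrypt.NewSymmetricKey(masterKey))
    if err != nil {
        panic(err)
    }

    // Encrypt a plaintext stream by reading from the materials.
    if err = materials.SetupEncryptMode(strings.NewReader("hello, world")); err != nil {
        panic(err)
    }
    ciphertext, err := ioutil.ReadAll(materials)
    if err != nil {
        panic(err)
    }

    // The IV and the encrypted content key must travel with the object.
    iv, cryptedKey := materials.GetIV(), materials.GetKey()

    // Decrypt it back using the stored IV and encrypted content key.
    if err = materials.SetupDecryptMode(bytes.NewReader(ciphertext), iv, cryptedKey); err != nil {
        panic(err)
    }
    plaintext, err := ioutil.ReadAll(materials)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s\n", plaintext) // hello, world
}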


@@ -0,0 +1,50 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package encrypt implements a generic interface to encrypt any stream of data.
// Currently this package implements two types of encryption:
// - Symmetric encryption using AES.
// - Asymmetric encryption using RSA.
package encrypt
import "io"
// Materials - provides generic interface to encrypt any stream of data.
type Materials interface {
// Returns encrypted/decrypted data, io.Reader compatible.
Read(b []byte) (int, error)
// Get randomly generated IV, base64 encoded.
GetIV() (iv string)
// Get content encrypting key (cek) in encrypted form, base64 encoded.
GetKey() (key string)
// Get user provided encryption material description in
// JSON (UTF8) format. This is not used, kept for future.
GetDesc() (desc string)
// Setup encrypt mode, further calls of Read() function
// will return the encrypted form of data streamed
// by the passed reader
SetupEncryptMode(stream io.Reader) error
// Setup decrypt mode, further calls of Read() function
// will return the decrypted form of data streamed
// by the passed reader
SetupDecryptMode(stream io.Reader, iv string, key string) error
}
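
To make the contract concrete, here is a sketch of a custom Materials implementation; passthroughMaterials is a hypothetical no-op that streams data through unchanged and is not part of the library:

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "strings"

    "github.com/minio/minio-go/pkg/encrypt"
)

// passthroughMaterials satisfies encrypt.Materials without doing any
// actual encryption; it exists only to illustrate the interface.
type passthroughMaterials struct {
    stream io.Reader
}

func (p *passthroughMaterials) Read(b []byte) (int, error) { return p.stream.Read(b) }

// No IV, content key or material description applies to a pass-through.
func (p *passthroughMaterials) GetIV() string   { return "" }
func (p *passthroughMaterials) GetKey() string  { return "" }
func (p *passthroughMaterials) GetDesc() string { return "{}" }

func (p *passthroughMaterials) SetupEncryptMode(stream io.Reader) error {
    p.stream = stream
    return nil
}

func (p *passthroughMaterials) SetupDecryptMode(stream io.Reader, iv string, key string) error {
    p.stream = stream
    return nil
}

// Compile-time check that the interface is fully implemented.
var _ encrypt.Materials = (*passthroughMaterials)(nil)

func main() {
    var m passthroughMaterials
    _ = m.SetupEncryptMode(strings.NewReader("unchanged"))
    out, _ := ioutil.ReadAll(&m)
    fmt.Printf("%s\n", out) // unchanged
}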

vendor/github.com/minio/minio-go/pkg/encrypt/keys.go generated vendored Normal file (165 lines added)

@@ -0,0 +1,165 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package encrypt
import (
"crypto/aes"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"errors"
)
// Key - generic interface to encrypt/decrypt a key.
// We use it to encrypt/decrypt the content key, which is the key
// that encrypts/decrypts the object data.
type Key interface {
// Encrypt data using the set encryption key
Encrypt([]byte) ([]byte, error)
// Decrypt data using the set encryption key
Decrypt([]byte) ([]byte, error)
}
// SymmetricKey - encrypts data with a symmetric master key
type SymmetricKey struct {
masterKey []byte
}
// Encrypt passed bytes
func (s *SymmetricKey) Encrypt(plain []byte) ([]byte, error) {
// Initialize an AES encryptor using a master key
keyBlock, err := aes.NewCipher(s.masterKey)
if err != nil {
return []byte{}, err
}
// Pad the plaintext before encryption
plain, _ = pkcs5Pad(plain, aes.BlockSize)
encKey := []byte{}
encPart := make([]byte, aes.BlockSize)
// Encrypt the passed key by block
for {
if len(plain) < aes.BlockSize {
break
}
// Encrypt the passed key
keyBlock.Encrypt(encPart, plain[:aes.BlockSize])
// Add the encrypted block to the total encrypted key
encKey = append(encKey, encPart...)
// Pass to the next plain block
plain = plain[aes.BlockSize:]
}
return encKey, nil
}
// Decrypt passed bytes
func (s *SymmetricKey) Decrypt(cipher []byte) ([]byte, error) {
// Initialize AES decrypter
keyBlock, err := aes.NewCipher(s.masterKey)
if err != nil {
return nil, err
}
var plain []byte
plainPart := make([]byte, aes.BlockSize)
// Decrypt the encrypted data block by block
for {
if len(cipher) < aes.BlockSize {
break
}
keyBlock.Decrypt(plainPart, cipher[:aes.BlockSize])
// Add the decrypted block to the total result
plain = append(plain, plainPart...)
// Pass to the next cipher block
cipher = cipher[aes.BlockSize:]
}
// Unpad the resulted plain data
plain, err = pkcs5Unpad(plain, aes.BlockSize)
if err != nil {
return nil, err
}
return plain, nil
}
// NewSymmetricKey generates a new encrypt/decrypt module using
// an AES master key (16, 24 or 32 bytes long)
func NewSymmetricKey(b []byte) *SymmetricKey {
return &SymmetricKey{masterKey: b}
}
// AsymmetricKey - encrypts/decrypts data
// using an RSA public/private key pair
type AsymmetricKey struct {
publicKey *rsa.PublicKey
privateKey *rsa.PrivateKey
}
// Encrypt data using public key
func (a *AsymmetricKey) Encrypt(plain []byte) ([]byte, error) {
cipher, err := rsa.EncryptPKCS1v15(rand.Reader, a.publicKey, plain)
if err != nil {
return nil, err
}
return cipher, nil
}
// Decrypt data using private key
func (a *AsymmetricKey) Decrypt(cipher []byte) ([]byte, error) {
cipher, err := rsa.DecryptPKCS1v15(rand.Reader, a.privateKey, cipher)
if err != nil {
return nil, err
}
return cipher, nil
}
// NewAsymmetricKey - generates a crypto module able to encrypt/decrypt
// data using a pair of private and public keys
func NewAsymmetricKey(privData []byte, pubData []byte) (*AsymmetricKey, error) {
// Parse private key from passed data
priv, err := x509.ParsePKCS8PrivateKey(privData)
if err != nil {
return nil, err
}
privKey, ok := priv.(*rsa.PrivateKey)
if !ok {
return nil, errors.New("not a valid private key")
}
// Parse public key from passed data
pub, err := x509.ParsePKIXPublicKey(pubData)
if err != nil {
return nil, err
}
pubKey, ok := pub.(*rsa.PublicKey)
if !ok {
return nil, errors.New("not a valid public key")
}
// Associate the private key with the passed public key
privKey.PublicKey = *pubKey
return &AsymmetricKey{
publicKey: pubKey,
privateKey: privKey,
}, nil
}
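
A round-trip sketch for both key types; the byte strings are illustrative, and marshalling an RSA key pair into the DER forms the x509 parsers above expect needs Go 1.10+ for MarshalPKCS8PrivateKey:

package main

import (
    "crypto/rand"
    "crypto/rsa"
    "crypto/x509"
    "fmt"

    "github.com/minio/minio-go/pkg/encrypt"
)

func main() {
    // Symmetric round trip. The master key must be a valid AES key
    // size (16, 24 or 32 bytes) since Encrypt calls aes.NewCipher.
    sym := encrypt.NewSymmetricKey([]byte("0123456789abcdef"))
    enc, err := sym.Encrypt([]byte("content-key-material"))
    if err != nil {
        panic(err)
    }
    dec, err := sym.Decrypt(enc)
    if err != nil {
        panic(err)
    }
    fmt.Printf("symmetric: %s\n", dec)

    // Asymmetric round trip: NewAsymmetricKey expects a DER-encoded
    // PKCS#8 private key and a DER-encoded PKIX public key.
    rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
    if err != nil {
        panic(err)
    }
    privDER, err := x509.MarshalPKCS8PrivateKey(rsaKey)
    if err != nil {
        panic(err)
    }
    pubDER, err := x509.MarshalPKIXPublicKey(&rsaKey.PublicKey)
    if err != nil {
        panic(err)
    }
    asym, err := encrypt.NewAsymmetricKey(privDER, pubDER)
    if err != nil {
        panic(err)
    }
    enc, err = asym.Encrypt([]byte("content-key-material"))
    if err != nil {
        panic(err)
    }
    dec, err = asym.Decrypt(enc)
    if err != nil {
        panic(err)
    }
    fmt.Printf("asymmetric: %s\n", dec)
}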


@@ -34,7 +34,7 @@ const (
BucketPolicyWriteOnly = "writeonly"
)
-// isValidBucketPolicy - Is provided policy value supported.
+// IsValidBucketPolicy - returns true if policy is valid and supported, false otherwise.
func (p BucketPolicy) IsValidBucketPolicy() bool {
switch p {
case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly:
@@ -508,7 +508,7 @@ func getObjectPolicy(statement Statement) (readOnly bool, writeOnly bool) {
return readOnly, writeOnly
}
-// Returns policy of given bucket name, prefix in given statements.
+// GetPolicy - Returns policy of given bucket name, prefix in given statements.
func GetPolicy(statements []Statement, bucketName string, prefix string) BucketPolicy {
bucketResource := awsResourcePrefix + bucketName
objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
@@ -563,7 +563,7 @@ func GetPolicy(statements []Statement, bucketName string, prefix string) BucketP
return policy
}
-// GetPolicies returns a map of policies rules of given bucket name, prefix in given statements.
+// GetPolicies - returns a map of policies rules of given bucket name, prefix in given statements.
func GetPolicies(statements []Statement, bucketName string) map[string]BucketPolicy {
policyRules := map[string]BucketPolicy{}
objResources := set.NewStringSet()
@@ -590,8 +590,7 @@ func GetPolicies(statements []Statement, bucketName string) map[string]BucketPol
return policyRules
}
-// Returns new statements containing policy of given bucket name and
-// prefix are appended.
+// SetPolicy - Returns new statements containing policy of given bucket name and prefix are appended.
func SetPolicy(statements []Statement, policy BucketPolicy, bucketName string, prefix string) []Statement {
out := removeStatements(statements, bucketName, prefix)
// fmt.Println("out = ")
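
A rough usage sketch for the newly exported helpers; the bucket name and prefix are placeholders, and the printed result assumes GetPolicy recovers exactly what SetPolicy wrote:

package main

import (
    "fmt"

    "github.com/minio/minio-go/pkg/policy"
)

func main() {
    // Grant read-only access to everything under "public/" in "mybucket".
    statements := policy.SetPolicy(nil, policy.BucketPolicyReadOnly, "mybucket", "public/")

    // Read the effective policy back out of the statements.
    p := policy.GetPolicy(statements, "mybucket", "public/")
    fmt.Println(p, p.IsValidBucketPolicy()) // readonly true
}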


@@ -0,0 +1,285 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3signer
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
)
// Reference for constants used below -
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming
const (
streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
streamingEncoding = "aws-chunked"
streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD"
emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
payloadChunkSize = 64 * 1024
chunkSigConstLen = 17 // ";chunk-signature="
signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2"
crlfLen = 2 // CRLF
)
// Request headers to be ignored while calculating seed signature for
// a request.
var ignoredStreamingHeaders = map[string]bool{
"Authorization": true,
"User-Agent": true,
"Content-Type": true,
}
// getSignedChunkLength - calculates the length of chunk metadata
func getSignedChunkLength(chunkDataSize int64) int64 {
return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
chunkSigConstLen +
signatureStrLen +
crlfLen +
chunkDataSize +
crlfLen
}
// getStreamLength - calculates the length of the overall stream (data + metadata)
func getStreamLength(dataLen, chunkSize int64) int64 {
if dataLen <= 0 {
return 0
}
chunksCount := int64(dataLen / chunkSize)
remainingBytes := int64(dataLen % chunkSize)
streamLen := int64(0)
streamLen += chunksCount * getSignedChunkLength(chunkSize)
if remainingBytes > 0 {
streamLen += getSignedChunkLength(remainingBytes)
}
streamLen += getSignedChunkLength(0)
return streamLen
}
// buildChunkStringToSign - returns the string to sign given chunk data
// and previous signature.
func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string {
stringToSignParts := []string{
streamingPayloadHdr,
t.Format(iso8601DateFormat),
getScope(region, t),
previousSig,
emptySHA256,
hex.EncodeToString(sum256(chunkData)),
}
return strings.Join(stringToSignParts, "\n")
}
// prepareStreamingRequest - prepares a request with appropriate
// headers before computing the seed signature.
func prepareStreamingRequest(req *http.Request, dataLen int64, timestamp time.Time) {
// Set x-amz-content-sha256 header.
req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm)
req.Header.Set("Content-Encoding", streamingEncoding)
req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
// Set content length with streaming signature for each chunk included.
req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize))
req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10))
}
// buildChunkHeader - returns the chunk header.
// e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n
func buildChunkHeader(chunkLen int64, signature string) []byte {
return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n")
}
// buildChunkSignature - returns chunk signature for a given chunk and previous signature.
func buildChunkSignature(chunkData []byte, reqTime time.Time, region,
previousSignature, secretAccessKey string) string {
chunkStringToSign := buildChunkStringToSign(reqTime, region,
previousSignature, chunkData)
signingKey := getSigningKey(secretAccessKey, region, reqTime)
return getSignature(signingKey, chunkStringToSign)
}
// setSeedSignature - computes and stores the seed signature for a given request.
func (s *StreamingReader) setSeedSignature(req *http.Request) {
// Get canonical request
canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders)
// Get string to sign from canonical request.
stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest)
signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime)
// Calculate signature.
s.seedSignature = getSignature(signingKey, stringToSign)
}
// StreamingReader implements chunked upload signature as a reader on
// top of req.Body's io.ReadCloser, emitting: chunk header; chunk data;
// ... repeated until a final zero-byte chunk.
type StreamingReader struct {
accessKeyID string
secretAccessKey string
region string
prevSignature string
seedSignature string
contentLen int64 // Content-Length from req header
baseReadCloser io.ReadCloser // underlying io.Reader
bytesRead int64 // bytes read from underlying io.Reader
buf bytes.Buffer // holds signed chunk
chunkBuf []byte // holds raw data read from req Body
chunkBufLen int // no. of bytes read so far into chunkBuf
done bool // done reading the underlying reader to EOF
reqTime time.Time
chunkNum int
totalChunks int
lastChunkSize int
}
// signChunk - signs a chunk read from s.baseReader of chunkLen size.
func (s *StreamingReader) signChunk(chunkLen int) {
// Compute chunk signature for next header
signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime,
s.region, s.prevSignature, s.secretAccessKey)
// For next chunk signature computation
s.prevSignature = signature
// Write chunk header into streaming buffer
chunkHdr := buildChunkHeader(int64(chunkLen), signature)
s.buf.Write(chunkHdr)
// Write chunk data into streaming buffer
s.buf.Write(s.chunkBuf[:chunkLen])
// Write the chunk trailer.
s.buf.Write([]byte("\r\n"))
// Reset chunkBufLen for next chunk read.
s.chunkBufLen = 0
s.chunkNum++
}
// setStreamingAuthHeader - builds and sets authorization header value
// for streaming signature.
func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
credential := GetCredential(s.accessKeyID, s.region, s.reqTime)
authParts := []string{
signV4Algorithm + " Credential=" + credential,
"SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders),
"Signature=" + s.seedSignature,
}
// Set authorization header.
auth := strings.Join(authParts, ",")
req.Header.Set("Authorization", auth)
}
// StreamingSignV4 - provides chunked upload signatureV4 support by
// implementing io.Reader.
func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey,
region string, dataLen int64, reqTime time.Time) *http.Request {
// Set headers needed for streaming signature.
prepareStreamingRequest(req, dataLen, reqTime)
stReader := &StreamingReader{
baseReadCloser: req.Body,
accessKeyID: accessKeyID,
secretAccessKey: secretAccessKey,
region: region,
reqTime: reqTime,
chunkBuf: make([]byte, payloadChunkSize),
contentLen: dataLen,
chunkNum: 1,
totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
lastChunkSize: int(dataLen % payloadChunkSize),
}
// Add the request headers required for chunk upload signing.
// Compute the seed signature.
stReader.setSeedSignature(req)
// Set the authorization header with the seed signature.
stReader.setStreamingAuthHeader(req)
// Set seed signature as prevSignature for subsequent
// streaming signing process.
stReader.prevSignature = stReader.seedSignature
req.Body = stReader
return req
}
// Read - this method performs chunk upload signature, providing an
// io.Reader interface.
func (s *StreamingReader) Read(buf []byte) (int, error) {
switch {
// After the last chunk is read from underlying reader, we
// never re-fill s.buf.
case s.done:
// s.buf will be (re-)filled with the next chunk when it holds
// fewer bytes than asked for.
case s.buf.Len() < len(buf):
s.chunkBufLen = 0
for {
n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
if err == nil || err == io.ErrUnexpectedEOF {
s.chunkBufLen += n1
s.bytesRead += int64(n1)
if s.chunkBufLen == payloadChunkSize ||
(s.chunkNum == s.totalChunks-1 &&
s.chunkBufLen == s.lastChunkSize) {
// Sign the chunk and write it to s.buf.
s.signChunk(s.chunkBufLen)
break
}
} else if err == io.EOF {
// No more data left in baseReader - last chunk.
// Done reading the last chunk from baseReader.
s.done = true
// Abort if the bytes read from baseReader differ from the
// content length provided.
if s.bytesRead != s.contentLen {
return 0, io.ErrUnexpectedEOF
}
// Sign the chunk and write it to s.buf.
s.signChunk(0)
break
} else {
return 0, err
}
}
}
return s.buf.Read(buf)
}
// Close - this method makes underlying io.ReadCloser's Close method available.
func (s *StreamingReader) Close() error {
return s.baseReadCloser.Close()
}
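
A sketch of how a caller might wire this signer onto a PUT request; the endpoint, bucket and credentials are placeholders:

package main

import (
    "bytes"
    "fmt"
    "net/http"
    "time"

    "github.com/minio/minio-go/pkg/s3signer"
)

func main() {
    // 100 KiB payload, streamed in signed 64 KiB chunks.
    payload := bytes.Repeat([]byte("x"), 100*1024)
    req, err := http.NewRequest("PUT",
        "https://s3.amazonaws.com/mybucket/myobject", bytes.NewReader(payload))
    if err != nil {
        panic(err)
    }

    // Wraps req.Body in a StreamingReader, sets the streaming headers
    // and the Authorization header holding the seed signature.
    req = s3signer.StreamingSignV4(req, "ACCESSKEY", "SECRETKEY",
        "us-east-1", int64(len(payload)), time.Now().UTC())

    // ContentLength now also counts per-chunk metadata: a 65536-byte
    // chunk costs 5+17+64+2+65536+2 = 65626 bytes, the 36864-byte
    // remainder 36953, and the final empty chunk 86, so 102665 total.
    fmt.Println(req.ContentLength) // 102665

    // http.DefaultClient.Do(req) would stream the signed chunks.
}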


@@ -29,6 +29,8 @@ import (
"strconv"
"strings"
"time"
"github.com/minio/minio-go/pkg/s3utils"
)
// Signature and API related constants.
@@ -45,16 +47,16 @@ func encodeURL2Path(u *url.URL) (path string) {
bucketName := hostSplits[0]
path = "/" + bucketName
path += u.Path
-path = urlEncodePath(path)
+path = s3utils.EncodePath(path)
return
}
if strings.HasSuffix(u.Host, ".storage.googleapis.com") {
path = "/" + strings.TrimSuffix(u.Host, ".storage.googleapis.com")
path += u.Path
-path = urlEncodePath(path)
+path = s3utils.EncodePath(path)
return
}
-path = urlEncodePath(u.Path)
+path = s3utils.EncodePath(u.Path)
return
}
@@ -95,10 +97,10 @@ func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in
query.Set("Expires", strconv.FormatInt(epochExpires, 10))
// Encode query and save.
-req.URL.RawQuery = queryEncode(query)
+req.URL.RawQuery = s3utils.QueryEncode(query)
// Save signature finally.
-req.URL.RawQuery += "&Signature=" + urlEncodePath(signature)
+req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature)
// Return.
return &req
@@ -287,7 +289,7 @@ func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, isPreSign b
// Get encoded URL path.
if len(requestURL.Query()) > 0 {
// Keep the usual queries unescaped for string to sign.
-query, _ := url.QueryUnescape(queryEncode(requestURL.Query()))
+query, _ := url.QueryUnescape(s3utils.QueryEncode(requestURL.Query()))
path = path + "?" + query
}
buf.WriteString(path)
@@ -314,7 +316,7 @@ func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, isPreSign b
// Request parameters
if len(vv[0]) > 0 {
buf.WriteByte('=')
-buf.WriteString(strings.Replace(url.QueryEscape(vv[0]), "+", "%20", -1))
+buf.WriteString(vv[0])
}
}
}
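
A sketch of presigning a GET URL with the updated v2 signer; credentials are placeholders:

package main

import (
    "fmt"
    "net/http"

    "github.com/minio/minio-go/pkg/s3signer"
)

func main() {
    req, err := http.NewRequest("GET", "https://s3.amazonaws.com/mybucket/myobject.txt", nil)
    if err != nil {
        panic(err)
    }
    // Presign for 10 minutes; the Expires and Signature query parameters
    // are now encoded via s3utils.QueryEncode/EncodePath rather than the
    // local helpers this commit removes.
    signed := s3signer.PreSignV2(*req, "ACCESSKEY", "SECRETKEY", 600)
    fmt.Println(signed.URL.String())
}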


@@ -24,6 +24,8 @@ import (
"strconv"
"strings"
"time"
"github.com/minio/minio-go/pkg/s3utils"
)
// Signature and API related constants.
@@ -68,7 +70,7 @@ const (
///
/// Is skipped for obvious reasons
///
-var ignoredHeaders = map[string]bool{
+var v4IgnoredHeaders = map[string]bool{
"Authorization": true,
"Content-Type": true,
"Content-Length": true,
@@ -120,7 +122,7 @@ func getHashedPayload(req http.Request) string {
// getCanonicalHeaders generate a list of request headers for
// signature.
-func getCanonicalHeaders(req http.Request) string {
+func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string {
var headers []string
vals := make(map[string][]string)
for k, vv := range req.Header {
@@ -159,7 +161,7 @@ func getCanonicalHeaders(req http.Request) string {
// getSignedHeaders generate all signed request headers.
// i.e lexically sorted, semicolon-separated list of lowercase
// request header names.
-func getSignedHeaders(req http.Request) string {
+func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
var headers []string
for k := range req.Header {
if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
@@ -181,14 +183,14 @@ func getSignedHeaders(req http.Request) string {
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
-func getCanonicalRequest(req http.Request) string {
+func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool) string {
req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
canonicalRequest := strings.Join([]string{
req.Method,
-urlEncodePath(req.URL.Path),
+s3utils.EncodePath(req.URL.Path),
req.URL.RawQuery,
-getCanonicalHeaders(req),
-getSignedHeaders(req),
+getCanonicalHeaders(req, ignoredHeaders),
+getSignedHeaders(req, ignoredHeaders),
getHashedPayload(req),
}, "\n")
return canonicalRequest
@@ -217,7 +219,7 @@ func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
credential := GetCredential(accessKeyID, location, t)
// Get all signed headers.
-signedHeaders := getSignedHeaders(req)
+signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
// Set URL query.
query := req.URL.Query()
@@ -229,7 +231,7 @@ func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
req.URL.RawQuery = query.Encode()
// Get canonical request.
-canonicalRequest := getCanonicalRequest(req)
+canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders)
// Get string to sign from canonical request.
stringToSign := getStringToSignV4(t, location, canonicalRequest)
@@ -271,7 +273,7 @@ func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht
req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
// Get canonical request.
-canonicalRequest := getCanonicalRequest(req)
+canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders)
// Get string to sign from canonical request.
stringToSign := getStringToSignV4(t, location, canonicalRequest)
@@ -283,7 +285,7 @@ func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht
credential := GetCredential(accessKeyID, location, t)
// Get all signed headers.
-signedHeaders := getSignedHeaders(req)
+signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
// Calculate signature.
signature := getSignature(signingKey, stringToSign)
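
A short sketch of header-based signing with the reworked v4 helpers; credentials and endpoint are placeholders:

package main

import (
    "fmt"
    "net/http"
    "strings"

    "github.com/minio/minio-go/pkg/s3signer"
)

func main() {
    req, err := http.NewRequest("PUT",
        "https://s3.amazonaws.com/mybucket/hello.txt", strings.NewReader("hello"))
    if err != nil {
        panic(err)
    }
    // The canonical request is built against v4IgnoredHeaders, so
    // headers such as Authorization and Content-Type never enter
    // the signature.
    signed := s3signer.SignV4(*req, "ACCESSKEY", "SECRETKEY", "us-east-1")
    fmt.Println(signed.Header.Get("Authorization"))
}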


@@ -17,15 +17,8 @@
package s3signer
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"net/url"
"regexp"
"sort"
"strings"
"unicode/utf8"
)
// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
@@ -44,75 +37,3 @@ func sumHMAC(key []byte, data []byte) []byte {
hash.Write(data)
return hash.Sum(nil)
}
-//expects ascii encoded strings - from output of urlEncodePath
-func percentEncodeSlash(s string) string {
-return strings.Replace(s, "/", "%2F", -1)
-}
-// queryEncode - encodes query values in their URL encoded form. In
-// addition to the percent encoding performed by urlEncodePath() used
-// here, it also percent encodes '/' (forward slash)
-func queryEncode(v url.Values) string {
-if v == nil {
-return ""
-}
-var buf bytes.Buffer
-keys := make([]string, 0, len(v))
-for k := range v {
-keys = append(keys, k)
-}
-sort.Strings(keys)
-for _, k := range keys {
-vs := v[k]
-prefix := percentEncodeSlash(urlEncodePath(k)) + "="
-for _, v := range vs {
-if buf.Len() > 0 {
-buf.WriteByte('&')
-}
-buf.WriteString(prefix)
-buf.WriteString(percentEncodeSlash(urlEncodePath(v)))
-}
-}
-return buf.String()
-}
-// urlEncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences
-//
-// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
-// non english characters cannot be parsed due to the nature in which url.Encode() is written
-//
-// This function on the other hand is a direct replacement for url.Encode() technique to support
-// pretty much every UTF-8 character.
-func urlEncodePath(pathName string) string {
-// if object matches reserved string, no need to encode them
-reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
-if reservedNames.MatchString(pathName) {
-return pathName
-}
-var encodedPathname string
-for _, s := range pathName {
-if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
-encodedPathname = encodedPathname + string(s)
-continue
-}
-switch s {
-case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
-encodedPathname = encodedPathname + string(s)
-continue
-default:
-len := utf8.RuneLen(s)
-if len < 0 {
-// if utf8 cannot convert return the same string as is
-return pathName
-}
-u := make([]byte, len)
-utf8.EncodeRune(u, s)
-for _, r := range u {
-hex := hex.EncodeToString([]byte{r})
-encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
-}
-}
-}
-return encodedPathname
-}
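
The removed helpers were not dropped; they moved to the shared s3utils package, which both signature versions now import. A small sketch of the equivalent calls, with outputs derived from the algorithm above:

package main

import (
    "fmt"
    "net/url"

    "github.com/minio/minio-go/pkg/s3utils"
)

func main() {
    // EncodePath keeps §2.3 unreserved characters and '/' as-is and
    // percent-encodes every other UTF-8 byte, like urlEncodePath did.
    fmt.Println(s3utils.EncodePath("/bucket/ob ject/ü.txt"))
    // /bucket/ob%20ject/%C3%BC.txt

    // QueryEncode additionally percent-encodes '/' inside values.
    v := url.Values{}
    v.Set("prefix", "a/b c")
    fmt.Println(s3utils.QueryEncode(v))
    // prefix=a%2Fb%20c
}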


@@ -85,10 +85,6 @@ func IsAmazonEndpoint(endpointURL url.URL) bool {
return true
}
-if IsAmazonS3AccelerateEndpoint(endpointURL) {
-return true
-}
return endpointURL.Host == "s3.amazonaws.com"
}
@@ -105,11 +101,6 @@ func IsAmazonChinaEndpoint(endpointURL url.URL) bool {
return endpointURL.Host == "s3.cn-north-1.amazonaws.com.cn"
}
-// IsAmazonS3AccelerateEndpoint - Match if it is an Amazon S3 Accelerate
-func IsAmazonS3AccelerateEndpoint(endpointURL url.URL) bool {
-return strings.HasSuffix(endpointURL.Host, ".s3-accelerate.amazonaws.com")
-}
// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
func IsGoogleEndpoint(endpointURL url.URL) bool {
if endpointURL == sentinelURL {