Refactor streaming signatureV4 w/ state machine (#2862)

* Refactor streaming signatureV4 w/ state machine

- Used state machine to make transitions between reading chunk header,
  chunk data and trailer explicit.

* debug: add print/panic statements to gather more info on CI failure

* Persist lastChunk status between Read() calls on ChunkReader

... remove panic() which was added as an interim aid for debugging.

* Add unit-tests to cover v4 streaming signature
This commit is contained in:
Krishnan Parthasarathi 2016-10-10 14:12:32 +05:30 committed by Harshavardhana
parent 3cfb23750a
commit 2d5e988a6d
5 changed files with 253 additions and 80 deletions

View File

@ -387,8 +387,9 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
var bytesWritten int64
bytesWritten, err = fsCreateFile(fs.storage, teeReader, buf, minioMetaBucket, tempObj)
if err != nil {
errorIf(err, "Failed to create object %s/%s", bucket, object)
fs.storage.DeleteFile(minioMetaBucket, tempObj)
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
// Should return IncompleteBody{} error when reader has fewer

View File

@ -215,6 +215,20 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
objectName := "test-object"
bytesDataLen := 65 * 1024
bytesData := bytes.Repeat([]byte{'a'}, bytesDataLen)
oneKData := bytes.Repeat([]byte("a"), 1024)
err := initEventNotifier(obj)
if err != nil {
t.Fatalf("[%s] - Failed to initialize event notifiers <ERROR> %v", instanceType, err)
}
type streamFault int
const (
None streamFault = iota
malformedEncoding
unexpectedEOF
signatureMismatch
)
// byte data for PutObject.
// test cases with inputs and expected result for GetObject.
@ -232,6 +246,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
secretKey string
shouldPass bool
removeAuthHeader bool
fault streamFault
}{
// Test case - 1.
// Fetching the entire object and validating its contents.
@ -304,6 +319,51 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
secretKey: credentials.SecretAccessKey,
shouldPass: false,
},
// Test case - 6
// Chunk with malformed encoding.
{
bucketName: bucketName,
objectName: objectName,
data: oneKData,
dataLen: 1024,
chunkSize: 1024,
expectedContent: []byte{},
expectedRespStatus: http.StatusInternalServerError,
accessKey: credentials.AccessKeyID,
secretKey: credentials.SecretAccessKey,
shouldPass: false,
fault: malformedEncoding,
},
// Test case - 7
// Chunk with shorter than advertised chunk data.
{
bucketName: bucketName,
objectName: objectName,
data: oneKData,
dataLen: 1024,
chunkSize: 1024,
expectedContent: []byte{},
expectedRespStatus: http.StatusBadRequest,
accessKey: credentials.AccessKeyID,
secretKey: credentials.SecretAccessKey,
shouldPass: false,
fault: unexpectedEOF,
},
// Test case - 8
// Chunk with first chunk data byte tampered.
{
bucketName: bucketName,
objectName: objectName,
data: oneKData,
dataLen: 1024,
chunkSize: 1024,
expectedContent: []byte{},
expectedRespStatus: http.StatusForbidden,
accessKey: credentials.AccessKeyID,
secretKey: credentials.SecretAccessKey,
shouldPass: false,
fault: signatureMismatch,
},
}
// Iterating over the cases, fetching the object validating the response.
for i, testCase := range testCases {
@ -321,12 +381,21 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
if testCase.removeAuthHeader {
req.Header.Del("Authorization")
}
switch testCase.fault {
case malformedEncoding:
req, err = malformChunkSizeSigV4(req, testCase.chunkSize-1)
case signatureMismatch:
req, err = malformDataSigV4(req, 'z')
case unexpectedEOF:
req, err = truncateChunkByHalfSigv4(req)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler,`func (api objectAPIHandlers) GetObjectHandler` handles the request.
apiRouter.ServeHTTP(rec, req)
// Assert the response code with the expected status.
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testCase.expectedRespStatus, rec.Code)
t.Errorf("Test %d %s: Expected the response status to be `%d`, but instead found `%d`",
i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
// read the response body.
actualContent, err := ioutil.ReadAll(rec.Body)
@ -337,6 +406,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
// Verify whether the bucket obtained object is same as the one created.
if !bytes.Equal(testCase.expectedContent, actualContent) {
t.Errorf("Test %d: %s: Object content differs from expected value.: %s", i+1, instanceType, string(actualContent))
continue
}
buffer := new(bytes.Buffer)

View File

@ -175,6 +175,7 @@ func newSignV4ChunkedReader(req *http.Request) (io.Reader, APIErrorCode) {
seedSignature: seedSignature,
seedDate: seedDate,
chunkSHA256Writer: sha256.New(),
state: readChunkHeader,
}, ErrNone
}
@ -184,7 +185,8 @@ type s3ChunkedReader struct {
reader *bufio.Reader
seedSignature string
seedDate time.Time
dataChunkRead bool
state chunkState
lastChunk bool
chunkSignature string
chunkSHA256Writer hash.Hash // Calculates sha256 of chunk data.
n uint64 // Unread bytes in chunk
@ -207,99 +209,125 @@ func (cr *s3ChunkedReader) readS3ChunkHeader() {
if cr.n == 0 {
cr.err = io.EOF
}
// is the data part already read?, set this to false.
cr.dataChunkRead = false
// Reset sha256 hasher for a fresh start.
cr.chunkSHA256Writer.Reset()
// Save the incoming chunk signature.
cr.chunkSignature = string(hexChunkSignature)
}
// Validate if the underlying buffer has chunk header.
func (cr *s3ChunkedReader) s3ChunkHeaderAvailable() bool {
n := cr.reader.Buffered()
if n > 0 {
// Peek without seeking to look for trailing '\n'.
peek, _ := cr.reader.Peek(n)
return bytes.IndexByte(peek, '\n') >= 0
type chunkState int
const (
readChunkHeader chunkState = iota
readChunkTrailer
readChunk
verifyChunk
)
func (cs chunkState) String() string {
stateString := ""
switch cs {
case readChunkHeader:
stateString = "readChunkHeader"
case readChunkTrailer:
stateString = "readChunkTrailer"
case readChunk:
stateString = "readChunk"
case verifyChunk:
stateString = "verifyChunk"
}
return false
return stateString
}
// Read - implements `io.Reader`, which transparently decodes
// the incoming AWS Signature V4 streaming signature.
func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
for cr.err == nil {
if cr.n == 0 {
// For no chunk header available, we don't have to
// proceed to read again.
if n > 0 && !cr.s3ChunkHeaderAvailable() {
// We've read enough. Don't potentially block
// reading a new chunk header.
break
}
// If the chunk has been read, proceed to validate the rolling signature.
if cr.dataChunkRead {
// Calculate the hashed chunk.
hashedChunk := hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil))
// Calculate the chunk signature.
newSignature := getChunkSignature(cr.seedSignature, cr.seedDate, hashedChunk)
if cr.chunkSignature != newSignature {
// Chunk signature doesn't match we return signature does not match.
cr.err = errSignatureMismatch
break
}
// Newly calculated signature becomes the seed for the next chunk
// this follows the chaining.
cr.seedSignature = newSignature
}
// Proceed to read the next chunk header.
for {
switch cr.state {
case readChunkHeader:
cr.readS3ChunkHeader()
continue
}
// With requested buffer of zero length, no need to read further.
if len(buf) == 0 {
break
}
rbuf := buf
// Make sure to read only the specified payload size, stagger
// the rest for subsequent requests.
if uint64(len(rbuf)) > cr.n {
rbuf = rbuf[:cr.n]
}
var n0 int
n0, cr.err = cr.reader.Read(rbuf)
// If we're at the end of a chunk.
if cr.n == 0 && cr.err == io.EOF {
cr.state = readChunkTrailer
cr.lastChunk = true
continue
}
if cr.err != nil {
return 0, cr.err
}
cr.state = readChunk
case readChunkTrailer:
cr.err = readCRLF(cr.reader)
if cr.err != nil {
return 0, errMalformedEncoding
}
cr.state = verifyChunk
case readChunk:
// There is no more space left in the request buffer.
if len(buf) == 0 {
return n, nil
}
rbuf := buf
// The request buffer is larger than the current chunk size.
// Read only the current chunk from the underlying reader.
if uint64(len(rbuf)) > cr.n {
rbuf = rbuf[:cr.n]
}
var n0 int
n0, cr.err = cr.reader.Read(rbuf)
if cr.err != nil {
// We have lesser than chunk size advertised in chunkHeader, this is 'unexpected'.
if cr.err == io.EOF {
cr.err = io.ErrUnexpectedEOF
}
return 0, cr.err
}
// Calculate sha256.
cr.chunkSHA256Writer.Write(rbuf[:n0])
// Set since we have read the chunk read.
cr.dataChunkRead = true
// Calculate sha256.
cr.chunkSHA256Writer.Write(rbuf[:n0])
// Update the bytes read into request buffer so far.
n += n0
buf = buf[n0:]
// Update bytes to be read of the current chunk before verifying chunk's signature.
cr.n -= uint64(n0)
n += n0
buf = buf[n0:]
// Decrements the 'cr.n' for future reads.
cr.n -= uint64(n0)
// If we're at the end of a chunk.
if cr.n == 0 && cr.err == nil {
// Read the next two bytes to verify if they are "\r\n".
cr.err = checkCRLF(cr.reader)
// If we're at the end of a chunk.
if cr.n == 0 {
cr.state = readChunkTrailer
continue
}
case verifyChunk:
// Calculate the hashed chunk.
hashedChunk := hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil))
// Calculate the chunk signature.
newSignature := getChunkSignature(cr.seedSignature, cr.seedDate, hashedChunk)
if cr.chunkSignature != newSignature {
// Chunk signature doesn't match we return signature does not match.
cr.err = errSignatureMismatch
return 0, cr.err
}
// Newly calculated signature becomes the seed for the next chunk
// this follows the chaining.
cr.seedSignature = newSignature
cr.chunkSHA256Writer.Reset()
cr.state = readChunkHeader
if cr.lastChunk {
return n, nil
}
}
}
// Return number of bytes read, and error if any.
return n, cr.err
}
// checkCRLF - check if reader only has '\r\n' CRLF character.
// readCRLF - check if reader only has '\r\n' CRLF character.
// returns malformed encoding if it doesn't.
func checkCRLF(reader io.Reader) (err error) {
var buf = make([]byte, 2)
if _, err = io.ReadFull(reader, buf[:2]); err == nil {
if buf[0] != '\r' || buf[1] != '\n' {
err = errMalformedEncoding
}
func readCRLF(reader io.Reader) error {
buf := make([]byte, 2)
_, err := io.ReadFull(reader, buf[:2])
if err != nil {
return err
}
return err
if buf[0] != '\r' || buf[1] != '\n' {
return errMalformedEncoding
}
return nil
}
// Read a line of bytes (up to \n) from b.

View File

@ -136,8 +136,8 @@ func TestParseS3ChunkExtension(t *testing.T) {
}
}
// Test check CRLF characters on input reader.
func TestCheckCRLF(t *testing.T) {
// Test read CRLF characters on input reader.
func TestReadCRLF(t *testing.T) {
type testCase struct {
reader io.Reader
expectedErr error
@ -153,7 +153,7 @@ func TestCheckCRLF(t *testing.T) {
{bytes.NewReader([]byte("h")), io.ErrUnexpectedEOF},
}
for i, tt := range tests {
err := checkCRLF(tt.reader)
err := readCRLF(tt.reader)
if err != tt.expectedErr {
t.Errorf("Test %d: Expected %s, got %s this", i+1, tt.expectedErr, err)
}

View File

@ -17,6 +17,7 @@
package cmd
import (
"bufio"
"bytes"
"crypto/hmac"
"crypto/sha1"
@ -324,6 +325,79 @@ func (testServer TestServer) Stop() {
testServer.Server.Close()
}
// Truncate request to simulate unexpected EOF for a request signed using streaming signature v4.
func truncateChunkByHalfSigv4(req *http.Request) (*http.Request, error) {
bufReader := bufio.NewReader(req.Body)
hexChunkSize, chunkSignature, err := readChunkLine(bufReader)
if err != nil {
return nil, err
}
newChunkHdr := []byte(fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n",
hexChunkSize, chunkSignature))
newChunk, err := ioutil.ReadAll(bufReader)
if err != nil {
return nil, err
}
newReq := req
newReq.Body = ioutil.NopCloser(
bytes.NewReader(bytes.Join([][]byte{newChunkHdr, newChunk[:len(newChunk)/2]},
[]byte(""))),
)
return newReq, nil
}
// Malform data given a request signed using streaming signature V4.
func malformDataSigV4(req *http.Request, newByte byte) (*http.Request, error) {
bufReader := bufio.NewReader(req.Body)
hexChunkSize, chunkSignature, err := readChunkLine(bufReader)
if err != nil {
return nil, err
}
newChunkHdr := []byte(fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n",
hexChunkSize, chunkSignature))
newChunk, err := ioutil.ReadAll(bufReader)
if err != nil {
return nil, err
}
newChunk[0] = newByte
newReq := req
newReq.Body = ioutil.NopCloser(
bytes.NewReader(bytes.Join([][]byte{newChunkHdr, newChunk},
[]byte(""))),
)
return newReq, nil
}
// Malform chunk size given a request signed using streaming signatureV4.
func malformChunkSizeSigV4(req *http.Request, badSize int64) (*http.Request, error) {
bufReader := bufio.NewReader(req.Body)
_, chunkSignature, err := readChunkLine(bufReader)
if err != nil {
return nil, err
}
n := badSize
newHexChunkSize := []byte(fmt.Sprintf("%x", n))
newChunkHdr := []byte(fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n",
newHexChunkSize, chunkSignature))
newChunk, err := ioutil.ReadAll(bufReader)
if err != nil {
return nil, err
}
newReq := req
newReq.Body = ioutil.NopCloser(
bytes.NewReader(bytes.Join([][]byte{newChunkHdr, newChunk},
[]byte(""))),
)
return newReq, nil
}
// Sign given request using Signature V4.
func signStreamingRequest(req *http.Request, accessKey, secretKey string) (string, error) {
// Get hashed payload.