Refactor streaming signatureV4 w/ state machine (#2862)

* Refactor streaming signatureV4 w/ state machine

- Used state machine to make transitions between reading chunk header,
  chunk data and trailer explicit.

* debug: add print/panic statements to gather more info on CI failure

* Persist lastChunk status between Read() on ChunkReader

... remove panic() which was added as an interim debugging aid.

* Add unit-tests to cover v4 streaming signature
This commit is contained in:
Krishnan Parthasarathi 2016-10-10 14:12:32 +05:30 committed by Harshavardhana
parent 3cfb23750a
commit 2d5e988a6d
5 changed files with 253 additions and 80 deletions

View File

@ -387,8 +387,9 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
var bytesWritten int64 var bytesWritten int64
bytesWritten, err = fsCreateFile(fs.storage, teeReader, buf, minioMetaBucket, tempObj) bytesWritten, err = fsCreateFile(fs.storage, teeReader, buf, minioMetaBucket, tempObj)
if err != nil { if err != nil {
errorIf(err, "Failed to create object %s/%s", bucket, object)
fs.storage.DeleteFile(minioMetaBucket, tempObj) fs.storage.DeleteFile(minioMetaBucket, tempObj)
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) return ObjectInfo{}, toObjectErr(err, bucket, object)
} }
// Should return IncompleteBody{} error when reader has fewer // Should return IncompleteBody{} error when reader has fewer

View File

@ -215,6 +215,20 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
objectName := "test-object" objectName := "test-object"
bytesDataLen := 65 * 1024 bytesDataLen := 65 * 1024
bytesData := bytes.Repeat([]byte{'a'}, bytesDataLen) bytesData := bytes.Repeat([]byte{'a'}, bytesDataLen)
oneKData := bytes.Repeat([]byte("a"), 1024)
err := initEventNotifier(obj)
if err != nil {
t.Fatalf("[%s] - Failed to initialize event notifiers <ERROR> %v", instanceType, err)
}
type streamFault int
const (
None streamFault = iota
malformedEncoding
unexpectedEOF
signatureMismatch
)
// byte data for PutObject. // byte data for PutObject.
// test cases with inputs and expected result for GetObject. // test cases with inputs and expected result for GetObject.
@ -232,6 +246,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
secretKey string secretKey string
shouldPass bool shouldPass bool
removeAuthHeader bool removeAuthHeader bool
fault streamFault
}{ }{
// Test case - 1. // Test case - 1.
// Fetching the entire object and validating its contents. // Fetching the entire object and validating its contents.
@ -304,6 +319,51 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
secretKey: credentials.SecretAccessKey, secretKey: credentials.SecretAccessKey,
shouldPass: false, shouldPass: false,
}, },
// Test case - 6
// Chunk with malformed encoding.
{
bucketName: bucketName,
objectName: objectName,
data: oneKData,
dataLen: 1024,
chunkSize: 1024,
expectedContent: []byte{},
expectedRespStatus: http.StatusInternalServerError,
accessKey: credentials.AccessKeyID,
secretKey: credentials.SecretAccessKey,
shouldPass: false,
fault: malformedEncoding,
},
// Test case - 7
// Chunk with shorter than advertised chunk data.
{
bucketName: bucketName,
objectName: objectName,
data: oneKData,
dataLen: 1024,
chunkSize: 1024,
expectedContent: []byte{},
expectedRespStatus: http.StatusBadRequest,
accessKey: credentials.AccessKeyID,
secretKey: credentials.SecretAccessKey,
shouldPass: false,
fault: unexpectedEOF,
},
// Test case - 8
// Chunk with first chunk data byte tampered.
{
bucketName: bucketName,
objectName: objectName,
data: oneKData,
dataLen: 1024,
chunkSize: 1024,
expectedContent: []byte{},
expectedRespStatus: http.StatusForbidden,
accessKey: credentials.AccessKeyID,
secretKey: credentials.SecretAccessKey,
shouldPass: false,
fault: signatureMismatch,
},
} }
// Iterating over the cases, fetching the object validating the response. // Iterating over the cases, fetching the object validating the response.
for i, testCase := range testCases { for i, testCase := range testCases {
@ -321,12 +381,21 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
if testCase.removeAuthHeader { if testCase.removeAuthHeader {
req.Header.Del("Authorization") req.Header.Del("Authorization")
} }
switch testCase.fault {
case malformedEncoding:
req, err = malformChunkSizeSigV4(req, testCase.chunkSize-1)
case signatureMismatch:
req, err = malformDataSigV4(req, 'z')
case unexpectedEOF:
req, err = truncateChunkByHalfSigv4(req)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler. // Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler,`func (api objectAPIHandlers) GetObjectHandler` handles the request. // Call the ServeHTTP to execute the handler,`func (api objectAPIHandlers) GetObjectHandler` handles the request.
apiRouter.ServeHTTP(rec, req) apiRouter.ServeHTTP(rec, req)
// Assert the response code with the expected status. // Assert the response code with the expected status.
if rec.Code != testCase.expectedRespStatus { if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testCase.expectedRespStatus, rec.Code) t.Errorf("Test %d %s: Expected the response status to be `%d`, but instead found `%d`",
i+1, instanceType, testCase.expectedRespStatus, rec.Code)
} }
// read the response body. // read the response body.
actualContent, err := ioutil.ReadAll(rec.Body) actualContent, err := ioutil.ReadAll(rec.Body)
@ -337,6 +406,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
// Verify whether the bucket obtained object is same as the one created. // Verify whether the bucket obtained object is same as the one created.
if !bytes.Equal(testCase.expectedContent, actualContent) { if !bytes.Equal(testCase.expectedContent, actualContent) {
t.Errorf("Test %d: %s: Object content differs from expected value.: %s", i+1, instanceType, string(actualContent)) t.Errorf("Test %d: %s: Object content differs from expected value.: %s", i+1, instanceType, string(actualContent))
continue
} }
buffer := new(bytes.Buffer) buffer := new(bytes.Buffer)

View File

@ -175,6 +175,7 @@ func newSignV4ChunkedReader(req *http.Request) (io.Reader, APIErrorCode) {
seedSignature: seedSignature, seedSignature: seedSignature,
seedDate: seedDate, seedDate: seedDate,
chunkSHA256Writer: sha256.New(), chunkSHA256Writer: sha256.New(),
state: readChunkHeader,
}, ErrNone }, ErrNone
} }
@ -184,7 +185,8 @@ type s3ChunkedReader struct {
reader *bufio.Reader reader *bufio.Reader
seedSignature string seedSignature string
seedDate time.Time seedDate time.Time
dataChunkRead bool state chunkState
lastChunk bool
chunkSignature string chunkSignature string
chunkSHA256Writer hash.Hash // Calculates sha256 of chunk data. chunkSHA256Writer hash.Hash // Calculates sha256 of chunk data.
n uint64 // Unread bytes in chunk n uint64 // Unread bytes in chunk
@ -207,99 +209,125 @@ func (cr *s3ChunkedReader) readS3ChunkHeader() {
if cr.n == 0 { if cr.n == 0 {
cr.err = io.EOF cr.err = io.EOF
} }
// is the data part already read?, set this to false.
cr.dataChunkRead = false
// Reset sha256 hasher for a fresh start.
cr.chunkSHA256Writer.Reset()
// Save the incoming chunk signature. // Save the incoming chunk signature.
cr.chunkSignature = string(hexChunkSignature) cr.chunkSignature = string(hexChunkSignature)
} }
// Validate if the underlying buffer has chunk header. type chunkState int
func (cr *s3ChunkedReader) s3ChunkHeaderAvailable() bool {
n := cr.reader.Buffered() const (
if n > 0 { readChunkHeader chunkState = iota
// Peek without seeking to look for trailing '\n'. readChunkTrailer
peek, _ := cr.reader.Peek(n) readChunk
return bytes.IndexByte(peek, '\n') >= 0 verifyChunk
)
func (cs chunkState) String() string {
stateString := ""
switch cs {
case readChunkHeader:
stateString = "readChunkHeader"
case readChunkTrailer:
stateString = "readChunkTrailer"
case readChunk:
stateString = "readChunk"
case verifyChunk:
stateString = "verifyChunk"
} }
return false return stateString
} }
// Read - implements `io.Reader`, which transparently decodes // Read - implements `io.Reader`, which transparently decodes
// the incoming AWS Signature V4 streaming signature. // the incoming AWS Signature V4 streaming signature.
func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) { func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
for cr.err == nil { for {
if cr.n == 0 { switch cr.state {
// For no chunk header available, we don't have to case readChunkHeader:
// proceed to read again.
if n > 0 && !cr.s3ChunkHeaderAvailable() {
// We've read enough. Don't potentially block
// reading a new chunk header.
break
}
// If the chunk has been read, proceed to validate the rolling signature.
if cr.dataChunkRead {
// Calculate the hashed chunk.
hashedChunk := hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil))
// Calculate the chunk signature.
newSignature := getChunkSignature(cr.seedSignature, cr.seedDate, hashedChunk)
if cr.chunkSignature != newSignature {
// Chunk signature doesn't match we return signature does not match.
cr.err = errSignatureMismatch
break
}
// Newly calculated signature becomes the seed for the next chunk
// this follows the chaining.
cr.seedSignature = newSignature
}
// Proceed to read the next chunk header.
cr.readS3ChunkHeader() cr.readS3ChunkHeader()
continue // If we're at the end of a chunk.
} if cr.n == 0 && cr.err == io.EOF {
// With requested buffer of zero length, no need to read further. cr.state = readChunkTrailer
if len(buf) == 0 { cr.lastChunk = true
break continue
} }
rbuf := buf if cr.err != nil {
// Make sure to read only the specified payload size, stagger return 0, cr.err
// the rest for subsequent requests. }
if uint64(len(rbuf)) > cr.n { cr.state = readChunk
rbuf = rbuf[:cr.n] case readChunkTrailer:
} cr.err = readCRLF(cr.reader)
var n0 int if cr.err != nil {
n0, cr.err = cr.reader.Read(rbuf) return 0, errMalformedEncoding
}
cr.state = verifyChunk
case readChunk:
// There is no more space left in the request buffer.
if len(buf) == 0 {
return n, nil
}
rbuf := buf
// The request buffer is larger than the current chunk size.
// Read only the current chunk from the underlying reader.
if uint64(len(rbuf)) > cr.n {
rbuf = rbuf[:cr.n]
}
var n0 int
n0, cr.err = cr.reader.Read(rbuf)
if cr.err != nil {
// We have lesser than chunk size advertised in chunkHeader, this is 'unexpected'.
if cr.err == io.EOF {
cr.err = io.ErrUnexpectedEOF
}
return 0, cr.err
}
// Calculate sha256. // Calculate sha256.
cr.chunkSHA256Writer.Write(rbuf[:n0]) cr.chunkSHA256Writer.Write(rbuf[:n0])
// Set since we have read the chunk read. // Update the bytes read into request buffer so far.
cr.dataChunkRead = true n += n0
buf = buf[n0:]
// Update bytes to be read of the current chunk before verifying chunk's signature.
cr.n -= uint64(n0)
n += n0 // If we're at the end of a chunk.
buf = buf[n0:] if cr.n == 0 {
// Decrements the 'cr.n' for future reads. cr.state = readChunkTrailer
cr.n -= uint64(n0) continue
}
// If we're at the end of a chunk. case verifyChunk:
if cr.n == 0 && cr.err == nil { // Calculate the hashed chunk.
// Read the next two bytes to verify if they are "\r\n". hashedChunk := hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil))
cr.err = checkCRLF(cr.reader) // Calculate the chunk signature.
newSignature := getChunkSignature(cr.seedSignature, cr.seedDate, hashedChunk)
if cr.chunkSignature != newSignature {
// Chunk signature doesn't match we return signature does not match.
cr.err = errSignatureMismatch
return 0, cr.err
}
// Newly calculated signature becomes the seed for the next chunk
// this follows the chaining.
cr.seedSignature = newSignature
cr.chunkSHA256Writer.Reset()
cr.state = readChunkHeader
if cr.lastChunk {
return n, nil
}
} }
} }
// Return number of bytes read, and error if any.
return n, cr.err
} }
// checkCRLF - check if reader only has '\r\n' CRLF character. // readCRLF - check if reader only has '\r\n' CRLF character.
// returns malformed encoding if it doesn't. // returns malformed encoding if it doesn't.
func checkCRLF(reader io.Reader) (err error) { func readCRLF(reader io.Reader) error {
var buf = make([]byte, 2) buf := make([]byte, 2)
if _, err = io.ReadFull(reader, buf[:2]); err == nil { _, err := io.ReadFull(reader, buf[:2])
if buf[0] != '\r' || buf[1] != '\n' { if err != nil {
err = errMalformedEncoding return err
}
} }
return err if buf[0] != '\r' || buf[1] != '\n' {
return errMalformedEncoding
}
return nil
} }
// Read a line of bytes (up to \n) from b. // Read a line of bytes (up to \n) from b.

View File

@ -136,8 +136,8 @@ func TestParseS3ChunkExtension(t *testing.T) {
} }
} }
// Test check CRLF characters on input reader. // Test read CRLF characters on input reader.
func TestCheckCRLF(t *testing.T) { func TestReadCRLF(t *testing.T) {
type testCase struct { type testCase struct {
reader io.Reader reader io.Reader
expectedErr error expectedErr error
@ -153,7 +153,7 @@ func TestCheckCRLF(t *testing.T) {
{bytes.NewReader([]byte("h")), io.ErrUnexpectedEOF}, {bytes.NewReader([]byte("h")), io.ErrUnexpectedEOF},
} }
for i, tt := range tests { for i, tt := range tests {
err := checkCRLF(tt.reader) err := readCRLF(tt.reader)
if err != tt.expectedErr { if err != tt.expectedErr {
t.Errorf("Test %d: Expected %s, got %s this", i+1, tt.expectedErr, err) t.Errorf("Test %d: Expected %s, got %s this", i+1, tt.expectedErr, err)
} }

View File

@ -17,6 +17,7 @@
package cmd package cmd
import ( import (
"bufio"
"bytes" "bytes"
"crypto/hmac" "crypto/hmac"
"crypto/sha1" "crypto/sha1"
@ -324,6 +325,79 @@ func (testServer TestServer) Stop() {
testServer.Server.Close() testServer.Server.Close()
} }
// truncateChunkByHalfSigv4 rewrites the body of a request signed using
// streaming signature V4 so that the first chunk header is kept intact but
// only half of the remaining payload follows, simulating an unexpected EOF
// when the server reads the advertised chunk size.
func truncateChunkByHalfSigv4(req *http.Request) (*http.Request, error) {
	r := bufio.NewReader(req.Body)
	hexChunkSize, chunkSignature, err := readChunkLine(r)
	if err != nil {
		return nil, err
	}
	// Rebuild the original chunk header verbatim.
	hdr := fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n", hexChunkSize, chunkSignature)
	payload, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	// Keep the header, drop the second half of the remaining payload.
	var body bytes.Buffer
	body.WriteString(hdr)
	body.Write(payload[:len(payload)/2])
	req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
	return req, nil
}
// malformDataSigV4 tampers with the first byte of chunk data in a request
// signed using streaming signature V4, replacing it with newByte. The chunk
// header (size and signature) is preserved unchanged, so the server should
// detect a chunk signature mismatch rather than a malformed encoding.
func malformDataSigV4(req *http.Request, newByte byte) (*http.Request, error) {
	bufReader := bufio.NewReader(req.Body)
	hexChunkSize, chunkSignature, err := readChunkLine(bufReader)
	if err != nil {
		return nil, err
	}
	newChunkHdr := []byte(fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n",
		hexChunkSize, chunkSignature))
	newChunk, err := ioutil.ReadAll(bufReader)
	if err != nil {
		return nil, err
	}
	// Guard against an empty payload; indexing newChunk[0] would otherwise
	// panic with index out of range.
	if len(newChunk) == 0 {
		return nil, fmt.Errorf("no chunk data available to malform")
	}
	// Tamper with the first data byte to invalidate the chunk signature.
	newChunk[0] = newByte
	newReq := req
	newReq.Body = ioutil.NopCloser(
		bytes.NewReader(bytes.Join([][]byte{newChunkHdr, newChunk},
			[]byte(""))),
	)
	return newReq, nil
}
// malformChunkSizeSigV4 replaces the advertised size of the first chunk with
// badSize in a request signed using streaming signature V4. The chunk
// signature is reused unchanged, so the server should reject the request as
// having a malformed chunk encoding.
func malformChunkSizeSigV4(req *http.Request, badSize int64) (*http.Request, error) {
	r := bufio.NewReader(req.Body)
	// Discard the original chunk size; only the signature is reused.
	_, chunkSignature, err := readChunkLine(r)
	if err != nil {
		return nil, err
	}
	badSizeHex := fmt.Sprintf("%x", badSize)
	hdr := fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n", badSizeHex, chunkSignature)
	rest, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	// Splice the forged header in front of the untouched remainder.
	var body bytes.Buffer
	body.WriteString(hdr)
	body.Write(rest)
	req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
	return req, nil
}
// Sign given request using Signature V4. // Sign given request using Signature V4.
func signStreamingRequest(req *http.Request, accessKey, secretKey string) (string, error) { func signStreamingRequest(req *http.Request, accessKey, secretKey string) (string, error) {
// Get hashed payload. // Get hashed payload.