Support SSE-C multipart source objects in CopyObject (#5603)

The current code did not implement the logic needed to
decrypt encrypted multipart source objects; this PR fixes
that by adding support for copying encrypted multipart objects.
Harshavardhana 2018-03-02 17:24:02 -08:00 committed by GitHub
parent e4f6877c8b
commit 52eea7b9c1
5 changed files with 149 additions and 59 deletions

cmd/encryption-v1.go

@@ -28,6 +28,7 @@ import (
 	"io"
 	"net/http"

+	"github.com/minio/minio/pkg/ioutil"
 	sha256 "github.com/minio/sha256-simd"
 	"github.com/minio/sio"
 )
@@ -426,6 +427,7 @@ func newDecryptWriterWithObjectKey(client io.Writer, objectEncryptionKey []byte,
 	delete(metadata, ServerSideEncryptionIV)
 	delete(metadata, ServerSideEncryptionSealAlgorithm)
 	delete(metadata, ServerSideEncryptionSealedKey)
+	delete(metadata, ReservedMetadataPrefix+"Encrypted-Multipart")
 	return writer, nil
 }
@@ -463,6 +465,7 @@ type DecryptBlocksWriter struct {
 	partEncRelOffset int64

+	copySource bool
 	// Customer Key
 	customerKeyHeader string
 }
@@ -473,8 +476,15 @@ func (w *DecryptBlocksWriter) buildDecrypter(partID int) error {
 		m[k] = v
 	}

 	// Initialize the first decrypter, new decrypters will be initialized in Write() operation as needed.
-	w.req.Header.Set(SSECustomerKey, w.customerKeyHeader)
-	key, err := ParseSSECustomerRequest(w.req)
+	var key []byte
+	var err error
+	if w.copySource {
+		w.req.Header.Set(SSECopyCustomerKey, w.customerKeyHeader)
+		key, err = ParseSSECopyCustomerRequest(w.req)
+	} else {
+		w.req.Header.Set(SSECustomerKey, w.customerKeyHeader)
+		key, err = ParseSSECustomerRequest(w.req)
+	}
 	if err != nil {
 		return err
 	}
@@ -491,14 +501,24 @@ func (w *DecryptBlocksWriter) buildDecrypter(partID int) error {
 	mac.Write(partIDbin[:])
 	partEncryptionKey := mac.Sum(nil)

-	delete(m, SSECustomerKey) // make sure we do not save the key by accident
+	// make sure we do not save the key by accident
+	if w.copySource {
+		delete(m, SSECopyCustomerKey)
+	} else {
+		delete(m, SSECustomerKey)
+	}

-	decrypter, err := newDecryptWriterWithObjectKey(w.writer, partEncryptionKey, w.startSeqNum, m)
+	// Make sure to provide a NopCloser such that a Close on
+	// sio's decryptWriter does not close the underlying writer,
+	// which could end the stream prematurely.
+	decrypter, err := newDecryptWriterWithObjectKey(ioutil.NopCloser(w.writer), partEncryptionKey, w.startSeqNum, m)
 	if err != nil {
 		return err
 	}

 	if w.decrypter != nil {
+		// Proactively close the current decrypter so that any pending
+		// buffers are flushed before we allocate a new one.
 		err = w.decrypter.Close()
 		if err != nil {
 			return err
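The hunk above depends on a per-part key schedule: each part of a multipart object is encrypted with its own key, derived from the object encryption key and the part ID via HMAC-SHA256. A minimal self-contained sketch of that derivation, assuming a 4-byte little-endian part ID and using the standard library's SHA-256 where the real code uses sha256-simd:

package main

import (
    "crypto/hmac"
    "crypto/sha256"
    "encoding/binary"
    "fmt"
)

// partKey derives the per-part encryption key: HMAC-SHA256 over the
// binary part ID, keyed with the (already unsealed) object key.
func partKey(objectEncryptionKey []byte, partID int) []byte {
    var partIDbin [4]byte
    binary.LittleEndian.PutUint32(partIDbin[:], uint32(partID)) // endianness is an assumption
    mac := hmac.New(sha256.New, objectEncryptionKey)
    mac.Write(partIDbin[:])
    return mac.Sum(nil)
}

func main() {
    key := make([]byte, 32) // stand-in for the unsealed object key
    fmt.Printf("part 1: %x\n", partKey(key, 1))
    fmt.Printf("part 2: %x\n", partKey(key, 2))
}

Because the key changes at every part boundary, a multipart copy cannot be decrypted by a single streaming decrypter; the decrypter has to be rebuilt per part, which is exactly what buildDecrypter does.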
@@ -561,9 +581,17 @@ func (w *DecryptBlocksWriter) Close() error {
 	return nil
 }

+// DecryptAllBlocksCopyRequest - setup a struct which can decrypt many concatenated encrypted data
+// parts. The parts information helps to know the boundaries of each encrypted data block; this
+// function decrypts all parts starting from part-1.
+func DecryptAllBlocksCopyRequest(client io.Writer, r *http.Request, objInfo ObjectInfo) (io.WriteCloser, int64, error) {
+	w, _, size, err := DecryptBlocksRequest(client, r, 0, objInfo.Size, objInfo, true)
+	return w, size, err
+}
+
 // DecryptBlocksRequest - setup a struct which can decrypt many concatenated encrypted data
 // parts information helps to know the boundaries of each encrypted data block.
-func DecryptBlocksRequest(client io.Writer, r *http.Request, startOffset, length int64, objInfo ObjectInfo) (io.WriteCloser, int64, int64, error) {
+func DecryptBlocksRequest(client io.Writer, r *http.Request, startOffset, length int64, objInfo ObjectInfo, copySource bool) (io.WriteCloser, int64, int64, error) {
 	seqNumber, encStartOffset, encLength := getEncryptedStartOffset(startOffset, length)

 	// Encryption length cannot be bigger than the file size, if it is
@@ -573,11 +601,16 @@ func DecryptBlocksRequest(client io.Writer, r *http.Request, startOffset, length
 	}

 	if len(objInfo.Parts) == 0 || !objInfo.IsEncryptedMultipart() {
-		writer, err := DecryptRequestWithSequenceNumber(client, r, seqNumber, objInfo.UserDefined)
+		var writer io.WriteCloser
+		var err error
+		if copySource {
+			writer, err = DecryptCopyRequest(client, r, objInfo.UserDefined)
+		} else {
+			writer, err = DecryptRequestWithSequenceNumber(client, r, seqNumber, objInfo.UserDefined)
+		}
 		if err != nil {
 			return nil, 0, 0, err
 		}
 		return writer, encStartOffset, encLength, nil
 	}
@@ -614,10 +647,28 @@ func DecryptBlocksRequest(client io.Writer, r *http.Request, startOffset, length
 		partIndex: partStartIndex,
 		req: r,
 		customerKeyHeader: r.Header.Get(SSECustomerKey),
-		metadata: objInfo.UserDefined,
+		copySource: copySource,
 	}

-	w.buildDecrypter(partStartIndex + 1)
+	w.metadata = map[string]string{}
+	// Copy encryption metadata for internal use.
+	for k, v := range objInfo.UserDefined {
+		w.metadata[k] = v
+	}
+
+	// Purge all the encryption headers.
+	delete(objInfo.UserDefined, ServerSideEncryptionIV)
+	delete(objInfo.UserDefined, ServerSideEncryptionSealAlgorithm)
+	delete(objInfo.UserDefined, ServerSideEncryptionSealedKey)
+	delete(objInfo.UserDefined, ReservedMetadataPrefix+"Encrypted-Multipart")
+
+	if w.copySource {
+		w.customerKeyHeader = r.Header.Get(SSECopyCustomerKey)
+	}
+
+	if err := w.buildDecrypter(partStartIndex + 1); err != nil {
+		return nil, 0, 0, err
+	}
 	return w, encStartOffset, encLength, nil
 }
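Taken together, these hunks make DecryptBlocksWriter walk part boundaries during Write. A condensed model of that walk (the type, fields, and build callback are simplified stand-ins, not MinIO's actual implementation):

package sketch

import "io"

// blocksWriter feeds bytes to the current part's decrypter and, once
// that part's encrypted size has been consumed, closes it and builds
// the decrypter for the next part.
type blocksWriter struct {
    decrypter        io.WriteCloser
    partSizes        []int64 // encrypted size of each part
    partIndex        int     // zero-based index into partSizes
    partEncRelOffset int64   // bytes already written into the current part
    build            func(partID int) (io.WriteCloser, error)
}

func (w *blocksWriter) Write(p []byte) (int, error) {
    var written int
    for len(p) > 0 {
        remaining := w.partSizes[w.partIndex] - w.partEncRelOffset
        n := int64(len(p))
        if n > remaining {
            n = remaining
        }
        if _, err := w.decrypter.Write(p[:n]); err != nil {
            return written, err
        }
        written += int(n)
        w.partEncRelOffset += n
        p = p[n:]
        if w.partEncRelOffset < w.partSizes[w.partIndex] {
            continue // still inside the current part
        }
        if w.partIndex+1 == len(w.partSizes) {
            break // last part done; trailing bytes are an error upstream
        }
        // Close flushes the finished part; the shared destination
        // writer survives because it was wrapped in a NopCloser.
        if err := w.decrypter.Close(); err != nil {
            return written, err
        }
        w.partIndex++
        w.partEncRelOffset = 0
        next, err := w.build(w.partIndex + 1) // part IDs are 1-based
        if err != nil {
            return written, err
        }
        w.decrypter = next
    }
    return written, nil
}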

cmd/fs-v1.go

@@ -398,6 +398,10 @@ func (fs *FSObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject strin
 	// Save objects' metadata in `fs.json`.
 	fsMeta := newFSMetaV1()
+	if _, err = fsMeta.ReadFrom(wlk); err != nil {
+		return oi, toObjectErr(err, srcBucket, srcObject)
+	}
+
 	fsMeta.Meta = srcInfo.UserDefined
 	if _, err = fsMeta.WriteTo(wlk); err != nil {
 		return oi, toObjectErr(err, srcBucket, srcObject)
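The fs fix is a classic read-modify-write: load the existing fs.json first so that every field other than Meta survives the copy. A generic sketch of the same pattern over a JSON metadata file (fsMeta below is a hypothetical stand-in for the real fsMetaV1, not MinIO code):

package main

import (
    "encoding/json"
    "os"
)

// fsMeta is a hypothetical stand-in for fsMetaV1.
type fsMeta struct {
    Version string            `json:"version"`
    Meta    map[string]string `json:"meta"`
}

// updateMeta loads the existing document, replaces only Meta, and
// writes the whole document back, preserving every other field.
func updateMeta(path string, newMeta map[string]string) error {
    var m fsMeta
    if b, err := os.ReadFile(path); err == nil {
        _ = json.Unmarshal(b, &m) // keep whatever was stored before
    }
    m.Meta = newMeta
    b, err := json.MarshalIndent(m, "", "  ")
    if err != nil {
        return err
    }
    return os.WriteFile(path, b, 0o644)
}

func main() {
    _ = updateMeta("fs.json", map[string]string{"X-Amz-Meta-Foo": "bar"})
}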

cmd/object-handlers.go

@@ -152,7 +152,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
 			// additionally also skipping mod(offset)64KiB boundaries.
 			writer = ioutil.LimitedWriter(writer, startOffset%(64*1024), length)

-			writer, startOffset, length, err = DecryptBlocksRequest(writer, r, startOffset, length, objInfo)
+			writer, startOffset, length, err = DecryptBlocksRequest(writer, r, startOffset, length, objInfo, false)
 			if err != nil {
 				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
 				return
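The startOffset%(64*1024) skip exists because sio's DARE format encrypts in 64 KiB plaintext packages: decryption can only begin on a package boundary, so the bytes falling before the requested offset inside the first package must be decrypted and discarded. A rough sketch of the offset arithmetic, assuming 32 bytes of per-package overhead (16-byte header plus 16-byte tag in DARE; treat the constant as an assumption):

package main

import "fmt"

const (
    pkgSize  = 64 * 1024 // DARE plaintext package size
    overhead = 32        // assumed per-package header+tag overhead
)

// encryptedRange maps a requested plaintext range to the ciphertext
// offset/length to read and the decrypted bytes to skip. The final
// package may be shorter than pkgSize; a real implementation clamps
// encLength to the object's encrypted size.
func encryptedRange(offset, length int64) (encOffset, encLength, skip int64) {
    skip = offset % pkgSize
    firstPkg := offset / pkgSize
    lastPkg := (offset + length - 1) / pkgSize
    encOffset = firstPkg * (pkgSize + overhead)
    encLength = (lastPkg - firstPkg + 1) * (pkgSize + overhead)
    return encOffset, encLength, skip
}

func main() {
    fmt.Println(encryptedRange(100000, 50000))
}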
@@ -382,19 +382,23 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 	var writer io.WriteCloser = pipeWriter
 	var reader io.Reader = pipeReader

+	srcInfo.Reader, err = hash.NewReader(reader, srcInfo.Size, "", "")
+	if err != nil {
+		pipeWriter.CloseWithError(err)
+		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		return
+	}
+
+	// Save the original size for later use when we want to copy
+	// an encrypted file into an unencrypted one.
+	size := srcInfo.Size
+
 	var encMetadata = make(map[string]string)
 	if objectAPI.IsEncryptionSupported() {
 		var oldKey, newKey []byte
 		sseCopyC := IsSSECopyCustomerRequest(r.Header)
 		sseC := IsSSECustomerRequest(r.Header)
-		if sseCopyC {
-			oldKey, err = ParseSSECopyCustomerRequest(r)
-			if err != nil {
-				pipeWriter.CloseWithError(err)
-				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
-				return
-			}
-		}
 		if sseC {
 			newKey, err = ParseSSECustomerRequest(r)
 			if err != nil {
@@ -406,7 +410,14 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 		// AWS S3 implementation requires us to only rotate keys
 		// when both keys are provided and destination is same
 		// otherwise we proceed to encrypt/decrypt.
-		if len(oldKey) > 0 && len(newKey) > 0 && cpSrcDstSame {
+		if sseCopyC && sseC && cpSrcDstSame {
+			// Get the old key which needs to be rotated.
+			oldKey, err = ParseSSECopyCustomerRequest(r)
+			if err != nil {
+				pipeWriter.CloseWithError(err)
+				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+				return
+			}
 			for k, v := range srcInfo.UserDefined {
 				encMetadata[k] = v
 			}
@@ -418,10 +429,8 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 		} else {
 			if sseCopyC {
 				// Source is encrypted make sure to save the encrypted size.
-				if srcInfo.IsEncrypted() {
-					srcInfo.Size = srcInfo.EncryptedSize()
-				}
-				writer, err = newDecryptWriter(pipeWriter, oldKey, 0, srcInfo.UserDefined)
+				writer = ioutil.LimitedWriter(writer, 0, srcInfo.Size)
+				writer, srcInfo.Size, err = DecryptAllBlocksCopyRequest(writer, r, srcInfo)
 				if err != nil {
 					pipeWriter.CloseWithError(err)
 					writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@@ -431,11 +440,14 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 				// we are creating a new object at this point, even
 				// if source and destination are same objects.
 				srcInfo.metadataOnly = false
+				if sseC {
+					size = srcInfo.Size
+				}
 			}
 			if sseC {
 				reader, err = newEncryptReader(pipeReader, newKey, encMetadata)
 				if err != nil {
-					pipeReader.CloseWithError(err)
+					pipeWriter.CloseWithError(err)
 					writeErrorResponse(w, toAPIErrorCode(err), r.URL)
 					return
 				}
@@ -443,6 +455,15 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 				// we are creating a new object at this point, even
 				// if source and destination are same objects.
 				srcInfo.metadataOnly = false
+				if !sseCopyC {
+					size = srcInfo.EncryptedSize()
+				}
+			}
+			srcInfo.Reader, err = hash.NewReader(reader, size, "", "") // do not try to verify encrypted content
+			if err != nil {
+				pipeWriter.CloseWithError(err)
+				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+				return
 			}
 		}
 	}
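All of the size juggling above (size := srcInfo.Size, srcInfo.EncryptedSize(), the rewrite performed by DecryptAllBlocksCopyRequest) hinges on mapping a plaintext size to its DARE ciphertext size. A sketch of the relation EncryptedSize presumably implements, again assuming 64 KiB packages with 32 bytes of overhead each:

package main

import "fmt"

// encryptedSize maps a plaintext size to its DARE ciphertext size:
// every full 64KiB package grows by 32 bytes, and a final partial
// package grows by 32 bytes as well.
func encryptedSize(size int64) int64 {
    const pkg = 64 * 1024
    encSize := (size / pkg) * (pkg + 32)
    if rem := size % pkg; rem > 0 {
        encSize += rem + 32
    }
    return encSize
}

func main() {
    fmt.Println(encryptedSize(1))       // 33
    fmt.Println(encryptedSize(64*1024)) // 65568
}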
@@ -450,7 +471,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 	srcInfo.UserDefined, err = getCpObjMetadataFromHeader(r.Header, srcInfo.UserDefined)
 	if err != nil {
-		pipeReader.CloseWithError(err)
+		pipeWriter.CloseWithError(err)
 		errorIf(err, "found invalid http request header")
 		writeErrorResponse(w, ErrInternalError, r.URL)
 		return
@@ -469,21 +490,13 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 	// destination are same objects. Apply this restriction also when
 	// metadataOnly is true indicating that we are not overwriting the object.
 	if !isMetadataReplace(r.Header) && srcInfo.metadataOnly {
-		pipeReader.CloseWithError(fmt.Errorf("invalid copy dest"))
+		pipeWriter.CloseWithError(fmt.Errorf("invalid copy dest"))
 		// If x-amz-metadata-directive is not set to REPLACE then we need
 		// to error out if source and destination are same.
 		writeErrorResponse(w, ErrInvalidCopyDest, r.URL)
 		return
 	}

-	hashReader, err := hash.NewReader(reader, srcInfo.Size, "", "") // do not try to verify encrypted content
-	if err != nil {
-		pipeReader.CloseWithError(err)
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
-		return
-	}
-	srcInfo.Reader = hashReader

 	// Copy source object to destination, if source and destination
 	// object is same then only metadata is updated.
 	objInfo, err := objectAPI.CopyObject(srcBucket, srcObject, dstBucket, dstObject, srcInfo)
@@ -876,6 +889,12 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 	var writer io.WriteCloser = pipeWriter
 	var reader io.Reader = pipeReader

+	srcInfo.Reader, err = hash.NewReader(reader, length, "", "")
+	if err != nil {
+		pipeWriter.CloseWithError(err)
+		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+		return
+	}
 	if objectAPI.IsEncryptionSupported() {
 		var li ListPartsInfo
 		li, err = objectAPI.ListObjectParts(dstBucket, dstObject, uploadID, 0, 1)
@@ -884,6 +903,18 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
 			return
 		}
+		sseCopyC := IsSSECopyCustomerRequest(r.Header)
+		if sseCopyC {
+			// Response writer should be limited early on for decryption upto required length,
+			// additionally also skipping mod(offset)64KiB boundaries.
+			writer = ioutil.LimitedWriter(writer, startOffset%(64*1024), length)
+			writer, startOffset, length, err = DecryptBlocksRequest(pipeWriter, r, startOffset, length, srcInfo, true)
+			if err != nil {
+				pipeWriter.CloseWithError(err)
+				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
+				return
+			}
+		}
 		if li.IsEncrypted() {
 			if !IsSSECustomerRequest(r.Header) {
 				writeErrorResponse(w, ErrSSEMultipartEncrypted, r.URL)
@@ -906,19 +937,20 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 				return
 			}

-			reader, err = sio.EncryptReader(reader, sio.Config{Key: objectEncryptionKey})
+			reader, err = sio.EncryptReader(pipeReader, sio.Config{Key: objectEncryptionKey})
 			if err != nil {
 				pipeWriter.CloseWithError(err)
 				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
 				return
 			}
-		}
-		if IsSSECopyCustomerRequest(r.Header) {
-			// Response writer should be limited early on for decryption upto required length,
-			// additionally also skipping mod(offset)64KiB boundaries.
-			writer = ioutil.LimitedWriter(writer, startOffset%(64*1024), length)
-			writer, startOffset, length, err = DecryptBlocksRequest(pipeWriter, r, startOffset, length, srcInfo)
+			size := length
+			if !sseCopyC {
+				info := ObjectInfo{Size: length}
+				size = info.EncryptedSize()
+			}
+			srcInfo.Reader, err = hash.NewReader(reader, size, "", "")
 			if err != nil {
 				pipeWriter.CloseWithError(err)
 				writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@@ -926,14 +958,6 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 			}
 		}
 	}

-	hashReader, err := hash.NewReader(reader, length, "", "") // do not try to verify encrypted content
-	if err != nil {
-		pipeWriter.CloseWithError(err)
-		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
-		return
-	}
-	srcInfo.Reader = hashReader

 	srcInfo.Writer = writer

 	// Copy source object to destination, if source and destination
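Both copy handlers are built around the same pipe plumbing: the source object is streamed into the write end (wrapped in decrypting writers when the source is SSE-C), while the destination reads from the read end (wrapped in an encrypting reader when the destination is SSE-C). A stripped-down model of that flow; io.Pipe is the only real API involved:

package main

import (
    "io"
    "strings"
)

func main() {
    pr, pw := io.Pipe()
    go func() {
        // "GetObject" side: the source object is streamed into the
        // write end; a decrypting WriteCloser would wrap pw here.
        _, err := io.Copy(pw, strings.NewReader("source object bytes"))
        pw.CloseWithError(err) // CloseWithError(nil) closes normally
    }()
    // "PutObject" side: an encrypting reader (and the hash.Reader)
    // would wrap pr here before it reaches CopyObject.
    _, _ = io.Copy(io.Discard, pr)
}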

cmd/xl-v1-object.go

@@ -83,7 +83,9 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
 	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))

 	// Read metadata associated with the object from all disks.
-	metaArr, errs := readAllXLMetadata(xl.getDisks(), srcBucket, srcObject)
+	storageDisks := xl.getDisks()
+
+	metaArr, errs := readAllXLMetadata(storageDisks, srcBucket, srcObject)

 	// get Quorum for this object
 	readQuorum, writeQuorum, err := objectQuorumFromMeta(xl, metaArr, errs)
@@ -96,7 +98,7 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
 	}

 	// List all online disks.
-	onlineDisks, modTime := listOnlineDisks(xl.getDisks(), metaArr, errs)
+	_, modTime := listOnlineDisks(storageDisks, metaArr, errs)

 	// Pick latest valid metadata.
 	xlMeta, err := pickValidXLMeta(metaArr, modTime)
@@ -104,25 +106,22 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
 		return oi, toObjectErr(err, srcBucket, srcObject)
 	}

-	// Reorder online disks based on erasure distribution order.
-	onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution)

 	// Length of the file to read.
 	length := xlMeta.Stat.Size

 	// Check if this request is only metadata update.
 	if cpSrcDstSame {
-		xlMeta.Meta = srcInfo.UserDefined
-		partsMetadata := make([]xlMetaV1, len(xl.getDisks()))
 		// Update `xl.json` content on each disks.
-		for index := range partsMetadata {
-			partsMetadata[index] = xlMeta
+		for index := range metaArr {
+			metaArr[index].Meta = srcInfo.UserDefined
 		}

+		var onlineDisks []StorageAPI

 		tempObj := mustGetUUID()

 		// Write unique `xl.json` for each disk.
-		if onlineDisks, err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, writeQuorum); err != nil {
+		if onlineDisks, err = writeUniqueXLMetadata(storageDisks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
 			return oi, toObjectErr(err, srcBucket, srcObject)
 		}

 		// Rename atomically `xl.json` from tmp location to destination for each disk.
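The metadata-only path keeps its crash-safety property: updated xl.json documents are written under the tmp bucket and then renamed into place, so a reader never observes a torn metadata update. A filesystem-level sketch of the same idea (the helper below is illustrative, not MinIO code; os.Rename is atomic on POSIX within a single filesystem):

package main

import "os"

// writeMetaAtomically writes the new document next to the target and
// renames it into place, so readers see either the old or the new
// file, never a partial write.
func writeMetaAtomically(path string, data []byte) error {
    tmp := path + ".tmp"
    if err := os.WriteFile(tmp, data, 0o644); err != nil {
        return err
    }
    return os.Rename(tmp, path)
}

func main() {
    if err := writeMetaAtomically("xl.json", []byte("{}")); err != nil {
        panic(err)
    }
}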

pkg/ioutil/ioutil.go

@@ -114,3 +114,15 @@ func (w *LimitWriter) Close() error {
 func LimitedWriter(w io.Writer, skipBytes int64, limit int64) *LimitWriter {
 	return &LimitWriter{w, skipBytes, limit}
 }
+
+type nopCloser struct {
+	io.Writer
+}
+
+func (nopCloser) Close() error { return nil }
+
+// NopCloser returns a WriteCloser with a no-op Close method wrapping
+// the provided Writer w.
+func NopCloser(w io.Writer) io.WriteCloser {
+	return nopCloser{w}
+}
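NopCloser is what lets buildDecrypter close each per-part decrypter (flushing its buffered package) without tearing down the shared destination writer. A small self-contained demonstration of the pattern; the per-part decrypters are stand-ins, only the no-op Close matters here:

package main

import (
    "bytes"
    "fmt"
    "io"
)

// nopCloser mirrors pkg/ioutil's NopCloser for this demo.
type nopCloser struct{ io.Writer }

func (nopCloser) Close() error { return nil }

func main() {
    var buf bytes.Buffer
    // Wrap the shared destination once; per-part wrappers may then be
    // closed freely without ending the stream.
    shared := nopCloser{&buf}
    for part := 1; part <= 3; part++ {
        fmt.Fprintf(shared, "part-%d;", part)
        // A real per-part decrypter is Closed here to flush its final
        // package; the underlying buffer survives for the next part.
        _ = shared.Close()
    }
    fmt.Println(buf.String()) // part-1;part-2;part-3;
}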