validate storage class across pools when setting config (#11320)

```
mc admin config set alias/ storage_class standard=EC:3
```

should only succeed if the parity ratio is valid for all
server pools; if it is not, we should fail proactively.
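
A minimal sketch of what such a cross-pool check amounts to, using illustrative types (the real MinIO internals differ) and the usual parity <= drives-per-set/2 constraint:

```
package main

import "fmt"

// pool is an illustrative stand-in for a server pool; only the
// erasure-set drive count matters for parity validation.
type pool struct {
	name          string
	setDriveCount int
}

// validateParity rejects a parity value that any pool cannot satisfy,
// assuming the usual constraint parity <= setDriveCount/2.
func validateParity(parity int, pools []pool) error {
	for _, p := range pools {
		if parity > p.setDriveCount/2 {
			return fmt.Errorf("parity %d not accepted: pool %s has only %d drives per erasure set",
				parity, p.name, p.setDriveCount)
		}
	}
	return nil
}

func main() {
	pools := []pool{{"pool-1", 16}, {"pool-2", 4}}
	// EC:3 fits a 16-drive set but not a 4-drive set (max EC:2).
	fmt.Println(validateParity(3, pools))
}
```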

This PR also brings other changes, now that
we need to cater for varying drive counts per pool.

As a bonus, this also fixes various bugs reproduced with:

- GetObjectWithPartNumber() (see the sketch after this list)
- CopyObjectPartWithOffsets()
- CopyObjectWithMetadata()
- PutObjectPart and PutObject with truncated streams
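
The GetObjectWithPartNumber() bug is the classic 1-based versus 0-based mismatch fixed in the first hunk below: S3 part numbers start at 1, while the parts slice is indexed from 0. A tiny sketch of the mapping (partIndex is a hypothetical helper, not a MinIO function):

```
package main

import "fmt"

// partIndex maps an S3 part number (1-based; 0 means "not set")
// to a 0-based slice index, mirroring the firstPart fix below.
func partIndex(partNumber int) int {
	if partNumber > 0 {
		return partNumber - 1
	}
	return 0
}

func main() {
	fmt.Println(partIndex(0), partIndex(1), partIndex(5)) // 0 0 4
}
```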
Harshavardhana authored on 2021-01-22 12:09:24 -08:00, committed by GitHub
parent a35cbb3ff3
commit a6c146bd00
27 changed files with 313 additions and 198 deletions


@@ -605,15 +605,24 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, cl
     if err != nil {
         return nil, 0, 0, err
     }
     // if object is encrypted, transition content without decrypting.
-    if opts.TransitionStatus == lifecycle.TransitionPending && isEncrypted {
+    if opts.TransitionStatus == lifecycle.TransitionPending && (isEncrypted || isCompressed) {
         isEncrypted = false
+        isCompressed = false
     }
-    var firstPart = opts.PartNumber
-    var skipLen int64
     // Calculate range to read (different for encrypted/compressed objects)
     switch {
     case isCompressed:
+        var firstPart int
+        if opts.PartNumber > 0 {
+            // firstPart is an index to Parts slice,
+            // make sure that PartNumber uses the
+            // index value properly.
+            firstPart = opts.PartNumber - 1
+        }
+
         // If compressed, we start from the beginning of the part.
         // Read the decompressed size from the meta.json.
         actualSize, err := oi.GetActualSize()
@@ -631,7 +640,6 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, cl
         off, decOff, firstPart = getCompressedOffsets(oi, off)
         decLength = length
         length = oi.Size - off
-
         // For negative length we read everything.
         if decLength < 0 {
             decLength = actualSize - decOff
@@ -663,18 +671,18 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, cl
                 return nil, err
             }
             oi.Size = decLength
         }
         // Decompression reader.
         s2Reader := s2.NewReader(inputReader)
         // Apply the skipLen and limit on the decompressed stream.
-        err = s2Reader.Skip(decOff)
-        if err != nil {
-            // Call the cleanup funcs
-            for i := len(cFns) - 1; i >= 0; i-- {
-                cFns[i]()
+        if decOff > 0 {
+            if err = s2Reader.Skip(decOff); err != nil {
+                // Call the cleanup funcs
+                for i := len(cFns) - 1; i >= 0; i-- {
+                    cFns[i]()
+                }
+                return nil, err
             }
-            return nil, err
         }
         decReader := io.LimitReader(s2Reader, decLength)
@@ -702,6 +710,8 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, cl
     case isEncrypted:
         var seqNumber uint32
         var partStart int
+        var skipLen int64
+
         off, length, skipLen, seqNumber, partStart, err = oi.GetDecryptedRange(rs)
         if err != nil {
             return nil, 0, 0, err
@@ -899,20 +909,30 @@ func CleanMinioInternalMetadataKeys(metadata map[string]string) map[string]strin
 // newS2CompressReader will read data from r, compress it and return the compressed data as a Reader.
 // Use Close to ensure resources are released on incomplete streams.
-func newS2CompressReader(r io.Reader) io.ReadCloser {
+//
+// Providing the expected input size 'on' is recommended: with it, the
+// reader fails instead of creating an object when the client closes
+// the stream prematurely.
+func newS2CompressReader(r io.Reader, on int64) io.ReadCloser {
     pr, pw := io.Pipe()
     comp := s2.NewWriter(pw)
     // Copy input to compressor
     go func() {
-        _, err := io.Copy(comp, r)
+        cn, err := io.Copy(comp, r)
         if err != nil {
             comp.Close()
             pw.CloseWithError(err)
             return
         }
+        if on > 0 && on != cn {
+            // The client sent less data than it declared,
+            // fail the upload with IncompleteBody.
+            comp.Close()
+            pw.CloseWithError(IncompleteBody{})
+            return
+        }
         // Close the stream.
-        err = comp.Close()
-        if err != nil {
+        if err = comp.Close(); err != nil {
             pw.CloseWithError(err)
             return
         }
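
To see the truncated-stream protection in isolation, here is a self-contained sketch of the same pipe-poisoning pattern; it uses the real github.com/klauspost/compress/s2 package, but a plain error in place of MinIO's internal IncompleteBody type:

```
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"

	"github.com/klauspost/compress/s2"
)

var errIncompleteBody = errors.New("incomplete body")

// compressReader mirrors the shape of newS2CompressReader above: it
// pipes r through an S2 compressor and poisons the pipe if r yields
// fewer (or more) bytes than the expected size on.
func compressReader(r io.Reader, on int64) io.ReadCloser {
	pr, pw := io.Pipe()
	comp := s2.NewWriter(pw)
	go func() {
		cn, err := io.Copy(comp, r)
		if err != nil {
			comp.Close()
			pw.CloseWithError(err)
			return
		}
		if on > 0 && on != cn {
			// Short read: make sure the consumer sees an error
			// instead of a silently truncated object.
			comp.Close()
			pw.CloseWithError(errIncompleteBody)
			return
		}
		if err = comp.Close(); err != nil {
			pw.CloseWithError(err)
			return
		}
		pw.Close()
	}()
	return pr
}

func main() {
	// Claim 10 bytes but deliver only 5: reading the compressed
	// stream must surface the error.
	rc := compressReader(bytes.NewReader([]byte("hello")), 10)
	if _, err := io.ReadAll(rc); err != nil {
		fmt.Println(err) // incomplete body
	}
}
```

Poisoning the pipe with CloseWithError guarantees that whatever consumes the compressed stream observes an error before it can commit a short object, which is exactly what the PutObject/PutObjectPart fixes in this commit rely on.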