mirror of https://github.com/minio/minio.git (synced 2024-12-24 06:05:55 -05:00)

Add compressed file index (#15247)

This commit is contained in:
parent 3d969bd2b4
commit 911a17b149
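Summary (as reconstructed from the diff below): when an object or part is compressed with S2 and the stream exceeds 8 MiB, the compressor's seek index is captured through a new ObjectOptions.IndexCB callback, trimmed of its fixed framing, encrypted with a key derived from the object encryption key for SSE objects, and persisted per part (ObjectPartInfo.Index in FileInfo, PartIndices in xlMetaV2Object). On range reads, getCompressedOffsets loads the stored index to seek close to the requested offset in compressed (and, for SSE, encrypted) coordinates instead of decompressing each part from its start.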
@@ -501,6 +501,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
 	partSize := latestMeta.Parts[partIndex].Size
 	partActualSize := latestMeta.Parts[partIndex].ActualSize
 	partNumber := latestMeta.Parts[partIndex].Number
+	partIdx := latestMeta.Parts[partIndex].Index
 	tillOffset := erasure.ShardFileOffset(0, partSize, partSize)
 	readers := make([]io.ReaderAt, len(latestDisks))
 	checksumAlgo := erasureInfo.GetChecksumInfo(partNumber).Algorithm

@@ -550,7 +551,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
 	}

 	partsMetadata[i].DataDir = dstDataDir
-	partsMetadata[i].AddObjectPart(partNumber, "", partSize, partActualSize)
+	partsMetadata[i].AddObjectPart(partNumber, "", partSize, partActualSize, partIdx)
 	partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
 		PartNumber: partNumber,
 		Algorithm:  checksumAlgo,
@@ -237,12 +237,13 @@ func objectPartIndex(parts []ObjectPartInfo, partNumber int) int {
 }

 // AddObjectPart - add a new object part in order.
-func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize int64, actualSize int64) {
+func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize, actualSize int64, idx []byte) {
 	partInfo := ObjectPartInfo{
 		Number:     partNumber,
 		ETag:       partETag,
 		Size:       partSize,
 		ActualSize: actualSize,
+		Index:      idx,
 	}

 	// Update part info if it already exists.
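The signature change simply threads an optional per-part index through to the stored part metadata; callers without an index pass nil. A minimal, self-contained sketch of the new shape (simplified: it only appends, while the real helper also updates an existing part in place and keeps Parts ordered):

```go
package main

import "fmt"

// Stand-in for the fields this change touches; the real struct in
// cmd/erasure-metadata.go carries more bookkeeping.
type ObjectPartInfo struct {
	Number     int
	ETag       string
	Size       int64
	ActualSize int64
	Index      []byte // compression seek index, may be nil
}

type FileInfo struct{ Parts []ObjectPartInfo }

// AddObjectPart stores the new trailing idx parameter verbatim, so existing
// callers can pass nil and keep the old behavior.
func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize, actualSize int64, idx []byte) {
	fi.Parts = append(fi.Parts, ObjectPartInfo{
		Number: partNumber, ETag: partETag,
		Size: partSize, ActualSize: actualSize,
		Index: idx,
	})
}

func main() {
	var fi FileInfo
	fi.AddObjectPart(1, "etag.1", 1<<20, 1<<20, nil) // small/uncompressed part: no index
	fi.AddObjectPart(2, "etag.2", 512<<10, 4<<20, []byte{0x01, 0x02})
	fmt.Println(len(fi.Parts[0].Index), len(fi.Parts[1].Index)) // 0 2
}
```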
@@ -23,7 +23,7 @@ import (
 	"testing"
 	"time"

-	humanize "github.com/dustin/go-humanize"
+	"github.com/dustin/go-humanize"
 )

 const ActualSize = 1000

@@ -58,7 +58,7 @@ func TestAddObjectPart(t *testing.T) {
 	for _, testCase := range testCases {
 		if testCase.expectedIndex > -1 {
 			partNumString := strconv.Itoa(testCase.partNum)
-			fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize)
+			fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize, nil)
 		}

 		if index := objectPartIndex(fi.Parts, testCase.partNum); index != testCase.expectedIndex {

@@ -91,7 +91,7 @@ func TestObjectPartIndex(t *testing.T) {
 	// Add some parts for testing.
 	for _, testCase := range testCases {
 		partNumString := strconv.Itoa(testCase.partNum)
-		fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize)
+		fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize, nil)
 	}

 	// Add failure test case.

@@ -121,7 +121,7 @@ func TestObjectToPartOffset(t *testing.T) {
 	// Total size of all parts is 5,242,899 bytes.
 	for _, partNum := range []int{1, 2, 4, 5, 7} {
 		partNumString := strconv.Itoa(partNum)
-		fi.AddObjectPart(partNum, "etag."+partNumString, int64(partNum+humanize.MiByte), ActualSize)
+		fi.AddObjectPart(partNum, "etag."+partNumString, int64(partNum+humanize.MiByte), ActualSize, nil)
 	}

 	testCases := []struct {

@@ -160,7 +160,7 @@ func TestObjectToPartOffset(t *testing.T) {
 func TestFindFileInfoInQuorum(t *testing.T) {
 	getNFInfo := func(n int, quorum int, t int64, dataDir string) []FileInfo {
 		fi := newFileInfo("test", 8, 8)
-		fi.AddObjectPart(1, "etag", 100, 100)
+		fi.AddObjectPart(1, "etag", 100, 100, nil)
 		fi.ModTime = time.Unix(t, 0)
 		fi.DataDir = dataDir
 		fis := make([]FileInfo, n)
@@ -653,9 +653,13 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
 	fi.ModTime = UTCNow()

 	md5hex := r.MD5CurrentHexString()
+	var index []byte
+	if opts.IndexCB != nil {
+		index = opts.IndexCB()
+	}

 	// Add the current part.
-	fi.AddObjectPart(partID, md5hex, n, data.ActualSize())
+	fi.AddObjectPart(partID, md5hex, n, data.ActualSize(), index)

 	for i, disk := range onlineDisks {
 		if disk == OfflineDisk {

@@ -947,6 +951,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 			Number:     part.PartNumber,
 			Size:       currentFI.Parts[partIdx].Size,
 			ActualSize: currentFI.Parts[partIdx].ActualSize,
+			Index:      currentFI.Parts[partIdx].Index,
 		}
 	}

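Note the ordering: the backend only invokes opts.IndexCB after the part data has been fully drained, because the S2 writer can only emit its seek index once the stream is closed. A hedged sketch of what the callback ultimately hands back (ignoring the real helper's 8 MiB threshold and header trimming):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/s2"
)

func main() {
	src := bytes.Repeat([]byte("minio "), 1<<20) // 6 MiB of compressible data

	var compressed bytes.Buffer
	comp := s2.NewWriter(&compressed)
	if _, err := io.Copy(comp, bytes.NewReader(src)); err != nil {
		panic(err)
	}
	// CloseIndex flushes the stream and returns the seek index in one call.
	idx, err := comp.CloseIndex()
	if err != nil {
		panic(err)
	}
	indexCB := func() []byte { return idx } // roughly what opts.IndexCB returns
	fmt.Println("index bytes:", len(indexCB()))
}
```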
@@ -815,6 +815,10 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *
 	if n < data.Size() {
 		return ObjectInfo{}, IncompleteBody{Bucket: minioMetaBucket, Object: key}
 	}
+	var index []byte
+	if opts.IndexCB != nil {
+		index = opts.IndexCB()
+	}

 	for i, w := range writers {
 		if w == nil {

@@ -823,7 +827,7 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *
 			continue
 		}
 		partsMetadata[i].Data = inlineBuffers[i].Bytes()
-		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize())
+		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), index)
 		partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
 			PartNumber: 1,
 			Algorithm:  DefaultBitrotAlgorithm,

@@ -1071,6 +1075,10 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 		return ObjectInfo{}, IncompleteBody{Bucket: bucket, Object: object}
 	}

+	var compIndex []byte
+	if opts.IndexCB != nil {
+		compIndex = opts.IndexCB()
+	}
 	if !opts.NoLock {
 		lk := er.NewNSLock(bucket, object)
 		lkctx, err := lk.GetLock(ctx, globalOperationTimeout)

@@ -1091,7 +1099,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 		} else {
 			partsMetadata[i].Data = nil
 		}
-		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize())
+		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), compIndex)
 		partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
 			PartNumber: 1,
 			Algorithm:  DefaultBitrotAlgorithm,
@@ -849,6 +849,11 @@ func (es *erasureSingle) putMetacacheObject(ctx context.Context, key string, r *
 		return ObjectInfo{}, IncompleteBody{Bucket: minioMetaBucket, Object: key}
 	}

+	var index []byte
+	if opts.IndexCB != nil {
+		index = opts.IndexCB()
+	}
+
 	for i, w := range writers {
 		if w == nil {
 			// Make sure to avoid writing to disks which we couldn't complete in erasure.Encode()

@@ -856,7 +861,7 @@ func (es *erasureSingle) putMetacacheObject(ctx context.Context, key string, r *
 			continue
 		}
 		partsMetadata[i].Data = inlineBuffers[i].Bytes()
-		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize())
+		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), index)
 		partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
 			PartNumber: 1,
 			Algorithm:  DefaultBitrotAlgorithm,

@@ -1082,6 +1087,11 @@ func (es *erasureSingle) putObject(ctx context.Context, bucket string, object st
 		defer lk.Unlock(lkctx.Cancel)
 	}

+	var index []byte
+	if opts.IndexCB != nil {
+		index = opts.IndexCB()
+	}
+
 	for i, w := range writers {
 		if w == nil {
 			onlineDisks[i] = nil

@@ -1092,7 +1102,7 @@ func (es *erasureSingle) putObject(ctx context.Context, bucket string, object st
 		} else {
 			partsMetadata[i].Data = nil
 		}
-		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize())
+		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), index)
 		partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
 			PartNumber: 1,
 			Algorithm:  DefaultBitrotAlgorithm,

@@ -2369,8 +2379,13 @@ func (es *erasureSingle) PutObjectPart(ctx context.Context, bucket, object, uplo

 	md5hex := r.MD5CurrentHexString()

+	var index []byte
+	if opts.IndexCB != nil {
+		index = opts.IndexCB()
+	}
+
 	// Add the current part.
-	fi.AddObjectPart(partID, md5hex, n, data.ActualSize())
+	fi.AddObjectPart(partID, md5hex, n, data.ActualSize(), index)

 	for i, disk := range onlineDisks {
 		if disk == OfflineDisk {

@@ -2668,6 +2683,7 @@ func (es *erasureSingle) CompleteMultipartUpload(ctx context.Context, bucket str
 			Number:     part.PartNumber,
 			Size:       currentFI.Parts[partIdx].Size,
 			ActualSize: currentFI.Parts[partIdx].ActualSize,
+			Index:      currentFI.Parts[partIdx].Index,
 		}
 	}

@@ -78,6 +78,10 @@ type ObjectOptions struct {
 	WalkAscending bool // return Walk results in ascending order of versions

 	PrefixEnabledFn func(prefix string) bool // function which returns true if versioning is enabled on prefix
+
+	// IndexCB will return any index created by the compression.
+	// Object must have been read at this point.
+	IndexCB func() []byte
 }

 // ExpirationOptions represents object options for object expiration at objectLayer.
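The contract stated in the comment is the important part: the callback is only valid once the object data has been fully read. A hedged sketch of the intended call pattern (putToBackend is a hypothetical stand-in for PutObject/PutObjectPart; the real wiring appears in the handler and erasure hunks of this diff):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

type ObjectOptions struct {
	IndexCB func() []byte
}

// putToBackend must drain the reader *before* invoking IndexCB, since the
// compression index is only complete once the stream has been consumed.
func putToBackend(r io.Reader, opts ObjectOptions) []byte {
	io.Copy(io.Discard, r) // erasure-encode in the real code
	if opts.IndexCB != nil {
		return opts.IndexCB()
	}
	return nil
}

func main() {
	data := bytes.NewReader([]byte("payload"))
	opts := ObjectOptions{IndexCB: func() []byte { return []byte("idx") }}
	fmt.Printf("stored index: %q\n", putToBackend(data, opts))
}
```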
@@ -20,6 +20,8 @@ package cmd
 import (
 	"bytes"
 	"context"
+	"crypto/hmac"
+	"encoding/binary"
 	"encoding/hex"
 	"errors"
 	"fmt"

@@ -43,12 +45,15 @@ import (
 	"github.com/minio/minio/internal/config/dns"
 	"github.com/minio/minio/internal/config/storageclass"
 	"github.com/minio/minio/internal/crypto"
+	"github.com/minio/minio/internal/fips"
 	"github.com/minio/minio/internal/hash"
+	"github.com/minio/minio/internal/hash/sha256"
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/ioutil"
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/pkg/trie"
 	"github.com/minio/pkg/wildcard"
+	"github.com/minio/sio"
 )

 const (
@@ -513,12 +518,12 @@ func partNumberToRangeSpec(oi ObjectInfo, partNumber int) *HTTPRangeSpec {
 // Returns the compressed offset which should be skipped.
 // If encrypted offsets are adjusted for encrypted block headers/trailers.
 // Since de-compression is after decryption encryption overhead is only added to compressedOffset.
-func getCompressedOffsets(objectInfo ObjectInfo, offset int64) (compressedOffset int64, partSkip int64, firstPart int) {
+func getCompressedOffsets(oi ObjectInfo, offset int64, decrypt func([]byte) ([]byte, error)) (compressedOffset int64, partSkip int64, firstPart int, decryptSkip int64, seqNum uint32) {
 	var skipLength int64
 	var cumulativeActualSize int64
 	var firstPartIdx int
-	if len(objectInfo.Parts) > 0 {
-		for i, part := range objectInfo.Parts {
+	if len(oi.Parts) > 0 {
+		for i, part := range oi.Parts {
 			cumulativeActualSize += part.ActualSize
 			if cumulativeActualSize <= offset {
 				compressedOffset += part.Size

@@ -529,8 +534,52 @@ func getCompressedOffsets(objectInfo ObjectInfo, offset int64) (compressedOffset
 			}
 		}
 	}

-	return compressedOffset, offset - skipLength, firstPartIdx
+	partSkip = offset - skipLength
+
+	// Load index and skip more if feasible.
+	if partSkip > 0 && len(oi.Parts) > firstPartIdx && len(oi.Parts[firstPartIdx].Index) > 0 {
+		_, isEncrypted := crypto.IsEncrypted(oi.UserDefined)
+		if isEncrypted {
+			dec, err := decrypt(oi.Parts[firstPartIdx].Index)
+			if err == nil {
+				// Load Index
+				var idx s2.Index
+				_, err := idx.Load(restoreIndexHeaders(dec))
+
+				// Find compressed/uncompressed offsets of our partskip
+				compOff, uCompOff, err2 := idx.Find(partSkip)
+
+				if err == nil && err2 == nil && compOff > 0 {
+					// Encrypted.
+					const sseDAREEncPackageBlockSize = SSEDAREPackageBlockSize + SSEDAREPackageMetaSize
+					// Number of full blocks in skipped area
+					seqNum = uint32(compOff / SSEDAREPackageBlockSize)
+					// Skip this many inside a decrypted block to get to compression block start
+					decryptSkip = compOff % SSEDAREPackageBlockSize
+					// Skip this number of full blocks.
+					skipEnc := compOff / SSEDAREPackageBlockSize
+					skipEnc *= sseDAREEncPackageBlockSize
+					compressedOffset += skipEnc
+					// Skip this number of uncompressed bytes.
+					partSkip -= uCompOff
+				}
+			}
+		} else {
+			// Not encrypted
+			var idx s2.Index
+			_, err := idx.Load(restoreIndexHeaders(oi.Parts[firstPartIdx].Index))

+			// Find compressed/uncompressed offsets of our partskip
+			compOff, uCompOff, err2 := idx.Find(partSkip)
+
+			if err == nil && err2 == nil && compOff > 0 {
+				compressedOffset += compOff
+				partSkip -= uCompOff
+			}
+		}
+	}
+
+	return compressedOffset, partSkip, firstPartIdx, decryptSkip, seqNum
 }

 // GetObjectReader is a type that wraps a reader with a lock to
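To make the read-path arithmetic concrete, here is a hedged, self-contained sketch. It assumes MinIO's DARE package constants (a 64 KiB plaintext block plus 32 bytes of per-package overhead; dareBlockSize/dareMetaSize below stand in for SSEDAREPackageBlockSize/SSEDAREPackageMetaSize) and uses the public s2 index API this diff relies on. Find maps a wanted uncompressed offset to a compressed offset, which is then translated into a DARE sequence number, an in-block skip, and an on-wire encrypted offset:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/s2"
)

// Assumed stand-ins for SSEDAREPackageBlockSize / SSEDAREPackageMetaSize.
const (
	dareBlockSize = 64 << 10
	dareMetaSize  = 32
)

func main() {
	// Build a compressed stream plus index, as the write path does.
	var out bytes.Buffer
	w := s2.NewWriter(&out)
	if _, err := io.Copy(w, bytes.NewReader(bytes.Repeat([]byte("0123456789abcdef"), 1<<20))); err != nil {
		panic(err)
	}
	idxBytes, err := w.CloseIndex()
	if err != nil {
		panic(err)
	}

	var idx s2.Index
	if _, err := idx.Load(idxBytes); err != nil {
		panic(err)
	}

	partSkip := int64(10 << 20) // caller wants uncompressed offset 10 MiB into the part
	compOff, uCompOff, err := idx.Find(partSkip)
	if err != nil {
		panic(err)
	}

	// The encrypted-object adjustments from getCompressedOffsets:
	seqNum := uint32(compOff / dareBlockSize)                          // DARE packages skipped entirely
	decryptSkip := compOff % dareBlockSize                             // bytes dropped inside the first decrypted package
	skipEnc := (compOff / dareBlockSize) * (dareBlockSize + dareMetaSize) // on-wire ciphertext bytes skipped

	fmt.Println("seek ciphertext by:", skipEnc, "start at package:", seqNum)
	fmt.Println("drop decrypted bytes:", decryptSkip, "then skip uncompressed:", partSkip-uCompOff)
}
```

For example, compOff = 200000 yields seqNum = 3 (three full 64 KiB packages), decryptSkip = 200000 - 3*65536 = 3392, and skipEnc = 3*65568 = 196704 bytes of ciphertext.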
@@ -618,6 +667,8 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
 		if err != nil {
 			return nil, 0, 0, err
 		}
+		var decryptSkip int64
+		var seqNum uint32

 		off, length = int64(0), oi.Size
 		decOff, decLength := int64(0), actualSize

@@ -626,10 +677,14 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
 		if err != nil {
 			return nil, 0, 0, err
 		}

+		decrypt := func(b []byte) ([]byte, error) {
+			return b, nil
+		}
+		if isEncrypted {
+			decrypt = oi.compressionIndexDecrypt
+		}
 		// In case of range based queries on multiparts, the offset and length are reduced.
-		off, decOff, firstPart = getCompressedOffsets(oi, off)
-
+		off, decOff, firstPart, decryptSkip, seqNum = getCompressedOffsets(oi, off, decrypt)
 		decLength = length
 		length = oi.Size - off
 		// For negative length we read everything.

@@ -646,7 +701,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
 		if isEncrypted {
 			copySource := h.Get(xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm) != ""
 			// Attach decrypter on inputReader
-			inputReader, err = DecryptBlocksRequestR(inputReader, h, 0, firstPart, oi, copySource)
+			inputReader, err = DecryptBlocksRequestR(inputReader, h, seqNum, firstPart, oi, copySource)
 			if err != nil {
 				// Call the cleanup funcs
 				for i := len(cFns) - 1; i >= 0; i-- {

@@ -654,10 +709,18 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
 				}
 				return nil, err
 			}
+			if decryptSkip > 0 {
+				inputReader = ioutil.NewSkipReader(inputReader, decryptSkip)
+			}
 			oi.Size = decLength
 		}
 		// Decompression reader.
-		s2Reader := s2.NewReader(inputReader)
+		var dopts []s2.ReaderOption
+		if off > 0 {
+			// We are not starting at the beginning, so ignore stream identifiers.
+			dopts = append(dopts, s2.ReaderIgnoreStreamIdentifier())
+		}
+		s2Reader := s2.NewReader(inputReader, dopts...)
 		// Apply the skipLen and limit on the decompressed stream.
 		if decOff > 0 {
 			if err = s2Reader.Skip(decOff); err != nil {
@@ -778,6 +841,41 @@ func (g *GetObjectReader) Close() error {
 	return nil
 }

+func compressionIndexEncrypter(key crypto.ObjectKey, input func() []byte) func() []byte {
+	var data []byte
+	var fetched bool
+	return func() []byte {
+		if !fetched {
+			data = input()
+			fetched = true
+		}
+		if len(data) == 0 {
+			return data
+		}
+		var buffer bytes.Buffer
+		mac := hmac.New(sha256.New, key[:])
+		mac.Write([]byte("compression-index"))
+		if _, err := sio.Encrypt(&buffer, bytes.NewReader(data), sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()}); err != nil {
+			logger.CriticalIf(context.Background(), errors.New("unable to encrypt compression index using object key"))
+		}
+		return buffer.Bytes()
+	}
+}
+
+func (o *ObjectInfo) compressionIndexDecrypt(input []byte) ([]byte, error) {
+	if len(input) == 0 {
+		return input, nil
+	}
+
+	key, err := decryptObjectInfo(nil, o.Bucket, o.Name, o.UserDefined)
+	if err != nil {
+		return nil, err
+	}
+	mac := hmac.New(sha256.New, key)
+	mac.Write([]byte("compression-index"))
+	return sio.DecryptBuffer(nil, input, sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()})
+}
+
 // SealMD5CurrFn seals md5sum with object encryption key and returns sealed
 // md5sum
 type SealMD5CurrFn func([]byte) []byte
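The index key is domain-separated from the object encryption key with HMAC-SHA256 over the fixed label "compression-index", so leaking the index key never exposes the object key. A minimal round-trip sketch of the same scheme, assuming a random 32-byte object key and default sio cipher suites instead of fips.DARECiphers():

```go
package main

import (
	"bytes"
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha256"
	"fmt"

	"github.com/minio/sio"
)

func main() {
	var objectKey [32]byte
	if _, err := rand.Read(objectKey[:]); err != nil {
		panic(err)
	}

	// Derive a dedicated index key from the object key.
	mac := hmac.New(sha256.New, objectKey[:])
	mac.Write([]byte("compression-index"))
	indexKey := mac.Sum(nil)

	plain := []byte("fake s2 index payload")

	var sealed bytes.Buffer
	if _, err := sio.Encrypt(&sealed, bytes.NewReader(plain), sio.Config{Key: indexKey}); err != nil {
		panic(err)
	}

	opened, err := sio.DecryptBuffer(nil, sealed.Bytes(), sio.Config{Key: indexKey})
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(opened, plain)) // true
}
```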
@@ -888,11 +986,13 @@ func init() {
 // input 'on' is always recommended such that this function works
 // properly, because we do not wish to create an object even if
 // client closed the stream prematurely.
-func newS2CompressReader(r io.Reader, on int64) io.ReadCloser {
+func newS2CompressReader(r io.Reader, on int64) (rc io.ReadCloser, idx func() []byte) {
 	pr, pw := io.Pipe()
 	// Copy input to compressor
-	go func() {
-		comp := s2.NewWriter(pw, compressOpts...)
+	comp := s2.NewWriter(pw, compressOpts...)
+	indexCh := make(chan []byte, 1)
+	go func() {
+		defer close(indexCh)
 		cn, err := io.Copy(comp, r)
 		if err != nil {
 			comp.Close()

@@ -907,9 +1007,25 @@ func newS2CompressReader(r io.Reader, on int64) io.ReadCloser {
 			return
 		}
 		// Close the stream.
+		// If more than 8MB was written, generate index.
+		if cn > 8<<20 {
+			idx, err := comp.CloseIndex()
+			idx = removeIndexHeaders(idx)
+			indexCh <- idx
+			pw.CloseWithError(err)
+			return
+		}
 		pw.CloseWithError(comp.Close())
 	}()
-	return pr
+	var gotIdx []byte
+	return pr, func() []byte {
+		if gotIdx != nil {
+			return gotIdx
+		}
+		// Will get index or nil if closed.
+		gotIdx = <-indexCh
+		return gotIdx
+	}
 }

 // compressSelfTest performs a self-test to ensure that compression
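The contract is subtle: the returned callback blocks until the compressor goroutine finishes, and receives nil when the channel is closed without an index (errors, or streams at or below the 8 MiB threshold). A standalone sketch of the same pattern, dropping the threshold and the header trimming of the real helper:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/s2"
)

// compressWithIndex mimics the newS2CompressReader shape: a reader of
// compressed data plus a callback yielding the seek index (or nil) once
// the stream has been fully drained.
func compressWithIndex(r io.Reader) (io.ReadCloser, func() []byte) {
	pr, pw := io.Pipe()
	indexCh := make(chan []byte, 1)
	comp := s2.NewWriter(pw)
	go func() {
		defer close(indexCh)
		if _, err := io.Copy(comp, r); err != nil {
			comp.Close()
			pw.CloseWithError(err)
			return
		}
		idx, err := comp.CloseIndex() // flush and return the index
		indexCh <- idx
		pw.CloseWithError(err)
	}()
	return pr, func() []byte { return <-indexCh }
}

func main() {
	data := bytes.Repeat([]byte("0123456789"), 1<<20) // 10 MiB
	rc, idxCB := compressWithIndex(bytes.NewReader(data))
	n, _ := io.Copy(io.Discard, rc) // must drain before asking for the index
	fmt.Println("compressed bytes:", n, "index bytes:", len(idxCB()))
}
```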
@@ -933,7 +1049,7 @@ func compressSelfTest() {
 		}
 	}
 	const skip = 2<<20 + 511
-	r := newS2CompressReader(bytes.NewBuffer(data), int64(len(data)))
+	r, _ := newS2CompressReader(bytes.NewBuffer(data), int64(len(data)))
 	b, err := io.ReadAll(r)
 	failOnErr(err)
 	failOnErr(r.Close())
@@ -1012,3 +1128,65 @@ func hasSpaceFor(di []*DiskInfo, size int64) bool {
 	wantLeft := uint64(float64(total) * (1.0 - diskFillFraction))
 	return available > wantLeft
 }
+
+// removeIndexHeaders will trim all headers and trailers from a given index.
+// This is expected to save 20 bytes.
+// These can be restored using restoreIndexHeaders.
+// This removes a layer of security, but is the most compact representation.
+// Returns nil if headers contains errors.
+// The returned slice references the provided slice.
+func removeIndexHeaders(b []byte) []byte {
+	const save = 4 + len(s2.S2IndexHeader) + len(s2.S2IndexTrailer) + 4
+	if len(b) <= save {
+		return nil
+	}
+	if b[0] != s2.ChunkTypeIndex {
+		return nil
+	}
+	chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16
+	b = b[4:]
+
+	// Validate we have enough...
+	if len(b) < chunkLen {
+		return nil
+	}
+	b = b[:chunkLen]
+
+	if !bytes.Equal(b[:len(s2.S2IndexHeader)], []byte(s2.S2IndexHeader)) {
+		return nil
+	}
+	b = b[len(s2.S2IndexHeader):]
+	if !bytes.HasSuffix(b, []byte(s2.S2IndexTrailer)) {
+		return nil
+	}
+	b = bytes.TrimSuffix(b, []byte(s2.S2IndexTrailer))
+
+	if len(b) < 4 {
+		return nil
+	}
+	return b[:len(b)-4]
+}
+
+// restoreIndexHeaders will restore the index headers removed by removeIndexHeaders.
+// No error checking is performed on the input.
+func restoreIndexHeaders(in []byte) []byte {
+	if len(in) == 0 {
+		return nil
+	}
+	b := make([]byte, 0, 4+len(s2.S2IndexHeader)+len(in)+len(s2.S2IndexTrailer)+4)
+	b = append(b, s2.ChunkTypeIndex, 0, 0, 0)
+	b = append(b, []byte(s2.S2IndexHeader)...)
+	b = append(b, in...)
+
+	var tmp [4]byte
+	binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)+4+len(s2.S2IndexTrailer)))
+	b = append(b, tmp[:4]...)
+	// Trailer
+	b = append(b, []byte(s2.S2IndexTrailer)...)
+
+	chunkLen := len(b) - 4 /*skippableFrameHeader*/
+	b[1] = uint8(chunkLen >> 0)
+	b[2] = uint8(chunkLen >> 8)
+	b[3] = uint8(chunkLen >> 16)
+	return b
+}
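The 20 bytes saved per stored index come from the fixed framing an s2 index chunk carries: a 4-byte skippable-chunk header, the index header magic, a 4-byte total-size field, and the trailer magic. The constants used above are exported by the s2 package, so the overhead can be computed directly:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/s2"
)

// The fixed framing overhead removeIndexHeaders strips and
// restoreIndexHeaders rebuilds.
func main() {
	overhead := 4 + len(s2.S2IndexHeader) + len(s2.S2IndexTrailer) + 4
	fmt.Println("bytes saved per stored index:", overhead) // 20
}
```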
@@ -593,7 +593,7 @@ func TestGetCompressedOffsets(t *testing.T) {
 		},
 	}
 	for i, test := range testCases {
-		startOffset, snappyStartOffset, firstPart := getCompressedOffsets(test.objInfo, test.offset)
+		startOffset, snappyStartOffset, firstPart, _, _ := getCompressedOffsets(test.objInfo, test.offset, nil)
 		if startOffset != test.startOffset {
 			t.Errorf("Test %d - expected startOffset %d but received %d",
 				i, test.startOffset, startOffset)

@@ -613,17 +613,18 @@ func TestS2CompressReader(t *testing.T) {
 	tests := []struct {
 		name    string
 		data    []byte
+		wantIdx bool
 	}{
 		{name: "empty", data: nil},
-		{name: "small", data: []byte("hello, world")},
-		{name: "large", data: bytes.Repeat([]byte("hello, world"), 1000)},
+		{name: "small", data: []byte("hello, world!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")},
+		{name: "large", data: bytes.Repeat([]byte("hello, world"), 1000000), wantIdx: true},
 	}

 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			buf := make([]byte, 100) // make small buffer to ensure multiple reads are required for large case

-			r := newS2CompressReader(bytes.NewReader(tt.data), int64(len(tt.data)))
+			r, idxCB := newS2CompressReader(bytes.NewReader(tt.data), int64(len(tt.data)))
 			defer r.Close()

 			var rdrBuf bytes.Buffer

@@ -631,7 +632,26 @@ func TestS2CompressReader(t *testing.T) {
 			if err != nil {
 				t.Fatal(err)
 			}
+			r.Close()
+			idx := idxCB()
+			if !tt.wantIdx && len(idx) > 0 {
+				t.Errorf("index returned above threshold")
+			}
+			if tt.wantIdx {
+				if idx == nil {
+					t.Errorf("no index returned")
+				}
+				var index s2.Index
+				_, err = index.Load(restoreIndexHeaders(idx))
+				if err != nil {
+					t.Errorf("error loading index: %v", err)
+				}
+				t.Log("size:", len(idx))
+				t.Log(string(index.JSON()))
+				if index.TotalUncompressed != int64(len(tt.data)) {
+					t.Errorf("Expected size %d, got %d", len(tt.data), index.TotalUncompressed)
+				}
+			}
 			var stdBuf bytes.Buffer
 			w := s2.NewWriter(&stdBuf)
 			_, err = io.CopyBuffer(w, bytes.NewReader(tt.data), buf)
@@ -1164,7 +1164,8 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 		compressMetadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(actualSize, 10)

 		reader = etag.NewReader(reader, nil)
-		s2c := newS2CompressReader(reader, actualSize)
+		s2c, cb := newS2CompressReader(reader, actualSize)
+		dstOpts.IndexCB = cb
 		defer s2c.Close()
 		reader = etag.Wrap(s2c, reader)
 		length = -1

@@ -1308,6 +1309,9 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 				return
 			}
+			if dstOpts.IndexCB != nil {
+				dstOpts.IndexCB = compressionIndexEncrypter(objEncKey, dstOpts.IndexCB)
+			}
 		}
 	}
 }

@@ -1715,6 +1719,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 	})

 	actualSize := size
+	var idxCb func() []byte
 	if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) && size > 0 {
 		// Storing the compression metadata.
 		metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2

@@ -1727,8 +1732,10 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 		}

 		// Set compression metrics.
-		s2c := newS2CompressReader(actualReader, actualSize)
+		var s2c io.ReadCloser
+		s2c, idxCb = newS2CompressReader(actualReader, actualSize)
 		defer s2c.Close()
+
 		reader = etag.Wrap(s2c, actualReader)
 		size = -1   // Since compressed size is un-predictable.
 		md5hex = "" // Do not try to verify the content.

@@ -1751,6 +1758,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
+	opts.IndexCB = idxCb

 	if api.CacheAPI() != nil {
 		putObject = api.CacheAPI().PutObject

@@ -1813,6 +1821,9 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 				return
 			}
+			if opts.IndexCB != nil {
+				opts.IndexCB = compressionIndexEncrypter(objectEncryptionKey, opts.IndexCB)
+			}
 		}
 	}

@@ -2061,6 +2072,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
 	}

 	actualSize := size
+	var idxCb func() []byte
 	if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) && size > 0 {
 		// Storing the compression metadata.
 		metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2

@@ -2072,8 +2084,9 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
 		}

 		// Set compression metrics.
-		s2c := newS2CompressReader(actualReader, actualSize)
+		s2c, cb := newS2CompressReader(actualReader, actualSize)
 		defer s2c.Close()
+		idxCb = cb
 		reader = etag.Wrap(s2c, actualReader)
 		size = -1 // Since compressed size is un-predictable.
 	}

@@ -2100,6 +2113,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
 			return err
 		}
 		opts.MTime = info.ModTime()
+		opts.IndexCB = idxCb

 		retentionMode, retentionDate, legalHold, s3err := checkPutObjectLockAllowed(ctx, r, bucket, object, getObjectInfo, retPerms, holdPerms)
 		if s3err == ErrNone && retentionMode.Valid() {

@@ -2153,6 +2167,9 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
 				return err
 			}
 		}
+		if opts.IndexCB != nil {
+			opts.IndexCB = compressionIndexEncrypter(objectEncryptionKey, opts.IndexCB)
+		}
 	}

 	// Ensure that metadata does not contain sensitive information

@@ -2571,8 +2588,10 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 	// Read compression metadata preserved in the init multipart for the decision.
 	_, isCompressed := mi.UserDefined[ReservedMetadataPrefix+"compression"]
 	// Compress only if the compression is enabled during initial multipart.
+	var idxCb func() []byte
 	if isCompressed {
-		s2c := newS2CompressReader(reader, actualPartSize)
+		s2c, cb := newS2CompressReader(reader, actualPartSize)
+		idxCb = cb
 		defer s2c.Close()
 		reader = etag.Wrap(s2c, reader)
 		length = -1

@@ -2589,6 +2608,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
+	dstOpts.IndexCB = idxCb

 	rawReader := srcInfo.Reader
 	pReader := NewPutObjReader(rawReader)

@@ -2643,6 +2663,9 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 			return
 		}
+		if dstOpts.IndexCB != nil {
+			dstOpts.IndexCB = compressionIndexEncrypter(objectEncryptionKey, dstOpts.IndexCB)
+		}
 	}

 	srcInfo.PutObjReader = pReader

@@ -2821,6 +2844,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 	// Read compression metadata preserved in the init multipart for the decision.
 	_, isCompressed := mi.UserDefined[ReservedMetadataPrefix+"compression"]

+	var idxCb func() []byte
 	if objectAPI.IsCompressionSupported() && isCompressed {
 		actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
 		if err != nil {

@@ -2829,7 +2853,8 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 		}

 		// Set compression metrics.
-		s2c := newS2CompressReader(actualReader, actualSize)
+		s2c, cb := newS2CompressReader(actualReader, actualSize)
+		idxCb = cb
 		defer s2c.Close()
 		reader = etag.Wrap(s2c, actualReader)
 		size = -1 // Since compressed size is un-predictable.

@@ -2904,7 +2929,11 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 			return
 		}
+		if idxCb != nil {
+			idxCb = compressionIndexEncrypter(objectEncryptionKey, idxCb)
+		}
 	}
+	opts.IndexCB = idxCb

 	putObjectPart := objectAPI.PutObjectPart
 	if api.CacheAPI() != nil {
@@ -132,6 +132,7 @@ type ObjectPartInfo struct {
 	Number     int    `json:"number"`
 	Size       int64  `json:"size"`
 	ActualSize int64  `json:"actualSize"`
+	Index      []byte `json:"index,omitempty" msg:"index,omitempty"`
 }

 // ChecksumInfo - carries checksums of individual scattered parts per disk.
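Both tags use omitempty, so metadata written without an index is byte-for-byte identical to what older releases produce, and older decoders simply skip the unknown "index" key. A small sketch of the JSON side of that guarantee (a stand-in struct keeping only the tags relevant here):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type ObjectPartInfo struct {
	Number     int    `json:"number"`
	Size       int64  `json:"size"`
	ActualSize int64  `json:"actualSize"`
	Index      []byte `json:"index,omitempty"`
}

func main() {
	old, _ := json.Marshal(ObjectPartInfo{Number: 1, Size: 10, ActualSize: 20})
	idx, _ := json.Marshal(ObjectPartInfo{Number: 1, Size: 10, ActualSize: 20, Index: []byte{1}})
	fmt.Println(string(old)) // no "index" key: the exact shape old readers expect
	fmt.Println(string(idx)) // "index" appears (base64-encoded) only when set
}
```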
@@ -593,6 +593,12 @@ func (z *ObjectPartInfo) DecodeMsg(dc *msgp.Reader) (err error) {
 				err = msgp.WrapError(err, "ActualSize")
 				return
 			}
+		case "index":
+			z.Index, err = dc.ReadBytes(z.Index)
+			if err != nil {
+				err = msgp.WrapError(err, "Index")
+				return
+			}
 		default:
 			err = dc.Skip()
 			if err != nil {
@@ -606,9 +612,23 @@ func (z *ObjectPartInfo) DecodeMsg(dc *msgp.Reader) (err error) {

 // EncodeMsg implements msgp.Encodable
 func (z *ObjectPartInfo) EncodeMsg(en *msgp.Writer) (err error) {
-	// map header, size 4
+	// omitempty: check for empty values
+	zb0001Len := uint32(5)
+	var zb0001Mask uint8 /* 5 bits */
+	if z.Index == nil {
+		zb0001Len--
+		zb0001Mask |= 0x10
+	}
+	// variable map header, size zb0001Len
+	err = en.Append(0x80 | uint8(zb0001Len))
+	if err != nil {
+		return
+	}
+	if zb0001Len == 0 {
+		return
+	}
 	// write "ETag"
-	err = en.Append(0x84, 0xa4, 0x45, 0x54, 0x61, 0x67)
+	err = en.Append(0xa4, 0x45, 0x54, 0x61, 0x67)
 	if err != nil {
 		return
 	}

@@ -647,15 +667,38 @@ func (z *ObjectPartInfo) EncodeMsg(en *msgp.Writer) (err error) {
 		err = msgp.WrapError(err, "ActualSize")
 		return
 	}
+	if (zb0001Mask & 0x10) == 0 { // if not empty
+		// write "index"
+		err = en.Append(0xa5, 0x69, 0x6e, 0x64, 0x65, 0x78)
+		if err != nil {
+			return
+		}
+		err = en.WriteBytes(z.Index)
+		if err != nil {
+			err = msgp.WrapError(err, "Index")
+			return
+		}
+	}
 	return
 }

 // MarshalMsg implements msgp.Marshaler
 func (z *ObjectPartInfo) MarshalMsg(b []byte) (o []byte, err error) {
 	o = msgp.Require(b, z.Msgsize())
-	// map header, size 4
+	// omitempty: check for empty values
+	zb0001Len := uint32(5)
+	var zb0001Mask uint8 /* 5 bits */
+	if z.Index == nil {
+		zb0001Len--
+		zb0001Mask |= 0x10
+	}
+	// variable map header, size zb0001Len
+	o = append(o, 0x80|uint8(zb0001Len))
+	if zb0001Len == 0 {
+		return
+	}
 	// string "ETag"
-	o = append(o, 0x84, 0xa4, 0x45, 0x54, 0x61, 0x67)
+	o = append(o, 0xa4, 0x45, 0x54, 0x61, 0x67)
 	o = msgp.AppendString(o, z.ETag)
 	// string "Number"
 	o = append(o, 0xa6, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72)

@@ -666,6 +709,11 @@ func (z *ObjectPartInfo) MarshalMsg(b []byte) (o []byte, err error) {
 	// string "ActualSize"
 	o = append(o, 0xaa, 0x41, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
 	o = msgp.AppendInt64(o, z.ActualSize)
+	if (zb0001Mask & 0x10) == 0 { // if not empty
+		// string "index"
+		o = append(o, 0xa5, 0x69, 0x6e, 0x64, 0x65, 0x78)
+		o = msgp.AppendBytes(o, z.Index)
+	}
 	return
 }

@@ -711,6 +759,12 @@ func (z *ObjectPartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
 				err = msgp.WrapError(err, "ActualSize")
 				return
 			}
+		case "index":
+			z.Index, bts, err = msgp.ReadBytesBytes(bts, z.Index)
+			if err != nil {
+				err = msgp.WrapError(err, "Index")
+				return
+			}
 		default:
 			bts, err = msgp.Skip(bts)
 			if err != nil {

@@ -725,7 +779,7 @@ func (z *ObjectPartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {

 // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z *ObjectPartInfo) Msgsize() (s int) {
-	s = 1 + 5 + msgp.StringPrefixSize + len(z.ETag) + 7 + msgp.IntSize + 5 + msgp.Int64Size + 11 + msgp.Int64Size
+	s = 1 + 5 + msgp.StringPrefixSize + len(z.ETag) + 7 + msgp.IntSize + 5 + msgp.Int64Size + 11 + msgp.Int64Size + 6 + msgp.BytesPrefixSize + len(z.Index)
 	return
 }
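The generated encoder no longer emits a constant fixmap byte (0x84, a 4-entry map): with omitempty it computes the entry count first and writes 0x80|len, which is how msgpack encodes maps of up to 15 entries. A tiny illustration of that header arithmetic:

```go
package main

import "fmt"

// msgpack fixmap header: 0x80 | number of entries (0..15).
func fixmapHeader(entries int) byte {
	if entries > 15 {
		panic("needs map16, not a fixmap")
	}
	return 0x80 | byte(entries)
}

func main() {
	fmt.Printf("%#x\n", fixmapHeader(4)) // 0x84: Index omitted, old wire shape
	fmt.Printf("%#x\n", fixmapHeader(5)) // 0x85: Index present
}
```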
@@ -167,6 +167,7 @@ type xlMetaV2Object struct {
 	PartETags       []string          `json:"PartETags" msg:"PartETags,allownil"`             // Part ETags
 	PartSizes       []int64           `json:"PartSizes" msg:"PartSizes"`                      // Part Sizes
 	PartActualSizes []int64           `json:"PartASizes,omitempty" msg:"PartASizes,allownil"` // Part ActualSizes (compression)
+	PartIndices     [][]byte          `json:"PartIndices,omitempty" msg:"PartIdx,omitempty"`  // Part Indexes (compression)
 	Size            int64             `json:"Size" msg:"Size"`                                // Object version size
 	ModTime         int64             `json:"MTime" msg:"MTime"`                              // Object version modified time
 	MetaSys         map[string][]byte `json:"MetaSys,omitempty" msg:"MetaSys,allownil"`       // Object version internal metadata

@@ -574,6 +575,9 @@ func (j xlMetaV2Object) ToFileInfo(volume, path string) (FileInfo, error) {
 			fi.Parts[i].ETag = j.PartETags[i]
 		}
 		fi.Parts[i].ActualSize = j.PartActualSizes[i]
+		if len(j.PartIndices) > 0 {
+			fi.Parts[i].Index = j.PartIndices[i]
+		}
 	}
 	fi.Erasure.Checksums = make([]ChecksumInfo, len(j.PartSizes))
 	for i := range fi.Parts {

@@ -1471,6 +1475,13 @@ func (x *xlMetaV2) AddVersion(fi FileInfo) error {
 			break
 		}
 	}
+	for i := range fi.Parts {
+		// Only add indices if any.
+		if len(fi.Parts[i].Index) > 0 {
+			ventry.ObjectV2.PartIndices = make([][]byte, len(fi.Parts))
+			break
+		}
+	}
 	for i := range fi.Erasure.Distribution {
 		ventry.ObjectV2.ErasureDist[i] = uint8(fi.Erasure.Distribution[i])
 	}

@@ -1482,6 +1493,9 @@ func (x *xlMetaV2) AddVersion(fi FileInfo) error {
 		}
 		ventry.ObjectV2.PartNumbers[i] = fi.Parts[i].Number
 		ventry.ObjectV2.PartActualSizes[i] = fi.Parts[i].ActualSize
+		if len(ventry.ObjectV2.PartIndices) > 0 {
+			ventry.ObjectV2.PartIndices[i] = fi.Parts[i].Index
+		}
 	}

 	tierFVIDKey := ReservedMetadataPrefixLower + tierFVID
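PartIndices is only allocated when at least one part actually carries an index, so the common uncompressed case adds nothing to xl.meta. A self-contained sketch of that allocation rule:

```go
package main

import "fmt"

type part struct{ Index []byte }

// Mirrors the AddVersion logic above: allocate the per-part slice only if
// some part has an index, then copy all entries (nil for parts without one).
func buildPartIndices(parts []part) [][]byte {
	var indices [][]byte
	for i := range parts {
		if len(parts[i].Index) > 0 {
			indices = make([][]byte, len(parts))
			break
		}
	}
	if indices == nil {
		return nil
	}
	for i := range parts {
		indices[i] = parts[i].Index
	}
	return indices
}

func main() {
	fmt.Println(buildPartIndices([]part{{}, {}}) == nil)            // true: nothing stored
	fmt.Println(buildPartIndices([]part{{}, {Index: []byte{1}}})) // [[] [1]]
}
```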
@@ -935,6 +935,25 @@ func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) {
 				}
 			}
 		}
+	case "PartIdx":
+		var zb0009 uint32
+		zb0009, err = dc.ReadArrayHeader()
+		if err != nil {
+			err = msgp.WrapError(err, "PartIndices")
+			return
+		}
+		if cap(z.PartIndices) >= int(zb0009) {
+			z.PartIndices = (z.PartIndices)[:zb0009]
+		} else {
+			z.PartIndices = make([][]byte, zb0009)
+		}
+		for za0008 := range z.PartIndices {
+			z.PartIndices[za0008], err = dc.ReadBytes(z.PartIndices[za0008])
+			if err != nil {
+				err = msgp.WrapError(err, "PartIndices", za0008)
+				return
+			}
+		}
 	case "Size":
 		z.Size, err = dc.ReadInt64()
 		if err != nil {

@@ -956,34 +975,34 @@ func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) {
 			}
 			z.MetaSys = nil
 		} else {
-			var zb0009 uint32
-			zb0009, err = dc.ReadMapHeader()
+			var zb0010 uint32
+			zb0010, err = dc.ReadMapHeader()
 			if err != nil {
 				err = msgp.WrapError(err, "MetaSys")
 				return
 			}
 			if z.MetaSys == nil {
-				z.MetaSys = make(map[string][]byte, zb0009)
+				z.MetaSys = make(map[string][]byte, zb0010)
 			} else if len(z.MetaSys) > 0 {
 				for key := range z.MetaSys {
 					delete(z.MetaSys, key)
 				}
 			}
-			for zb0009 > 0 {
-				zb0009--
-				var za0008 string
-				var za0009 []byte
-				za0008, err = dc.ReadString()
+			for zb0010 > 0 {
+				zb0010--
+				var za0009 string
+				var za0010 []byte
+				za0009, err = dc.ReadString()
 				if err != nil {
 					err = msgp.WrapError(err, "MetaSys")
 					return
 				}
-				za0009, err = dc.ReadBytes(za0009)
+				za0010, err = dc.ReadBytes(za0010)
 				if err != nil {
-					err = msgp.WrapError(err, "MetaSys", za0008)
+					err = msgp.WrapError(err, "MetaSys", za0009)
 					return
 				}
-				z.MetaSys[za0008] = za0009
+				z.MetaSys[za0009] = za0010
 			}
 		}
 	case "MetaUsr":

@@ -995,34 +1014,34 @@ func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) {
 			}
 			z.MetaUser = nil
 		} else {
-			var zb0010 uint32
-			zb0010, err = dc.ReadMapHeader()
+			var zb0011 uint32
+			zb0011, err = dc.ReadMapHeader()
 			if err != nil {
 				err = msgp.WrapError(err, "MetaUser")
 				return
 			}
 			if z.MetaUser == nil {
-				z.MetaUser = make(map[string]string, zb0010)
+				z.MetaUser = make(map[string]string, zb0011)
 			} else if len(z.MetaUser) > 0 {
 				for key := range z.MetaUser {
 					delete(z.MetaUser, key)
 				}
 			}
-			for zb0010 > 0 {
-				zb0010--
-				var za0010 string
-				var za0011 string
-				za0010, err = dc.ReadString()
+			for zb0011 > 0 {
+				zb0011--
+				var za0011 string
+				var za0012 string
+				za0011, err = dc.ReadString()
 				if err != nil {
 					err = msgp.WrapError(err, "MetaUser")
 					return
 				}
-				za0011, err = dc.ReadString()
+				za0012, err = dc.ReadString()
 				if err != nil {
-					err = msgp.WrapError(err, "MetaUser", za0010)
+					err = msgp.WrapError(err, "MetaUser", za0011)
 					return
 				}
-				z.MetaUser[za0010] = za0011
+				z.MetaUser[za0011] = za0012
 			}
 		}
 	default:

@@ -1038,9 +1057,23 @@ func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) {

 // EncodeMsg implements msgp.Encodable
 func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) {
-	// map header, size 17
+	// omitempty: check for empty values
+	zb0001Len := uint32(18)
+	var zb0001Mask uint32 /* 18 bits */
+	if z.PartIndices == nil {
+		zb0001Len--
+		zb0001Mask |= 0x2000
+	}
+	// variable map header, size zb0001Len
+	err = en.WriteMapHeader(zb0001Len)
+	if err != nil {
+		return
+	}
+	if zb0001Len == 0 {
+		return
+	}
 	// write "ID"
-	err = en.Append(0xde, 0x0, 0x11, 0xa2, 0x49, 0x44)
+	err = en.Append(0xa2, 0x49, 0x44)
 	if err != nil {
 		return
 	}

@@ -1218,6 +1251,25 @@ func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) {
 			}
 		}
 	}
+	if (zb0001Mask & 0x2000) == 0 { // if not empty
+		// write "PartIdx"
+		err = en.Append(0xa7, 0x50, 0x61, 0x72, 0x74, 0x49, 0x64, 0x78)
+		if err != nil {
+			return
+		}
+		err = en.WriteArrayHeader(uint32(len(z.PartIndices)))
+		if err != nil {
+			err = msgp.WrapError(err, "PartIndices")
+			return
+		}
+		for za0008 := range z.PartIndices {
+			err = en.WriteBytes(z.PartIndices[za0008])
+			if err != nil {
+				err = msgp.WrapError(err, "PartIndices", za0008)
+				return
+			}
+		}
+	}
 	// write "Size"
 	err = en.Append(0xa4, 0x53, 0x69, 0x7a, 0x65)
 	if err != nil {

@@ -1254,15 +1306,15 @@ func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) {
 			err = msgp.WrapError(err, "MetaSys")
 			return
 		}
-		for za0008, za0009 := range z.MetaSys {
-			err = en.WriteString(za0008)
+		for za0009, za0010 := range z.MetaSys {
+			err = en.WriteString(za0009)
 			if err != nil {
 				err = msgp.WrapError(err, "MetaSys")
 				return
 			}
-			err = en.WriteBytes(za0009)
+			err = en.WriteBytes(za0010)
 			if err != nil {
-				err = msgp.WrapError(err, "MetaSys", za0008)
+				err = msgp.WrapError(err, "MetaSys", za0009)
 				return
 			}
 		}

@@ -1283,15 +1335,15 @@ func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) {
 			err = msgp.WrapError(err, "MetaUser")
 			return
 		}
-		for za0010, za0011 := range z.MetaUser {
-			err = en.WriteString(za0010)
+		for za0011, za0012 := range z.MetaUser {
+			err = en.WriteString(za0011)
 			if err != nil {
 				err = msgp.WrapError(err, "MetaUser")
 				return
 			}
-			err = en.WriteString(za0011)
+			err = en.WriteString(za0012)
 			if err != nil {
-				err = msgp.WrapError(err, "MetaUser", za0010)
+				err = msgp.WrapError(err, "MetaUser", za0011)
 				return
 			}
 		}

@@ -1302,9 +1354,20 @@ func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) {
 // MarshalMsg implements msgp.Marshaler
 func (z *xlMetaV2Object) MarshalMsg(b []byte) (o []byte, err error) {
 	o = msgp.Require(b, z.Msgsize())
-	// map header, size 17
+	// omitempty: check for empty values
+	zb0001Len := uint32(18)
+	var zb0001Mask uint32 /* 18 bits */
+	if z.PartIndices == nil {
+		zb0001Len--
+		zb0001Mask |= 0x2000
+	}
+	// variable map header, size zb0001Len
+	o = msgp.AppendMapHeader(o, zb0001Len)
+	if zb0001Len == 0 {
+		return
+	}
 	// string "ID"
-	o = append(o, 0xde, 0x0, 0x11, 0xa2, 0x49, 0x44)
+	o = append(o, 0xa2, 0x49, 0x44)
 	o = msgp.AppendBytes(o, (z.VersionID)[:])
 	// string "DDir"
 	o = append(o, 0xa4, 0x44, 0x44, 0x69, 0x72)

@@ -1365,6 +1428,14 @@ func (z *xlMetaV2Object) MarshalMsg(b []byte) (o []byte, err error) {
 			o = msgp.AppendInt64(o, z.PartActualSizes[za0007])
 		}
 	}
+	if (zb0001Mask & 0x2000) == 0 { // if not empty
+		// string "PartIdx"
+		o = append(o, 0xa7, 0x50, 0x61, 0x72, 0x74, 0x49, 0x64, 0x78)
+		o = msgp.AppendArrayHeader(o, uint32(len(z.PartIndices)))
+		for za0008 := range z.PartIndices {
+			o = msgp.AppendBytes(o, z.PartIndices[za0008])
+		}
+	}
 	// string "Size"
 	o = append(o, 0xa4, 0x53, 0x69, 0x7a, 0x65)
 	o = msgp.AppendInt64(o, z.Size)

@@ -1377,9 +1448,9 @@ func (z *xlMetaV2Object) MarshalMsg(b []byte) (o []byte, err error) {
 		o = msgp.AppendNil(o)
 	} else {
 		o = msgp.AppendMapHeader(o, uint32(len(z.MetaSys)))
-		for za0008, za0009 := range z.MetaSys {
-			o = msgp.AppendString(o, za0008)
-			o = msgp.AppendBytes(o, za0009)
+		for za0009, za0010 := range z.MetaSys {
+			o = msgp.AppendString(o, za0009)
+			o = msgp.AppendBytes(o, za0010)
 		}
 	}
 	// string "MetaUsr"

@@ -1388,9 +1459,9 @@ func (z *xlMetaV2Object) MarshalMsg(b []byte) (o []byte, err error) {
 		o = msgp.AppendNil(o)
 	} else {
 		o = msgp.AppendMapHeader(o, uint32(len(z.MetaUser)))
-		for za0010, za0011 := range z.MetaUser {
-			o = msgp.AppendString(o, za0010)
-			o = msgp.AppendString(o, za0011)
+		for za0011, za0012 := range z.MetaUser {
+			o = msgp.AppendString(o, za0011)
+			o = msgp.AppendString(o, za0012)
 		}
 	}
 	return

@@ -1575,6 +1646,25 @@ func (z *xlMetaV2Object) UnmarshalMsg(bts []byte) (o []byte, err error) {
 				}
 			}
 		}
+	case "PartIdx":
+		var zb0009 uint32
+		zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+		if err != nil {
+			err = msgp.WrapError(err, "PartIndices")
+			return
+		}
+		if cap(z.PartIndices) >= int(zb0009) {
+			z.PartIndices = (z.PartIndices)[:zb0009]
+		} else {
+			z.PartIndices = make([][]byte, zb0009)
+		}
+		for za0008 := range z.PartIndices {
+			z.PartIndices[za0008], bts, err = msgp.ReadBytesBytes(bts, z.PartIndices[za0008])
+			if err != nil {
+				err = msgp.WrapError(err, "PartIndices", za0008)
+				return
+			}
+		}
 	case "Size":
 		z.Size, bts, err = msgp.ReadInt64Bytes(bts)
 		if err != nil {

@@ -1592,34 +1682,34 @@ func (z *xlMetaV2Object) UnmarshalMsg(bts []byte) (o []byte, err error) {
 			bts = bts[1:]
 			z.MetaSys = nil
 		} else {
-			var zb0009 uint32
-			zb0009, bts, err = msgp.ReadMapHeaderBytes(bts)
+			var zb0010 uint32
+			zb0010, bts, err = msgp.ReadMapHeaderBytes(bts)
 			if err != nil {
 				err = msgp.WrapError(err, "MetaSys")
 				return
 			}
 			if z.MetaSys == nil {
-				z.MetaSys = make(map[string][]byte, zb0009)
+				z.MetaSys = make(map[string][]byte, zb0010)
 			} else if len(z.MetaSys) > 0 {
 				for key := range z.MetaSys {
 					delete(z.MetaSys, key)
 				}
 			}
-			for zb0009 > 0 {
-				var za0008 string
-				var za0009 []byte
-				zb0009--
-				za0008, bts, err = msgp.ReadStringBytes(bts)
+			for zb0010 > 0 {
+				var za0009 string
+				var za0010 []byte
+				zb0010--
+				za0009, bts, err = msgp.ReadStringBytes(bts)
 				if err != nil {
 					err = msgp.WrapError(err, "MetaSys")
 					return
 				}
-				za0009, bts, err = msgp.ReadBytesBytes(bts, za0009)
+				za0010, bts, err = msgp.ReadBytesBytes(bts, za0010)
 				if err != nil {
-					err = msgp.WrapError(err, "MetaSys", za0008)
+					err = msgp.WrapError(err, "MetaSys", za0009)
 					return
 				}
-				z.MetaSys[za0008] = za0009
+				z.MetaSys[za0009] = za0010
 			}
 		}
 	case "MetaUsr":

@@ -1627,34 +1717,34 @@ func (z *xlMetaV2Object) UnmarshalMsg(bts []byte) (o []byte, err error) {
 			bts = bts[1:]
 			z.MetaUser = nil
 		} else {
-			var zb0010 uint32
-			zb0010, bts, err = msgp.ReadMapHeaderBytes(bts)
+			var zb0011 uint32
+			zb0011, bts, err = msgp.ReadMapHeaderBytes(bts)
 			if err != nil {
 				err = msgp.WrapError(err, "MetaUser")
 				return
 			}
 			if z.MetaUser == nil {
-				z.MetaUser = make(map[string]string, zb0010)
+				z.MetaUser = make(map[string]string, zb0011)
 			} else if len(z.MetaUser) > 0 {
 				for key := range z.MetaUser {
 					delete(z.MetaUser, key)
 				}
 			}
-			for zb0010 > 0 {
-				var za0010 string
-				var za0011 string
-				zb0010--
-				za0010, bts, err = msgp.ReadStringBytes(bts)
+			for zb0011 > 0 {
+				var za0011 string
+				var za0012 string
+				zb0011--
+				za0011, bts, err = msgp.ReadStringBytes(bts)
 				if err != nil {
 					err = msgp.WrapError(err, "MetaUser")
 					return
 				}
-				za0011, bts, err = msgp.ReadStringBytes(bts)
+				za0012, bts, err = msgp.ReadStringBytes(bts)
 				if err != nil {
-					err = msgp.WrapError(err, "MetaUser", za0010)
+					err = msgp.WrapError(err, "MetaUser", za0011)
 					return
 				}
-				z.MetaUser[za0010] = za0011
+				z.MetaUser[za0011] = za0012
 			}
 		}
 	default:

@@ -1675,18 +1765,22 @@ func (z *xlMetaV2Object) Msgsize() (s int) {
 	for za0005 := range z.PartETags {
 		s += msgp.StringPrefixSize + len(z.PartETags[za0005])
 	}
-	s += 10 + msgp.ArrayHeaderSize + (len(z.PartSizes) * (msgp.Int64Size)) + 11 + msgp.ArrayHeaderSize + (len(z.PartActualSizes) * (msgp.Int64Size)) + 5 + msgp.Int64Size + 6 + msgp.Int64Size + 8 + msgp.MapHeaderSize
+	s += 10 + msgp.ArrayHeaderSize + (len(z.PartSizes) * (msgp.Int64Size)) + 11 + msgp.ArrayHeaderSize + (len(z.PartActualSizes) * (msgp.Int64Size)) + 8 + msgp.ArrayHeaderSize
+	for za0008 := range z.PartIndices {
+		s += msgp.BytesPrefixSize + len(z.PartIndices[za0008])
+	}
+	s += 5 + msgp.Int64Size + 6 + msgp.Int64Size + 8 + msgp.MapHeaderSize
 	if z.MetaSys != nil {
-		for za0008, za0009 := range z.MetaSys {
-			_ = za0009
-			s += msgp.StringPrefixSize + len(za0008) + msgp.BytesPrefixSize + len(za0009)
+		for za0009, za0010 := range z.MetaSys {
+			_ = za0010
+			s += msgp.StringPrefixSize + len(za0009) + msgp.BytesPrefixSize + len(za0010)
 		}
 	}
 	s += 8 + msgp.MapHeaderSize
 	if z.MetaUser != nil {
-		for za0010, za0011 := range z.MetaUser {
-			_ = za0011
-			s += msgp.StringPrefixSize + len(za0010) + msgp.StringPrefixSize + len(za0011)
+		for za0011, za0012 := range z.MetaUser {
+			_ = za0012
+			s += msgp.StringPrefixSize + len(za0011) + msgp.StringPrefixSize + len(za0012)
 		}
 	}
 	return