Add compressed file index (#15247)

https://github.com/minio/minio.git: commit 911a17b149 (parent 3d969bd2b4)
@@ -501,6 +501,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
 partSize := latestMeta.Parts[partIndex].Size
 partActualSize := latestMeta.Parts[partIndex].ActualSize
 partNumber := latestMeta.Parts[partIndex].Number
+partIdx := latestMeta.Parts[partIndex].Index
 tillOffset := erasure.ShardFileOffset(0, partSize, partSize)
 readers := make([]io.ReaderAt, len(latestDisks))
 checksumAlgo := erasureInfo.GetChecksumInfo(partNumber).Algorithm
@@ -550,7 +551,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
 }

 partsMetadata[i].DataDir = dstDataDir
-partsMetadata[i].AddObjectPart(partNumber, "", partSize, partActualSize)
+partsMetadata[i].AddObjectPart(partNumber, "", partSize, partActualSize, partIdx)
 partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
 PartNumber: partNumber,
 Algorithm: checksumAlgo,
@@ -237,12 +237,13 @@ func objectPartIndex(parts []ObjectPartInfo, partNumber int) int {
 }

 // AddObjectPart - add a new object part in order.
-func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize int64, actualSize int64) {
+func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize, actualSize int64, idx []byte) {
 partInfo := ObjectPartInfo{
 Number: partNumber,
 ETag: partETag,
 Size: partSize,
 ActualSize: actualSize,
+Index: idx,
 }

 // Update part info if it already exists.
@@ -23,7 +23,7 @@ import (
 "testing"
 "time"

-humanize "github.com/dustin/go-humanize"
+"github.com/dustin/go-humanize"
 )

 const ActualSize = 1000
@@ -58,7 +58,7 @@ func TestAddObjectPart(t *testing.T) {
 for _, testCase := range testCases {
 if testCase.expectedIndex > -1 {
 partNumString := strconv.Itoa(testCase.partNum)
-fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize)
+fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize, nil)
 }

 if index := objectPartIndex(fi.Parts, testCase.partNum); index != testCase.expectedIndex {
@@ -91,7 +91,7 @@ func TestObjectPartIndex(t *testing.T) {
 // Add some parts for testing.
 for _, testCase := range testCases {
 partNumString := strconv.Itoa(testCase.partNum)
-fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize)
+fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize, nil)
 }

 // Add failure test case.
@@ -121,7 +121,7 @@ func TestObjectToPartOffset(t *testing.T) {
 // Total size of all parts is 5,242,899 bytes.
 for _, partNum := range []int{1, 2, 4, 5, 7} {
 partNumString := strconv.Itoa(partNum)
-fi.AddObjectPart(partNum, "etag."+partNumString, int64(partNum+humanize.MiByte), ActualSize)
+fi.AddObjectPart(partNum, "etag."+partNumString, int64(partNum+humanize.MiByte), ActualSize, nil)
 }

 testCases := []struct {
@@ -160,7 +160,7 @@ func TestObjectToPartOffset(t *testing.T) {
 func TestFindFileInfoInQuorum(t *testing.T) {
 getNFInfo := func(n int, quorum int, t int64, dataDir string) []FileInfo {
 fi := newFileInfo("test", 8, 8)
-fi.AddObjectPart(1, "etag", 100, 100)
+fi.AddObjectPart(1, "etag", 100, 100, nil)
 fi.ModTime = time.Unix(t, 0)
 fi.DataDir = dataDir
 fis := make([]FileInfo, n)
@@ -653,9 +653,13 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
 fi.ModTime = UTCNow()

 md5hex := r.MD5CurrentHexString()
+var index []byte
+if opts.IndexCB != nil {
+index = opts.IndexCB()
+}

 // Add the current part.
-fi.AddObjectPart(partID, md5hex, n, data.ActualSize())
+fi.AddObjectPart(partID, md5hex, n, data.ActualSize(), index)

 for i, disk := range onlineDisks {
 if disk == OfflineDisk {
@@ -947,6 +951,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 Number: part.PartNumber,
 Size: currentFI.Parts[partIdx].Size,
 ActualSize: currentFI.Parts[partIdx].ActualSize,
+Index: currentFI.Parts[partIdx].Index,
 }
 }

@@ -815,6 +815,10 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *
 if n < data.Size() {
 return ObjectInfo{}, IncompleteBody{Bucket: minioMetaBucket, Object: key}
 }
+var index []byte
+if opts.IndexCB != nil {
+index = opts.IndexCB()
+}

 for i, w := range writers {
 if w == nil {
@@ -823,7 +827,7 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *
 continue
 }
 partsMetadata[i].Data = inlineBuffers[i].Bytes()
-partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize())
+partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), index)
 partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
 PartNumber: 1,
 Algorithm: DefaultBitrotAlgorithm,
@@ -1071,6 +1075,10 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 return ObjectInfo{}, IncompleteBody{Bucket: bucket, Object: object}
 }

+var compIndex []byte
+if opts.IndexCB != nil {
+compIndex = opts.IndexCB()
+}
 if !opts.NoLock {
 lk := er.NewNSLock(bucket, object)
 lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
@@ -1091,7 +1099,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 } else {
 partsMetadata[i].Data = nil
 }
-partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize())
+partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), compIndex)
 partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
 PartNumber: 1,
 Algorithm: DefaultBitrotAlgorithm,
@@ -849,6 +849,11 @@ func (es *erasureSingle) putMetacacheObject(ctx context.Context, key string, r *
 return ObjectInfo{}, IncompleteBody{Bucket: minioMetaBucket, Object: key}
 }

+var index []byte
+if opts.IndexCB != nil {
+index = opts.IndexCB()
+}

 for i, w := range writers {
 if w == nil {
 // Make sure to avoid writing to disks which we couldn't complete in erasure.Encode()
@@ -856,7 +861,7 @@ func (es *erasureSingle) putMetacacheObject(ctx context.Context, key string, r *
 continue
 }
 partsMetadata[i].Data = inlineBuffers[i].Bytes()
-partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize())
+partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), index)
 partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
 PartNumber: 1,
 Algorithm: DefaultBitrotAlgorithm,
@@ -1082,6 +1087,11 @@ func (es *erasureSingle) putObject(ctx context.Context, bucket string, object st
 defer lk.Unlock(lkctx.Cancel)
 }

+var index []byte
+if opts.IndexCB != nil {
+index = opts.IndexCB()
+}

 for i, w := range writers {
 if w == nil {
 onlineDisks[i] = nil
@@ -1092,7 +1102,7 @@ func (es *erasureSingle) putObject(ctx context.Context, bucket string, object st
 } else {
 partsMetadata[i].Data = nil
 }
-partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize())
+partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), index)
 partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
 PartNumber: 1,
 Algorithm: DefaultBitrotAlgorithm,
@@ -2369,8 +2379,13 @@ func (es *erasureSingle) PutObjectPart(ctx context.Context, bucket, object, uplo

 md5hex := r.MD5CurrentHexString()

+var index []byte
+if opts.IndexCB != nil {
+index = opts.IndexCB()
+}

 // Add the current part.
-fi.AddObjectPart(partID, md5hex, n, data.ActualSize())
+fi.AddObjectPart(partID, md5hex, n, data.ActualSize(), index)

 for i, disk := range onlineDisks {
 if disk == OfflineDisk {
@@ -2668,6 +2683,7 @@ func (es *erasureSingle) CompleteMultipartUpload(ctx context.Context, bucket str
 Number: part.PartNumber,
 Size: currentFI.Parts[partIdx].Size,
 ActualSize: currentFI.Parts[partIdx].ActualSize,
+Index: currentFI.Parts[partIdx].Index,
 }
 }

@@ -78,6 +78,10 @@ type ObjectOptions struct {
 WalkAscending bool // return Walk results in ascending order of versions

 PrefixEnabledFn func(prefix string) bool // function which returns true if versioning is enabled on prefix

+// IndexCB will return any index created by the compression.
+// Object must have been read at this point.
+IndexCB func() []byte
 }

 // ExpirationOptions represents object options for object expiration at objectLayer.
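The IndexCB contract this option implies: the index only exists once the compressing writer is closed, so the callback must not fire until the object stream has been fully read. A minimal, self-contained sketch of that pattern (hypothetical helper names; only the flow is taken from this commit, using the klauspost/compress s2 package):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/s2"
)

// compressWithIndex is a hypothetical stand-in for newS2CompressReader below:
// it returns the compressed stream plus a callback that yields the index.
func compressWithIndex(r io.Reader) (io.ReadCloser, func() []byte) {
	pr, pw := io.Pipe()
	indexCh := make(chan []byte, 1)
	go func() {
		defer close(indexCh)
		comp := s2.NewWriter(pw)
		if _, err := io.Copy(comp, r); err != nil {
			pw.CloseWithError(err)
			return
		}
		idx, err := comp.CloseIndex() // the index is known only at close time
		indexCh <- idx
		pw.CloseWithError(err)
	}()
	return pr, func() []byte { return <-indexCh }
}

func main() {
	src := bytes.Repeat([]byte("hello, world "), 1<<20)
	rc, indexCB := compressWithIndex(bytes.NewReader(src))
	compressed, _ := io.ReadAll(rc) // drain the object first...
	idx := indexCB()                // ...then fetch the index
	fmt.Println(len(compressed), len(idx))
}
```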
@@ -20,6 +20,8 @@ package cmd
 import (
 "bytes"
 "context"
+"crypto/hmac"
+"encoding/binary"
 "encoding/hex"
 "errors"
 "fmt"
@@ -43,12 +45,15 @@ import (
 "github.com/minio/minio/internal/config/dns"
 "github.com/minio/minio/internal/config/storageclass"
 "github.com/minio/minio/internal/crypto"
+"github.com/minio/minio/internal/fips"
 "github.com/minio/minio/internal/hash"
+"github.com/minio/minio/internal/hash/sha256"
 xhttp "github.com/minio/minio/internal/http"
 "github.com/minio/minio/internal/ioutil"
 "github.com/minio/minio/internal/logger"
 "github.com/minio/pkg/trie"
 "github.com/minio/pkg/wildcard"
+"github.com/minio/sio"
 )

 const (
@@ -513,12 +518,12 @@ func partNumberToRangeSpec(oi ObjectInfo, partNumber int) *HTTPRangeSpec {
 // Returns the compressed offset which should be skipped.
 // If encrypted, offsets are adjusted for encrypted block headers/trailers.
 // Since de-compression is after decryption, encryption overhead is only added to compressedOffset.
-func getCompressedOffsets(objectInfo ObjectInfo, offset int64) (compressedOffset int64, partSkip int64, firstPart int) {
+func getCompressedOffsets(oi ObjectInfo, offset int64, decrypt func([]byte) ([]byte, error)) (compressedOffset int64, partSkip int64, firstPart int, decryptSkip int64, seqNum uint32) {
 var skipLength int64
 var cumulativeActualSize int64
 var firstPartIdx int
-if len(objectInfo.Parts) > 0 {
+if len(oi.Parts) > 0 {
-for i, part := range objectInfo.Parts {
+for i, part := range oi.Parts {
 cumulativeActualSize += part.ActualSize
 if cumulativeActualSize <= offset {
 compressedOffset += part.Size
@@ -529,8 +534,52 @@ func getCompressedOffsets(objectInfo ObjectInfo, offset int64) (compressedOffset
 }
 }
 }
+partSkip = offset - skipLength

-return compressedOffset, offset - skipLength, firstPartIdx
+// Load index and skip more if feasible.
+if partSkip > 0 && len(oi.Parts) > firstPartIdx && len(oi.Parts[firstPartIdx].Index) > 0 {
+_, isEncrypted := crypto.IsEncrypted(oi.UserDefined)
+if isEncrypted {
+dec, err := decrypt(oi.Parts[firstPartIdx].Index)
+if err == nil {
+// Load Index
+var idx s2.Index
+_, err := idx.Load(restoreIndexHeaders(dec))
+
+// Find compressed/uncompressed offsets of our partskip
+compOff, uCompOff, err2 := idx.Find(partSkip)
+
+if err == nil && err2 == nil && compOff > 0 {
+// Encrypted.
+const sseDAREEncPackageBlockSize = SSEDAREPackageBlockSize + SSEDAREPackageMetaSize
+// Number of full blocks in skipped area
+seqNum = uint32(compOff / SSEDAREPackageBlockSize)
+// Skip this many inside a decrypted block to get to compression block start
+decryptSkip = compOff % SSEDAREPackageBlockSize
+// Skip this number of full blocks.
+skipEnc := compOff / SSEDAREPackageBlockSize
+skipEnc *= sseDAREEncPackageBlockSize
+compressedOffset += skipEnc
+// Skip this number of uncompressed bytes.
+partSkip -= uCompOff
+}
+}
+} else {
+// Not encrypted
+var idx s2.Index
+_, err := idx.Load(restoreIndexHeaders(oi.Parts[firstPartIdx].Index))
+
+// Find compressed/uncompressed offsets of our partskip
+compOff, uCompOff, err2 := idx.Find(partSkip)
+
+if err == nil && err2 == nil && compOff > 0 {
+compressedOffset += compOff
+partSkip -= uCompOff
+}
+}
+}
+
+return compressedOffset, partSkip, firstPartIdx, decryptSkip, seqNum
 }

 // GetObjectReader is a type that wraps a reader with a lock to
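The idx.Find round trip above in isolation: Find returns the nearest recorded (compressed, uncompressed) offset pair at or before the requested uncompressed offset, so the caller seeks the compressed stream to the returned compressed offset and decompress-skips only the remainder. A sketch under that assumption (hypothetical helper; s2 is github.com/klauspost/compress/s2):

```go
// seekWithIndex distills the index arithmetic used in getCompressedOffsets:
// how far to advance the compressed stream, and how many uncompressed bytes
// still have to be skipped once decompression resumes there.
func seekWithIndex(idx *s2.Index, target int64) (seekCompressed, skipUncompressed int64, err error) {
	compOff, uCompOff, err := idx.Find(target)
	if err != nil {
		return 0, 0, err
	}
	// A successful Find guarantees uCompOff <= target.
	return compOff, target - uCompOff, nil
}
```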
@@ -618,6 +667,8 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
 if err != nil {
 return nil, 0, 0, err
 }
+var decryptSkip int64
+var seqNum uint32

 off, length = int64(0), oi.Size
 decOff, decLength := int64(0), actualSize
@@ -626,10 +677,14 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
 if err != nil {
 return nil, 0, 0, err
 }
+decrypt := func(b []byte) ([]byte, error) {
+return b, nil
+}
+if isEncrypted {
+decrypt = oi.compressionIndexDecrypt
+}
 // In case of range based queries on multiparts, the offset and length are reduced.
-off, decOff, firstPart = getCompressedOffsets(oi, off)
+off, decOff, firstPart, decryptSkip, seqNum = getCompressedOffsets(oi, off, decrypt)

 decLength = length
 length = oi.Size - off
 // For negative length we read everything.
@@ -646,7 +701,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
 if isEncrypted {
 copySource := h.Get(xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm) != ""
 // Attach decrypter on inputReader
-inputReader, err = DecryptBlocksRequestR(inputReader, h, 0, firstPart, oi, copySource)
+inputReader, err = DecryptBlocksRequestR(inputReader, h, seqNum, firstPart, oi, copySource)
 if err != nil {
 // Call the cleanup funcs
 for i := len(cFns) - 1; i >= 0; i-- {
@@ -654,10 +709,18 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
 }
 return nil, err
 }
+if decryptSkip > 0 {
+inputReader = ioutil.NewSkipReader(inputReader, decryptSkip)
+}
 oi.Size = decLength
 }
 // Decompression reader.
-s2Reader := s2.NewReader(inputReader)
+var dopts []s2.ReaderOption
+if off > 0 {
+// We are not starting at the beginning, so ignore stream identifiers.
+dopts = append(dopts, s2.ReaderIgnoreStreamIdentifier())
+}
+s2Reader := s2.NewReader(inputReader, dopts...)
 // Apply the skipLen and limit on the decompressed stream.
 if decOff > 0 {
 if err = s2Reader.Skip(decOff); err != nil {
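Why the reader option matters: when off > 0 the compressed stream is entered mid-way, past the leading stream-identifier chunk, so the s2 reader has to be told not to expect one. A fragment restating the wiring above (hypothetical variable names; s2 is github.com/klauspost/compress/s2):

```go
// compressedInput is assumed to be positioned mid-stream (i.e. off > 0).
var dopts []s2.ReaderOption
dopts = append(dopts, s2.ReaderIgnoreStreamIdentifier())
dec := s2.NewReader(compressedInput, dopts...)
```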
@@ -778,6 +841,41 @@ func (g *GetObjectReader) Close() error {
 return nil
 }

+func compressionIndexEncrypter(key crypto.ObjectKey, input func() []byte) func() []byte {
+var data []byte
+var fetched bool
+return func() []byte {
+if !fetched {
+data = input()
+fetched = true
+}
+if len(data) == 0 {
+return data
+}
+var buffer bytes.Buffer
+mac := hmac.New(sha256.New, key[:])
+mac.Write([]byte("compression-index"))
+if _, err := sio.Encrypt(&buffer, bytes.NewReader(data), sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()}); err != nil {
+logger.CriticalIf(context.Background(), errors.New("unable to encrypt compression index using object key"))
+}
+return buffer.Bytes()
+}
+}
+
+func (o *ObjectInfo) compressionIndexDecrypt(input []byte) ([]byte, error) {
+if len(input) == 0 {
+return input, nil
+}
+
+key, err := decryptObjectInfo(nil, o.Bucket, o.Name, o.UserDefined)
+if err != nil {
+return nil, err
+}
+mac := hmac.New(sha256.New, key)
+mac.Write([]byte("compression-index"))
+return sio.DecryptBuffer(nil, input, sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()})
+}
+
 // SealMD5CurrFn seals md5sum with object encryption key and returns sealed
 // md5sum
 type SealMD5CurrFn func([]byte) []byte
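The seal/unseal pair above as a standalone round trip, with a stand-in key (assumptions: a zero object key for illustration, the standard library crypto/sha256 in place of MinIO's internal sha256 wrapper, and sio's default cipher suites instead of fips.DARECiphers()):

```go
package main

import (
	"bytes"
	"crypto/hmac"
	"crypto/sha256"
	"fmt"

	"github.com/minio/sio"
)

func main() {
	var objectKey [32]byte // stand-in for the per-object encryption key
	// Derive the index key exactly as above: HMAC-SHA256 over the fixed
	// context string "compression-index".
	mac := hmac.New(sha256.New, objectKey[:])
	mac.Write([]byte("compression-index"))
	derived := mac.Sum(nil)

	plain := []byte("compression index bytes")
	var sealed bytes.Buffer
	if _, err := sio.Encrypt(&sealed, bytes.NewReader(plain), sio.Config{Key: derived}); err != nil {
		panic(err)
	}
	unsealed, err := sio.DecryptBuffer(nil, sealed.Bytes(), sio.Config{Key: derived})
	fmt.Println(bytes.Equal(unsealed, plain), err)
}
```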
@@ -888,11 +986,13 @@ func init() {
 // input 'on' is always recommended such that this function works
 // properly, because we do not wish to create an object even if
 // client closed the stream prematurely.
-func newS2CompressReader(r io.Reader, on int64) io.ReadCloser {
+func newS2CompressReader(r io.Reader, on int64) (rc io.ReadCloser, idx func() []byte) {
 pr, pw := io.Pipe()
 // Copy input to compressor
+comp := s2.NewWriter(pw, compressOpts...)
+indexCh := make(chan []byte, 1)
 go func() {
-comp := s2.NewWriter(pw, compressOpts...)
+defer close(indexCh)
 cn, err := io.Copy(comp, r)
 if err != nil {
 comp.Close()
@@ -907,9 +1007,25 @@ func newS2CompressReader(r io.Reader, on int64) io.ReadCloser {
 return
 }
 // Close the stream.
+// If more than 8MB was written, generate index.
+if cn > 8<<20 {
+idx, err := comp.CloseIndex()
+idx = removeIndexHeaders(idx)
+indexCh <- idx
+pw.CloseWithError(err)
+return
+}
 pw.CloseWithError(comp.Close())
 }()
-return pr
+var gotIdx []byte
+return pr, func() []byte {
+if gotIdx != nil {
+return gotIdx
+}
+// Will get index or nil if closed.
+gotIdx = <-indexCh
+return gotIdx
+}
 }

 // compressSelfTest performs a self-test to ensure that compression
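How callers are expected to consume the new pair, condensed from the handler changes later in this commit (a fragment, not new API):

```go
// The index callback is wired into ObjectOptions so the object layer can
// persist it with the part metadata once the stream has been drained.
s2c, idxCb := newS2CompressReader(actualReader, actualSize)
defer s2c.Close()
opts.IndexCB = idxCb
reader = etag.Wrap(s2c, actualReader)
size = -1 // compressed size is unpredictable
```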
@@ -933,7 +1049,7 @@ func compressSelfTest() {
 }
 }
 const skip = 2<<20 + 511
-r := newS2CompressReader(bytes.NewBuffer(data), int64(len(data)))
+r, _ := newS2CompressReader(bytes.NewBuffer(data), int64(len(data)))
 b, err := io.ReadAll(r)
 failOnErr(err)
 failOnErr(r.Close())
@@ -1012,3 +1128,65 @@ func hasSpaceFor(di []*DiskInfo, size int64) bool {
 wantLeft := uint64(float64(total) * (1.0 - diskFillFraction))
 return available > wantLeft
 }
+
+// removeIndexHeaders will trim all headers and trailers from a given index.
+// This is expected to save 20 bytes.
+// These can be restored using restoreIndexHeaders.
+// This removes a layer of security, but is the most compact representation.
+// Returns nil if the headers contain errors.
+// The returned slice references the provided slice.
+func removeIndexHeaders(b []byte) []byte {
+const save = 4 + len(s2.S2IndexHeader) + len(s2.S2IndexTrailer) + 4
+if len(b) <= save {
+return nil
+}
+if b[0] != s2.ChunkTypeIndex {
+return nil
+}
+chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16
+b = b[4:]
+
+// Validate we have enough...
+if len(b) < chunkLen {
+return nil
+}
+b = b[:chunkLen]
+
+if !bytes.Equal(b[:len(s2.S2IndexHeader)], []byte(s2.S2IndexHeader)) {
+return nil
+}
+b = b[len(s2.S2IndexHeader):]
+if !bytes.HasSuffix(b, []byte(s2.S2IndexTrailer)) {
+return nil
+}
+b = bytes.TrimSuffix(b, []byte(s2.S2IndexTrailer))
+
+if len(b) < 4 {
+return nil
+}
+return b[:len(b)-4]
+}
+
+// restoreIndexHeaders will restore index headers removed by removeIndexHeaders.
+// No error checking is performed on the input.
+func restoreIndexHeaders(in []byte) []byte {
+if len(in) == 0 {
+return nil
+}
+b := make([]byte, 0, 4+len(s2.S2IndexHeader)+len(in)+len(s2.S2IndexTrailer)+4)
+b = append(b, s2.ChunkTypeIndex, 0, 0, 0)
+b = append(b, []byte(s2.S2IndexHeader)...)
+b = append(b, in...)
+
+var tmp [4]byte
+binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)+4+len(s2.S2IndexTrailer)))
+b = append(b, tmp[:4]...)
+// Trailer
+b = append(b, []byte(s2.S2IndexTrailer)...)
+
+chunkLen := len(b) - 4 /*skippableFrameHeader*/
+b[1] = uint8(chunkLen >> 0)
+b[2] = uint8(chunkLen >> 8)
+b[3] = uint8(chunkLen >> 16)
+return b
+}
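The property the two helpers are meant to satisfy, written as a check (a sketch; assumes both functions plus the s2 and errors packages are in scope, as in this file): stripping the framing and restoring it must yield an index that s2 can still load.

```go
// roundTripIndex is a hypothetical sanity check for the pair above.
func roundTripIndex(fullIndex []byte) error {
	stripped := removeIndexHeaders(fullIndex)
	if stripped == nil {
		return errors.New("index framing not recognized")
	}
	var idx s2.Index
	_, err := idx.Load(restoreIndexHeaders(stripped))
	return err
}
```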
@@ -593,7 +593,7 @@ func TestGetCompressedOffsets(t *testing.T) {
 },
 }
 for i, test := range testCases {
-startOffset, snappyStartOffset, firstPart := getCompressedOffsets(test.objInfo, test.offset)
+startOffset, snappyStartOffset, firstPart, _, _ := getCompressedOffsets(test.objInfo, test.offset, nil)
 if startOffset != test.startOffset {
 t.Errorf("Test %d - expected startOffset %d but received %d",
 i, test.startOffset, startOffset)
@@ -611,19 +611,20 @@ func TestGetCompressedOffsets(t *testing.T) {

 func TestS2CompressReader(t *testing.T) {
 tests := []struct {
 name string
 data []byte
+wantIdx bool
 }{
 {name: "empty", data: nil},
-{name: "small", data: []byte("hello, world")},
+{name: "small", data: []byte("hello, world!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")},
-{name: "large", data: bytes.Repeat([]byte("hello, world"), 1000)},
+{name: "large", data: bytes.Repeat([]byte("hello, world"), 1000000), wantIdx: true},
 }

 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
 buf := make([]byte, 100) // make small buffer to ensure multiple reads are required for large case

-r := newS2CompressReader(bytes.NewReader(tt.data), int64(len(tt.data)))
+r, idxCB := newS2CompressReader(bytes.NewReader(tt.data), int64(len(tt.data)))
 defer r.Close()

 var rdrBuf bytes.Buffer
@@ -631,7 +632,26 @@ func TestS2CompressReader(t *testing.T) {
 if err != nil {
 t.Fatal(err)
 }
+r.Close()
+idx := idxCB()
+if !tt.wantIdx && len(idx) > 0 {
+t.Errorf("index returned above threshold")
+}
+if tt.wantIdx {
+if idx == nil {
+t.Errorf("no index returned")
+}
+var index s2.Index
+_, err = index.Load(restoreIndexHeaders(idx))
+if err != nil {
+t.Errorf("error loading index: %v", err)
+}
+t.Log("size:", len(idx))
+t.Log(string(index.JSON()))
+if index.TotalUncompressed != int64(len(tt.data)) {
+t.Errorf("Expected size %d, got %d", len(tt.data), index.TotalUncompressed)
+}
+}
 var stdBuf bytes.Buffer
 w := s2.NewWriter(&stdBuf)
 _, err = io.CopyBuffer(w, bytes.NewReader(tt.data), buf)
@@ -1164,7 +1164,8 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 compressMetadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(actualSize, 10)

 reader = etag.NewReader(reader, nil)
-s2c := newS2CompressReader(reader, actualSize)
+s2c, cb := newS2CompressReader(reader, actualSize)
+dstOpts.IndexCB = cb
 defer s2c.Close()
 reader = etag.Wrap(s2c, reader)
 length = -1
@@ -1308,6 +1309,9 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 return
 }
+if dstOpts.IndexCB != nil {
+dstOpts.IndexCB = compressionIndexEncrypter(objEncKey, dstOpts.IndexCB)
+}
 }
 }
 }
@@ -1715,6 +1719,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 })

 actualSize := size
+var idxCb func() []byte
 if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) && size > 0 {
 // Storing the compression metadata.
 metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
@@ -1727,8 +1732,10 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 }

 // Set compression metrics.
-s2c := newS2CompressReader(actualReader, actualSize)
+var s2c io.ReadCloser
+s2c, idxCb = newS2CompressReader(actualReader, actualSize)
 defer s2c.Close()

 reader = etag.Wrap(s2c, actualReader)
 size = -1 // Since compressed size is un-predictable.
 md5hex = "" // Do not try to verify the content.
@@ -1751,6 +1758,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 return
 }
+opts.IndexCB = idxCb

 if api.CacheAPI() != nil {
 putObject = api.CacheAPI().PutObject
@@ -1813,6 +1821,9 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 return
 }
+if opts.IndexCB != nil {
+opts.IndexCB = compressionIndexEncrypter(objectEncryptionKey, opts.IndexCB)
+}
 }
 }

@@ -2061,6 +2072,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
 }

 actualSize := size
+var idxCb func() []byte
 if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) && size > 0 {
 // Storing the compression metadata.
 metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
@@ -2072,8 +2084,9 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
 }

 // Set compression metrics.
-s2c := newS2CompressReader(actualReader, actualSize)
+s2c, cb := newS2CompressReader(actualReader, actualSize)
 defer s2c.Close()
+idxCb = cb
 reader = etag.Wrap(s2c, actualReader)
 size = -1 // Since compressed size is un-predictable.
 }
@@ -2100,6 +2113,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
 return err
 }
 opts.MTime = info.ModTime()
+opts.IndexCB = idxCb

 retentionMode, retentionDate, legalHold, s3err := checkPutObjectLockAllowed(ctx, r, bucket, object, getObjectInfo, retPerms, holdPerms)
 if s3err == ErrNone && retentionMode.Valid() {
@@ -2153,6 +2167,9 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
 return err
 }
 }
+if opts.IndexCB != nil {
+opts.IndexCB = compressionIndexEncrypter(objectEncryptionKey, opts.IndexCB)
+}
 }

 // Ensure that metadata does not contain sensitive information
@@ -2571,8 +2588,10 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 // Read compression metadata preserved in the init multipart for the decision.
 _, isCompressed := mi.UserDefined[ReservedMetadataPrefix+"compression"]
 // Compress only if the compression is enabled during initial multipart.
+var idxCb func() []byte
 if isCompressed {
-s2c := newS2CompressReader(reader, actualPartSize)
+s2c, cb := newS2CompressReader(reader, actualPartSize)
+idxCb = cb
 defer s2c.Close()
 reader = etag.Wrap(s2c, reader)
 length = -1
@@ -2589,6 +2608,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 return
 }
+dstOpts.IndexCB = idxCb

 rawReader := srcInfo.Reader
 pReader := NewPutObjReader(rawReader)
@@ -2643,6 +2663,9 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 return
 }
+if dstOpts.IndexCB != nil {
+dstOpts.IndexCB = compressionIndexEncrypter(objectEncryptionKey, dstOpts.IndexCB)
+}
 }

 srcInfo.PutObjReader = pReader
@@ -2821,6 +2844,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 // Read compression metadata preserved in the init multipart for the decision.
 _, isCompressed := mi.UserDefined[ReservedMetadataPrefix+"compression"]

+var idxCb func() []byte
 if objectAPI.IsCompressionSupported() && isCompressed {
 actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
 if err != nil {
@@ -2829,7 +2853,8 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 }

 // Set compression metrics.
-s2c := newS2CompressReader(actualReader, actualSize)
+s2c, cb := newS2CompressReader(actualReader, actualSize)
+idxCb = cb
 defer s2c.Close()
 reader = etag.Wrap(s2c, actualReader)
 size = -1 // Since compressed size is un-predictable.
@@ -2904,7 +2929,11 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 return
 }
+if idxCb != nil {
+idxCb = compressionIndexEncrypter(objectEncryptionKey, idxCb)
+}
 }
+opts.IndexCB = idxCb

 putObjectPart := objectAPI.PutObjectPart
 if api.CacheAPI() != nil {
@@ -132,6 +132,7 @@ type ObjectPartInfo struct {
 Number int `json:"number"`
 Size int64 `json:"size"`
 ActualSize int64 `json:"actualSize"`
+Index []byte `json:"index,omitempty" msg:"index,omitempty"`
 }

 // ChecksumInfo - carries checksums of individual scattered parts per disk.
@@ -593,6 +593,12 @@ func (z *ObjectPartInfo) DecodeMsg(dc *msgp.Reader) (err error) {
 err = msgp.WrapError(err, "ActualSize")
 return
 }
+case "index":
+z.Index, err = dc.ReadBytes(z.Index)
+if err != nil {
+err = msgp.WrapError(err, "Index")
+return
+}
 default:
 err = dc.Skip()
 if err != nil {
@@ -606,9 +612,23 @@ func (z *ObjectPartInfo) DecodeMsg(dc *msgp.Reader) (err error) {

 // EncodeMsg implements msgp.Encodable
 func (z *ObjectPartInfo) EncodeMsg(en *msgp.Writer) (err error) {
-// map header, size 4
+// omitempty: check for empty values
+zb0001Len := uint32(5)
+var zb0001Mask uint8 /* 5 bits */
+if z.Index == nil {
+zb0001Len--
+zb0001Mask |= 0x10
+}
+// variable map header, size zb0001Len
+err = en.Append(0x80 | uint8(zb0001Len))
+if err != nil {
+return
+}
+if zb0001Len == 0 {
+return
+}
 // write "ETag"
-err = en.Append(0x84, 0xa4, 0x45, 0x54, 0x61, 0x67)
+err = en.Append(0xa4, 0x45, 0x54, 0x61, 0x67)
 if err != nil {
 return
 }
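The header-byte change is the MessagePack fixmap rule: a map of up to 15 entries is encoded as the single byte 0x80|size, so the old fixed header 0x84 (four fields) becomes 0x80|zb0001Len, i.e. 0x85 when the index field is present and 0x84 when it is omitted. A tiny illustration:

```go
package main

import "fmt"

func main() {
	// MessagePack fixmap header byte: 0x80 | number of key/value pairs (<= 15).
	for _, size := range []uint8{4, 5} {
		fmt.Printf("fixmap with %d entries -> 0x%x\n", size, 0x80|size)
	}
}
```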
@@ -647,15 +667,38 @@ func (z *ObjectPartInfo) EncodeMsg(en *msgp.Writer) (err error) {
 err = msgp.WrapError(err, "ActualSize")
 return
 }
+if (zb0001Mask & 0x10) == 0 { // if not empty
+// write "index"
+err = en.Append(0xa5, 0x69, 0x6e, 0x64, 0x65, 0x78)
+if err != nil {
+return
+}
+err = en.WriteBytes(z.Index)
+if err != nil {
+err = msgp.WrapError(err, "Index")
+return
+}
+}
 return
 }

 // MarshalMsg implements msgp.Marshaler
 func (z *ObjectPartInfo) MarshalMsg(b []byte) (o []byte, err error) {
 o = msgp.Require(b, z.Msgsize())
-// map header, size 4
+// omitempty: check for empty values
+zb0001Len := uint32(5)
+var zb0001Mask uint8 /* 5 bits */
+if z.Index == nil {
+zb0001Len--
+zb0001Mask |= 0x10
+}
+// variable map header, size zb0001Len
+o = append(o, 0x80|uint8(zb0001Len))
+if zb0001Len == 0 {
+return
+}
 // string "ETag"
-o = append(o, 0x84, 0xa4, 0x45, 0x54, 0x61, 0x67)
+o = append(o, 0xa4, 0x45, 0x54, 0x61, 0x67)
 o = msgp.AppendString(o, z.ETag)
 // string "Number"
 o = append(o, 0xa6, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72)
@@ -666,6 +709,11 @@ func (z *ObjectPartInfo) MarshalMsg(b []byte) (o []byte, err error) {
 // string "ActualSize"
 o = append(o, 0xaa, 0x41, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65)
 o = msgp.AppendInt64(o, z.ActualSize)
+if (zb0001Mask & 0x10) == 0 { // if not empty
+// string "index"
+o = append(o, 0xa5, 0x69, 0x6e, 0x64, 0x65, 0x78)
+o = msgp.AppendBytes(o, z.Index)
+}
 return
 }

@@ -711,6 +759,12 @@ func (z *ObjectPartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
 err = msgp.WrapError(err, "ActualSize")
 return
 }
+case "index":
+z.Index, bts, err = msgp.ReadBytesBytes(bts, z.Index)
+if err != nil {
+err = msgp.WrapError(err, "Index")
+return
+}
 default:
 bts, err = msgp.Skip(bts)
 if err != nil {
@@ -725,7 +779,7 @@ func (z *ObjectPartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {

 // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z *ObjectPartInfo) Msgsize() (s int) {
-s = 1 + 5 + msgp.StringPrefixSize + len(z.ETag) + 7 + msgp.IntSize + 5 + msgp.Int64Size + 11 + msgp.Int64Size
+s = 1 + 5 + msgp.StringPrefixSize + len(z.ETag) + 7 + msgp.IntSize + 5 + msgp.Int64Size + 11 + msgp.Int64Size + 6 + msgp.BytesPrefixSize + len(z.Index)
 return
 }

@@ -167,6 +167,7 @@ type xlMetaV2Object struct {
 PartETags []string `json:"PartETags" msg:"PartETags,allownil"` // Part ETags
 PartSizes []int64 `json:"PartSizes" msg:"PartSizes"` // Part Sizes
 PartActualSizes []int64 `json:"PartASizes,omitempty" msg:"PartASizes,allownil"` // Part ActualSizes (compression)
+PartIndices [][]byte `json:"PartIndices,omitempty" msg:"PartIdx,omitempty"` // Part Indexes (compression)
 Size int64 `json:"Size" msg:"Size"` // Object version size
 ModTime int64 `json:"MTime" msg:"MTime"` // Object version modified time
 MetaSys map[string][]byte `json:"MetaSys,omitempty" msg:"MetaSys,allownil"` // Object version internal metadata
@@ -574,6 +575,9 @@ func (j xlMetaV2Object) ToFileInfo(volume, path string) (FileInfo, error) {
 fi.Parts[i].ETag = j.PartETags[i]
 }
 fi.Parts[i].ActualSize = j.PartActualSizes[i]
+if len(j.PartIndices) > 0 {
+fi.Parts[i].Index = j.PartIndices[i]
+}
 }
 fi.Erasure.Checksums = make([]ChecksumInfo, len(j.PartSizes))
 for i := range fi.Parts {
@@ -1471,6 +1475,13 @@ func (x *xlMetaV2) AddVersion(fi FileInfo) error {
 break
 }
 }
+for i := range fi.Parts {
+// Only add indices if any.
+if len(fi.Parts[i].Index) > 0 {
+ventry.ObjectV2.PartIndices = make([][]byte, len(fi.Parts))
+break
+}
+}
 for i := range fi.Erasure.Distribution {
 ventry.ObjectV2.ErasureDist[i] = uint8(fi.Erasure.Distribution[i])
 }
@@ -1482,6 +1493,9 @@ func (x *xlMetaV2) AddVersion(fi FileInfo) error {
 }
 ventry.ObjectV2.PartNumbers[i] = fi.Parts[i].Number
 ventry.ObjectV2.PartActualSizes[i] = fi.Parts[i].ActualSize
+if len(ventry.ObjectV2.PartIndices) > 0 {
+ventry.ObjectV2.PartIndices[i] = fi.Parts[i].Index
+}
 }

 tierFVIDKey := ReservedMetadataPrefixLower + tierFVID
|
@ -935,6 +935,25 @@ func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
case "PartIdx":
|
||||||
|
var zb0009 uint32
|
||||||
|
zb0009, err = dc.ReadArrayHeader()
|
||||||
|
if err != nil {
|
||||||
|
err = msgp.WrapError(err, "PartIndices")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if cap(z.PartIndices) >= int(zb0009) {
|
||||||
|
z.PartIndices = (z.PartIndices)[:zb0009]
|
||||||
|
} else {
|
||||||
|
z.PartIndices = make([][]byte, zb0009)
|
||||||
|
}
|
||||||
|
for za0008 := range z.PartIndices {
|
||||||
|
z.PartIndices[za0008], err = dc.ReadBytes(z.PartIndices[za0008])
|
||||||
|
if err != nil {
|
||||||
|
err = msgp.WrapError(err, "PartIndices", za0008)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
case "Size":
|
case "Size":
|
||||||
z.Size, err = dc.ReadInt64()
|
z.Size, err = dc.ReadInt64()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -956,34 +975,34 @@ func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) {
 }
 z.MetaSys = nil
 } else {
-var zb0009 uint32
+var zb0010 uint32
-zb0009, err = dc.ReadMapHeader()
+zb0010, err = dc.ReadMapHeader()
 if err != nil {
 err = msgp.WrapError(err, "MetaSys")
 return
 }
 if z.MetaSys == nil {
-z.MetaSys = make(map[string][]byte, zb0009)
+z.MetaSys = make(map[string][]byte, zb0010)
 } else if len(z.MetaSys) > 0 {
 for key := range z.MetaSys {
 delete(z.MetaSys, key)
 }
 }
-for zb0009 > 0 {
+for zb0010 > 0 {
-zb0009--
+zb0010--
-var za0008 string
+var za0009 string
-var za0009 []byte
+var za0010 []byte
-za0008, err = dc.ReadString()
+za0009, err = dc.ReadString()
 if err != nil {
 err = msgp.WrapError(err, "MetaSys")
 return
 }
-za0009, err = dc.ReadBytes(za0009)
+za0010, err = dc.ReadBytes(za0010)
 if err != nil {
-err = msgp.WrapError(err, "MetaSys", za0008)
+err = msgp.WrapError(err, "MetaSys", za0009)
 return
 }
-z.MetaSys[za0008] = za0009
+z.MetaSys[za0009] = za0010
 }
 }
 case "MetaUsr":
@@ -995,34 +1014,34 @@ func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) {
 }
 z.MetaUser = nil
 } else {
-var zb0010 uint32
+var zb0011 uint32
-zb0010, err = dc.ReadMapHeader()
+zb0011, err = dc.ReadMapHeader()
 if err != nil {
 err = msgp.WrapError(err, "MetaUser")
 return
 }
 if z.MetaUser == nil {
-z.MetaUser = make(map[string]string, zb0010)
+z.MetaUser = make(map[string]string, zb0011)
 } else if len(z.MetaUser) > 0 {
 for key := range z.MetaUser {
 delete(z.MetaUser, key)
 }
 }
-for zb0010 > 0 {
+for zb0011 > 0 {
-zb0010--
+zb0011--
-var za0010 string
 var za0011 string
-za0010, err = dc.ReadString()
+var za0012 string
+za0011, err = dc.ReadString()
 if err != nil {
 err = msgp.WrapError(err, "MetaUser")
 return
 }
-za0011, err = dc.ReadString()
+za0012, err = dc.ReadString()
 if err != nil {
-err = msgp.WrapError(err, "MetaUser", za0010)
+err = msgp.WrapError(err, "MetaUser", za0011)
 return
 }
-z.MetaUser[za0010] = za0011
+z.MetaUser[za0011] = za0012
 }
 }
 default:
@@ -1038,9 +1057,23 @@ func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) {

 // EncodeMsg implements msgp.Encodable
 func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) {
-// map header, size 17
+// omitempty: check for empty values
+zb0001Len := uint32(18)
+var zb0001Mask uint32 /* 18 bits */
+if z.PartIndices == nil {
+zb0001Len--
+zb0001Mask |= 0x2000
+}
+// variable map header, size zb0001Len
+err = en.WriteMapHeader(zb0001Len)
+if err != nil {
+return
+}
+if zb0001Len == 0 {
+return
+}
 // write "ID"
-err = en.Append(0xde, 0x0, 0x11, 0xa2, 0x49, 0x44)
+err = en.Append(0xa2, 0x49, 0x44)
 if err != nil {
 return
 }
@@ -1218,6 +1251,25 @@ func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) {
 			}
 		}
 	}
+	if (zb0001Mask & 0x2000) == 0 { // if not empty
+		// write "PartIdx"
+		err = en.Append(0xa7, 0x50, 0x61, 0x72, 0x74, 0x49, 0x64, 0x78)
+		if err != nil {
+			return
+		}
+		err = en.WriteArrayHeader(uint32(len(z.PartIndices)))
+		if err != nil {
+			err = msgp.WrapError(err, "PartIndices")
+			return
+		}
+		for za0008 := range z.PartIndices {
+			err = en.WriteBytes(z.PartIndices[za0008])
+			if err != nil {
+				err = msgp.WrapError(err, "PartIndices", za0008)
+				return
+			}
+		}
+	}
 	// write "Size"
 	err = en.Append(0xa4, 0x53, 0x69, 0x7a, 0x65)
 	if err != nil {
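The literal 0xa7, 0x50, 0x61, ... appended above is the MessagePack fixstr key "PartIdx": a 0xa0|7 length header followed by seven ASCII bytes. A quick stand-alone check (ours, not from the commit):

package main

import "fmt"

func main() {
	key := []byte{0xa7, 0x50, 0x61, 0x72, 0x74, 0x49, 0x64, 0x78}
	if key[0]&0xe0 == 0xa0 { // fixstr: 101xxxxx, low 5 bits = length
		n := int(key[0] & 0x1f)
		fmt.Printf("fixstr len=%d value=%q\n", n, key[1:1+n])
	}
	// prints: fixstr len=7 value="PartIdx"
}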
@@ -1254,15 +1306,15 @@ func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) {
 			err = msgp.WrapError(err, "MetaSys")
 			return
 		}
-		for za0008, za0009 := range z.MetaSys {
-			err = en.WriteString(za0008)
+		for za0009, za0010 := range z.MetaSys {
+			err = en.WriteString(za0009)
 			if err != nil {
 				err = msgp.WrapError(err, "MetaSys")
 				return
 			}
-			err = en.WriteBytes(za0009)
+			err = en.WriteBytes(za0010)
 			if err != nil {
-				err = msgp.WrapError(err, "MetaSys", za0008)
+				err = msgp.WrapError(err, "MetaSys", za0009)
 				return
 			}
 		}
@@ -1283,15 +1335,15 @@ func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) {
 			err = msgp.WrapError(err, "MetaUser")
 			return
 		}
-		for za0010, za0011 := range z.MetaUser {
-			err = en.WriteString(za0010)
+		for za0011, za0012 := range z.MetaUser {
+			err = en.WriteString(za0011)
 			if err != nil {
 				err = msgp.WrapError(err, "MetaUser")
 				return
 			}
-			err = en.WriteString(za0011)
+			err = en.WriteString(za0012)
 			if err != nil {
-				err = msgp.WrapError(err, "MetaUser", za0010)
+				err = msgp.WrapError(err, "MetaUser", za0011)
 				return
 			}
 		}
@@ -1302,9 +1354,20 @@ func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) {
 
 // MarshalMsg implements msgp.Marshaler
 func (z *xlMetaV2Object) MarshalMsg(b []byte) (o []byte, err error) {
 	o = msgp.Require(b, z.Msgsize())
-	// map header, size 17
+	// omitempty: check for empty values
+	zb0001Len := uint32(18)
+	var zb0001Mask uint32 /* 18 bits */
+	if z.PartIndices == nil {
+		zb0001Len--
+		zb0001Mask |= 0x2000
+	}
+	// variable map header, size zb0001Len
+	o = msgp.AppendMapHeader(o, zb0001Len)
+	if zb0001Len == 0 {
+		return
+	}
 	// string "ID"
-	o = append(o, 0xde, 0x0, 0x11, 0xa2, 0x49, 0x44)
+	o = append(o, 0xa2, 0x49, 0x44)
 	o = msgp.AppendBytes(o, (z.VersionID)[:])
 	// string "DDir"
 	o = append(o, 0xa4, 0x44, 0x44, 0x69, 0x72)
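MarshalMsg differs from EncodeMsg only in that it appends to a caller-supplied byte slice instead of writing to a stream; msgp.Require grows the buffer up front using the Msgsize estimate so the Append* calls that follow never reallocate. A rough sketch of that pattern (our own helper, not the library implementation):

package main

import "fmt"

// require ensures b has room for sz more bytes, so subsequent appends
// never reallocate — the role msgp.Require plays in MarshalMsg.
func require(b []byte, sz int) []byte {
	if cap(b)-len(b) >= sz {
		return b
	}
	nb := make([]byte, len(b), len(b)+sz)
	copy(nb, b)
	return nb
}

func main() {
	b := require(nil, 64)
	fmt.Println(len(b), cap(b)) // 0 64
}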
@@ -1365,6 +1428,14 @@ func (z *xlMetaV2Object) MarshalMsg(b []byte) (o []byte, err error) {
 			o = msgp.AppendInt64(o, z.PartActualSizes[za0007])
 		}
 	}
+	if (zb0001Mask & 0x2000) == 0 { // if not empty
+		// string "PartIdx"
+		o = append(o, 0xa7, 0x50, 0x61, 0x72, 0x74, 0x49, 0x64, 0x78)
+		o = msgp.AppendArrayHeader(o, uint32(len(z.PartIndices)))
+		for za0008 := range z.PartIndices {
+			o = msgp.AppendBytes(o, z.PartIndices[za0008])
+		}
+	}
 	// string "Size"
 	o = append(o, 0xa4, 0x53, 0x69, 0x7a, 0x65)
 	o = msgp.AppendInt64(o, z.Size)
@@ -1377,9 +1448,9 @@ func (z *xlMetaV2Object) MarshalMsg(b []byte) (o []byte, err error) {
 		o = msgp.AppendNil(o)
 	} else {
 		o = msgp.AppendMapHeader(o, uint32(len(z.MetaSys)))
-		for za0008, za0009 := range z.MetaSys {
-			o = msgp.AppendString(o, za0008)
-			o = msgp.AppendBytes(o, za0009)
+		for za0009, za0010 := range z.MetaSys {
+			o = msgp.AppendString(o, za0009)
+			o = msgp.AppendBytes(o, za0010)
 		}
 	}
 	// string "MetaUsr"
@@ -1388,9 +1459,9 @@ func (z *xlMetaV2Object) MarshalMsg(b []byte) (o []byte, err error) {
 		o = msgp.AppendNil(o)
 	} else {
 		o = msgp.AppendMapHeader(o, uint32(len(z.MetaUser)))
-		for za0010, za0011 := range z.MetaUser {
-			o = msgp.AppendString(o, za0010)
+		for za0011, za0012 := range z.MetaUser {
 			o = msgp.AppendString(o, za0011)
+			o = msgp.AppendString(o, za0012)
 		}
 	}
 	return
@@ -1575,6 +1646,25 @@ func (z *xlMetaV2Object) UnmarshalMsg(bts []byte) (o []byte, err error) {
 					}
 				}
 			}
+		case "PartIdx":
+			var zb0009 uint32
+			zb0009, bts, err = msgp.ReadArrayHeaderBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "PartIndices")
+				return
+			}
+			if cap(z.PartIndices) >= int(zb0009) {
+				z.PartIndices = (z.PartIndices)[:zb0009]
+			} else {
+				z.PartIndices = make([][]byte, zb0009)
+			}
+			for za0008 := range z.PartIndices {
+				z.PartIndices[za0008], bts, err = msgp.ReadBytesBytes(bts, z.PartIndices[za0008])
+				if err != nil {
+					err = msgp.WrapError(err, "PartIndices", za0008)
+					return
+				}
+			}
 		case "Size":
 			z.Size, bts, err = msgp.ReadInt64Bytes(bts)
 			if err != nil {
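The cap check before make in the new "PartIdx" case lets UnmarshalMsg reuse the backing array of a previously decoded slice, which avoids allocations when the same xlMetaV2Object is decoded repeatedly. The pattern in isolation (sketch, ours):

package main

import "fmt"

// resizeByteSlices keeps the existing backing array when it is large
// enough — the cap-check pattern the generator emits for [][]byte.
func resizeByteSlices(dst [][]byte, n int) [][]byte {
	if cap(dst) >= n {
		return dst[:n]
	}
	return make([][]byte, n)
}

func main() {
	buf := make([][]byte, 0, 8)
	buf = resizeByteSlices(buf, 4)  // reuses the 8-slot array
	fmt.Println(len(buf), cap(buf)) // 4 8
}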
@@ -1592,34 +1682,34 @@ func (z *xlMetaV2Object) UnmarshalMsg(bts []byte) (o []byte, err error) {
 			bts = bts[1:]
 			z.MetaSys = nil
 		} else {
-			var zb0009 uint32
-			zb0009, bts, err = msgp.ReadMapHeaderBytes(bts)
+			var zb0010 uint32
+			zb0010, bts, err = msgp.ReadMapHeaderBytes(bts)
 			if err != nil {
 				err = msgp.WrapError(err, "MetaSys")
 				return
 			}
 			if z.MetaSys == nil {
-				z.MetaSys = make(map[string][]byte, zb0009)
+				z.MetaSys = make(map[string][]byte, zb0010)
 			} else if len(z.MetaSys) > 0 {
 				for key := range z.MetaSys {
 					delete(z.MetaSys, key)
 				}
 			}
-			for zb0009 > 0 {
-				var za0008 string
-				var za0009 []byte
-				zb0009--
-				za0008, bts, err = msgp.ReadStringBytes(bts)
+			for zb0010 > 0 {
+				var za0009 string
+				var za0010 []byte
+				zb0010--
+				za0009, bts, err = msgp.ReadStringBytes(bts)
 				if err != nil {
 					err = msgp.WrapError(err, "MetaSys")
 					return
 				}
-				za0009, bts, err = msgp.ReadBytesBytes(bts, za0009)
+				za0010, bts, err = msgp.ReadBytesBytes(bts, za0010)
 				if err != nil {
-					err = msgp.WrapError(err, "MetaSys", za0008)
+					err = msgp.WrapError(err, "MetaSys", za0009)
 					return
 				}
-				z.MetaSys[za0008] = za0009
+				z.MetaSys[za0009] = za0010
 			}
 		}
 	case "MetaUsr":
@@ -1627,34 +1717,34 @@ func (z *xlMetaV2Object) UnmarshalMsg(bts []byte) (o []byte, err error) {
 			bts = bts[1:]
 			z.MetaUser = nil
 		} else {
-			var zb0010 uint32
-			zb0010, bts, err = msgp.ReadMapHeaderBytes(bts)
+			var zb0011 uint32
+			zb0011, bts, err = msgp.ReadMapHeaderBytes(bts)
 			if err != nil {
 				err = msgp.WrapError(err, "MetaUser")
 				return
 			}
 			if z.MetaUser == nil {
-				z.MetaUser = make(map[string]string, zb0010)
+				z.MetaUser = make(map[string]string, zb0011)
 			} else if len(z.MetaUser) > 0 {
 				for key := range z.MetaUser {
 					delete(z.MetaUser, key)
 				}
 			}
-			for zb0010 > 0 {
-				var za0010 string
+			for zb0011 > 0 {
 				var za0011 string
-				zb0010--
-				za0010, bts, err = msgp.ReadStringBytes(bts)
+				var za0012 string
+				zb0011--
+				za0011, bts, err = msgp.ReadStringBytes(bts)
 				if err != nil {
 					err = msgp.WrapError(err, "MetaUser")
 					return
 				}
-				za0011, bts, err = msgp.ReadStringBytes(bts)
+				za0012, bts, err = msgp.ReadStringBytes(bts)
 				if err != nil {
-					err = msgp.WrapError(err, "MetaUser", za0010)
+					err = msgp.WrapError(err, "MetaUser", za0011)
 					return
 				}
-				z.MetaUser[za0010] = za0011
+				z.MetaUser[za0011] = za0012
 			}
 		}
 	default:
@@ -1675,18 +1765,22 @@ func (z *xlMetaV2Object) Msgsize() (s int) {
 	for za0005 := range z.PartETags {
 		s += msgp.StringPrefixSize + len(z.PartETags[za0005])
 	}
-	s += 10 + msgp.ArrayHeaderSize + (len(z.PartSizes) * (msgp.Int64Size)) + 11 + msgp.ArrayHeaderSize + (len(z.PartActualSizes) * (msgp.Int64Size)) + 5 + msgp.Int64Size + 6 + msgp.Int64Size + 8 + msgp.MapHeaderSize
+	s += 10 + msgp.ArrayHeaderSize + (len(z.PartSizes) * (msgp.Int64Size)) + 11 + msgp.ArrayHeaderSize + (len(z.PartActualSizes) * (msgp.Int64Size)) + 8 + msgp.ArrayHeaderSize
+	for za0008 := range z.PartIndices {
+		s += msgp.BytesPrefixSize + len(z.PartIndices[za0008])
+	}
+	s += 5 + msgp.Int64Size + 6 + msgp.Int64Size + 8 + msgp.MapHeaderSize
 	if z.MetaSys != nil {
-		for za0008, za0009 := range z.MetaSys {
-			_ = za0009
-			s += msgp.StringPrefixSize + len(za0008) + msgp.BytesPrefixSize + len(za0009)
+		for za0009, za0010 := range z.MetaSys {
+			_ = za0010
+			s += msgp.StringPrefixSize + len(za0009) + msgp.BytesPrefixSize + len(za0010)
 		}
 	}
 	s += 8 + msgp.MapHeaderSize
 	if z.MetaUser != nil {
-		for za0010, za0011 := range z.MetaUser {
-			_ = za0011
-			s += msgp.StringPrefixSize + len(za0010) + msgp.StringPrefixSize + len(za0011)
+		for za0011, za0012 := range z.MetaUser {
+			_ = za0012
+			s += msgp.StringPrefixSize + len(za0011) + msgp.StringPrefixSize + len(za0012)
 		}
 	}
 	return
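Msgsize returns a worst-case encoded size, which MarshalMsg feeds to msgp.Require. The new terms read naturally: 8 bytes for the "PartIdx" key (1 fixstr header byte + 7 characters), one array header, then a bytes prefix plus payload per index. A small worked version (ours; the prefix sizes are passed in as parameters rather than taken from the library):

package main

import "fmt"

// partIdxSize computes the worst-case contribution of the PartIdx
// field, mirroring the terms added to Msgsize above.
func partIdxSize(indices [][]byte, arrayHeaderSize, bytesPrefixSize int) int {
	s := 8 + arrayHeaderSize // key "PartIdx" + array header
	for _, idx := range indices {
		s += bytesPrefixSize + len(idx)
	}
	return s
}

func main() {
	idx := [][]byte{make([]byte, 16), make([]byte, 16)}
	fmt.Println(partIdxSize(idx, 5, 5)) // 8 + 5 + 2*(5+16) = 55
}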