diff --git a/Makefile b/Makefile
index 859e04093..4cdf1c074 100644
--- a/Makefile
+++ b/Makefile
@@ -20,7 +20,7 @@ help: ## print this help
getdeps: ## fetch necessary dependencies
@mkdir -p ${GOPATH}/bin
@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.43.0
- @echo "Installing msgp" && go install -v github.com/tinylib/msgp@latest
+ @echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.7-0.20211026165309-e818a1881b0e
@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest
crosscompile: ## cross compile minio
diff --git a/cmd/bucket-replication.go b/cmd/bucket-replication.go
index f49271a96..57bb334ed 100644
--- a/cmd/bucket-replication.go
+++ b/cmd/bucket-replication.go
@@ -724,7 +724,7 @@ const (
// matches k1 with all keys, returns 'true' if one of them matches
func equals(k1 string, keys ...string) bool {
for _, k2 := range keys {
- if strings.EqualFold(strings.ToLower(k1), strings.ToLower(k2)) {
+ if strings.EqualFold(k1, k2) {
return true
}
}
diff --git a/cmd/erasure-metadata.go b/cmd/erasure-metadata.go
index a1a1da380..0a381aecc 100644
--- a/cmd/erasure-metadata.go
+++ b/cmd/erasure-metadata.go
@@ -476,9 +476,7 @@ func GetInternalReplicationState(m map[string][]byte) ReplicationState {
// getInternalReplicationState fetches internal replication state from the map m
func getInternalReplicationState(m map[string]string) ReplicationState {
- d := ReplicationState{
- ResetStatusesMap: make(map[string]string),
- }
+ d := ReplicationState{}
for k, v := range m {
switch {
case equals(k, ReservedMetadataPrefixLower+ReplicationTimestamp):
@@ -497,6 +495,9 @@ func getInternalReplicationState(m map[string]string) ReplicationState {
d.PurgeTargets = versionPurgeStatusesMap(v)
case strings.HasPrefix(k, ReservedMetadataPrefixLower+ReplicationReset):
arn := strings.TrimPrefix(k, fmt.Sprintf("%s-", ReservedMetadataPrefixLower+ReplicationReset))
+ if d.ResetStatusesMap == nil {
+ d.ResetStatusesMap = make(map[string]string, 1)
+ }
d.ResetStatusesMap[arn] = v
}
}
diff --git a/cmd/metacache-entries.go b/cmd/metacache-entries.go
index 3df3a0ac6..7d2b353af 100644
--- a/cmd/metacache-entries.go
+++ b/cmd/metacache-entries.go
@@ -148,11 +148,15 @@ func (e *metaCacheEntry) isLatestDeletemarker() bool {
if !isXL2V1Format(e.metadata) {
return false
}
+ if meta, _ := isIndexedMetaV2(e.metadata); meta != nil {
+ return meta.IsLatestDeleteMarker()
+ }
+	// Fall back to decoding all versions when the metadata is not in indexed form.
var xlMeta xlMetaV2
- if err := xlMeta.Load(e.metadata); err != nil || len(xlMeta.Versions) == 0 {
+ if err := xlMeta.Load(e.metadata); err != nil || len(xlMeta.versions) == 0 {
return true
}
- return xlMeta.Versions[len(xlMeta.Versions)-1].Type == DeleteType
+ return xlMeta.versions[0].header.Type == DeleteType
}
// fileInfo returns the decoded metadata.
diff --git a/cmd/testdata/xl.meta-v1.2.zst b/cmd/testdata/xl.meta-v1.2.zst
new file mode 100644
index 000000000..5eb4c5da9
Binary files /dev/null and b/cmd/testdata/xl.meta-v1.2.zst differ
diff --git a/cmd/xl-storage-format-utils.go b/cmd/xl-storage-format-utils.go
index 6eb64f327..fba0dadbf 100644
--- a/cmd/xl-storage-format-utils.go
+++ b/cmd/xl-storage-format-utils.go
@@ -19,27 +19,12 @@ package cmd
import (
"fmt"
- "sort"
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/internal/logger"
+ "github.com/zeebo/xxh3"
)
-// versionsSorter sorts FileInfo slices by version.
-type versionsSorter []FileInfo
-
-func (v versionsSorter) sort() {
- sort.Slice(v, func(i, j int) bool {
- if v[i].IsLatest {
- return true
- }
- if v[j].IsLatest {
- return false
- }
- return v[i].ModTime.After(v[j].ModTime)
- })
-}
-
func getFileInfoVersions(xlMetaBuf []byte, volume, path string) (FileInfoVersions, error) {
fivs, err := getAllFileInfoVersions(xlMetaBuf, volume, path)
if err != nil {
@@ -54,24 +39,35 @@ func getFileInfoVersions(xlMetaBuf []byte, volume, path string) (FileInfoVersion
}
}
fivs.Versions = fivs.Versions[:n]
+ // Update numversions
+ for i := range fivs.Versions {
+ fivs.Versions[i].NumVersions = n
+ }
return fivs, nil
}
func getAllFileInfoVersions(xlMetaBuf []byte, volume, path string) (FileInfoVersions, error) {
if isXL2V1Format(xlMetaBuf) {
- var xlMeta xlMetaV2
- if err := xlMeta.Load(xlMetaBuf); err != nil {
- return FileInfoVersions{}, err
- }
- versions, latestModTime, err := xlMeta.ListVersions(volume, path)
- if err != nil {
+ var versions []FileInfo
+ var err error
+ if buf, _ := isIndexedMetaV2(xlMetaBuf); buf != nil {
+ versions, err = buf.ListVersions(volume, path)
+ } else {
+ var xlMeta xlMetaV2
+ if err := xlMeta.Load(xlMetaBuf); err != nil {
+ return FileInfoVersions{}, err
+ }
+ versions, err = xlMeta.ListVersions(volume, path)
+ }
+ if err != nil || len(versions) == 0 {
return FileInfoVersions{}, err
}
+
return FileInfoVersions{
Volume: volume,
Name: path,
Versions: versions,
- LatestModTime: latestModTime,
+ LatestModTime: versions[0].ModTime,
}, nil
}
@@ -98,11 +94,20 @@ func getAllFileInfoVersions(xlMetaBuf []byte, volume, path string) (FileInfoVers
func getFileInfo(xlMetaBuf []byte, volume, path, versionID string, data bool) (FileInfo, error) {
if isXL2V1Format(xlMetaBuf) {
- var xlMeta xlMetaV2
- if err := xlMeta.Load(xlMetaBuf); err != nil {
- return FileInfo{}, err
+ var fi FileInfo
+ var err error
+ var inData xlMetaInlineData
+ if buf, data := isIndexedMetaV2(xlMetaBuf); buf != nil {
+ inData = data
+ fi, err = buf.ToFileInfo(volume, path, versionID)
+ } else {
+ var xlMeta xlMetaV2
+ if err := xlMeta.Load(xlMetaBuf); err != nil {
+ return FileInfo{}, err
+ }
+ inData = xlMeta.data
+ fi, err = xlMeta.ToFileInfo(volume, path, versionID)
}
- fi, err := xlMeta.ToFileInfo(volume, path, versionID)
if !data || err != nil {
return fi, err
}
@@ -110,12 +115,12 @@ func getFileInfo(xlMetaBuf []byte, volume, path, versionID string, data bool) (F
if versionID == "" {
versionID = nullVersionID
}
- fi.Data = xlMeta.data.find(versionID)
+ fi.Data = inData.find(versionID)
if len(fi.Data) == 0 {
// PR #11758 used DataDir, preserve it
// for users who might have used master
// branch
- fi.Data = xlMeta.data.find(fi.DataDir)
+ fi.Data = inData.find(fi.DataDir)
}
return fi, nil
}
@@ -149,3 +154,27 @@ func getXLDiskLoc(diskID string) (poolIdx, setIdx, diskIdx int) {
}
return -1, -1, -1
}
+
+// hashDeterministicString will return a deterministic hash for the map keys and values.
+// Trivial collisions are avoided, but this is by no means a strong hash.
+func hashDeterministicString(m map[string]string) uint64 {
+ // Seed (random)
+ var crc = uint64(0xc2b40bbac11a7295)
+ // Xor each value to make order independent
+ for k, v := range m {
+ // Separate key and value with an individual xor with a random number.
+ // Add values of each, so they cannot be trivially collided.
+ crc ^= (xxh3.HashString(k) ^ 0x4ee3bbaf7ab2506b) + (xxh3.HashString(v) ^ 0x8da4c8da66194257)
+ }
+ return crc
+}
+
+// hashDeterministicBytes will return a deterministic (weak) hash for the map keys and values.
+// Trivial collisions are avoided, but this is by no means a strong hash.
+func hashDeterministicBytes(m map[string][]byte) uint64 {
+ var crc = uint64(0x1bbc7e1dde654743)
+ for k, v := range m {
+ crc ^= (xxh3.HashString(k) ^ 0x4ee3bbaf7ab2506b) + (xxh3.Hash(v) ^ 0x8da4c8da66194257)
+ }
+ return crc
+}
diff --git a/cmd/xl-storage-format-utils_test.go b/cmd/xl-storage-format-utils_test.go
new file mode 100644
index 000000000..4b087bddf
--- /dev/null
+++ b/cmd/xl-storage-format-utils_test.go
@@ -0,0 +1,93 @@
+package cmd
+
+import (
+ "testing"
+
+ xhttp "github.com/minio/minio/internal/http"
+)
+
+func Test_hashDeterministicString(t *testing.T) {
+ tests := []struct {
+ name string
+ arg map[string]string
+ }{
+ {
+ name: "zero",
+ arg: map[string]string{},
+ },
+ {
+ name: "nil",
+ arg: nil,
+ },
+ {
+ name: "one",
+ arg: map[string]string{"key": "value"},
+ },
+ {
+ name: "several",
+ arg: map[string]string{
+ xhttp.AmzRestore: "FAILED",
+ xhttp.ContentMD5: mustGetUUID(),
+ xhttp.AmzBucketReplicationStatus: "PENDING",
+ xhttp.ContentType: "application/json",
+ },
+ },
+ {
+ name: "someempty",
+ arg: map[string]string{
+ xhttp.AmzRestore: "",
+ xhttp.ContentMD5: mustGetUUID(),
+ xhttp.AmzBucketReplicationStatus: "",
+ xhttp.ContentType: "application/json",
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ const n = 100
+ want := hashDeterministicString(tt.arg)
+ m := tt.arg
+ for i := 0; i < n; i++ {
+ if got := hashDeterministicString(m); got != want {
+ t.Errorf("hashDeterministicString() = %v, want %v", got, want)
+ }
+ }
+ // Check casual collisions
+ if m == nil {
+ m = make(map[string]string)
+ }
+ m["12312312"] = ""
+ if got := hashDeterministicString(m); got == want {
+ t.Errorf("hashDeterministicString() = %v, does not want %v", got, want)
+ }
+ want = hashDeterministicString(m)
+ delete(m, "12312312")
+ m["another"] = ""
+
+ if got := hashDeterministicString(m); got == want {
+ t.Errorf("hashDeterministicString() = %v, does not want %v", got, want)
+ }
+
+ want = hashDeterministicString(m)
+ m["another"] = "hashDeterministicString"
+ if got := hashDeterministicString(m); got == want {
+ t.Errorf("hashDeterministicString() = %v, does not want %v", got, want)
+ }
+
+ want = hashDeterministicString(m)
+ m["another"] = "hashDeterministicStringhashDeterministicStringhashDeterministicStringhashDeterministicStringhashDeterministicStringhashDeterministicStringhashDeterministicString"
+ if got := hashDeterministicString(m); got == want {
+ t.Errorf("hashDeterministicString() = %v, does not want %v", got, want)
+ }
+
+ // Flip key/value
+ want = hashDeterministicString(m)
+ delete(m, "another")
+ m["hashDeterministicStringhashDeterministicStringhashDeterministicStringhashDeterministicStringhashDeterministicStringhashDeterministicStringhashDeterministicString"] = "another"
+ if got := hashDeterministicString(m); got == want {
+ t.Errorf("hashDeterministicString() = %v, does not want %v", got, want)
+ }
+
+ })
+ }
+}
diff --git a/cmd/xl-storage-format-v1.go b/cmd/xl-storage-format-v1.go
index 50049d04a..8ebfcecfd 100644
--- a/cmd/xl-storage-format-v1.go
+++ b/cmd/xl-storage-format-v1.go
@@ -18,11 +18,13 @@
package cmd
import (
+ "encoding/binary"
"encoding/hex"
"encoding/json"
"fmt"
"time"
+ "github.com/cespare/xxhash/v2"
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/internal/logger"
)
@@ -205,6 +207,27 @@ func (m *xlMetaV1Object) ToFileInfo(volume, path string) (FileInfo, error) {
return fi, nil
}
+// Signature will return a signature that is expected to be the same across all disks.
+func (m *xlMetaV1Object) Signature() [4]byte {
+ // Shallow copy
+ c := *m
+ // Zero unimportant fields
+ c.Erasure.Index = 0
+ c.Minio.Release = ""
+ crc := hashDeterministicString(c.Meta)
+ c.Meta = nil
+
+ if bts, err := c.MarshalMsg(metaDataPoolGet()); err == nil {
+ crc ^= xxhash.Sum64(bts)
+ metaDataPoolPut(bts)
+ }
+
+ // Combine upper and lower part
+ var tmp [4]byte
+ binary.LittleEndian.PutUint32(tmp[:], uint32(crc^(crc>>32)))
+ return tmp
+}
+
// XL metadata constants.
const (
// XL meta version.
diff --git a/cmd/xl-storage-format-v2-legacy.go b/cmd/xl-storage-format-v2-legacy.go
new file mode 100644
index 000000000..56e3d909e
--- /dev/null
+++ b/cmd/xl-storage-format-v2-legacy.go
@@ -0,0 +1,89 @@
+// Copyright (c) 2015-2021 MinIO, Inc.
+//
+// This file is part of MinIO Object Storage stack
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/tinylib/msgp/msgp"
+)
+
+// unmarshalV unmarshals with a specific header version.
+func (x *xlMetaV2VersionHeader) unmarshalV(v uint8, bts []byte) (o []byte, err error) {
+ switch v {
+ case 1:
+ return x.unmarshalV1(bts)
+ case xlHeaderVersion:
+ return x.UnmarshalMsg(bts)
+ }
+ return bts, fmt.Errorf("unknown xlHeaderVersion: %d", v)
+}
+
+// unmarshalV1 decodes version 1, never released.
+func (x *xlMetaV2VersionHeader) unmarshalV1(bts []byte) (o []byte, err error) {
+ var zb0001 uint32
+ zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 != 4 {
+ err = msgp.ArrayError{Wanted: 4, Got: zb0001}
+ return
+ }
+ bts, err = msgp.ReadExactBytes(bts, (x.VersionID)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "VersionID")
+ return
+ }
+ x.ModTime, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ModTime")
+ return
+ }
+ {
+ var zb0002 uint8
+ zb0002, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Type")
+ return
+ }
+ x.Type = VersionType(zb0002)
+ }
+ {
+ var zb0003 uint8
+ zb0003, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Flags")
+ return
+ }
+ x.Flags = xlFlags(zb0003)
+ }
+ o = bts
+ return
+}
+
+// unmarshalV unmarshals with a specific metadata version.
+func (j *xlMetaV2Version) unmarshalV(v uint8, bts []byte) (o []byte, err error) {
+ switch v {
+ // We accept un-set as latest version.
+ case 0, xlMetaVersion:
+ return j.UnmarshalMsg(bts)
+ }
+ return bts, fmt.Errorf("unknown xlMetaVersion: %d", v)
+}
diff --git a/cmd/xl-storage-format-v2.go b/cmd/xl-storage-format-v2.go
index 78ab0187d..0674609c5 100644
--- a/cmd/xl-storage-format-v2.go
+++ b/cmd/xl-storage-format-v2.go
@@ -19,7 +19,9 @@ package cmd
import (
"bytes"
+ "context"
"encoding/binary"
+ "encoding/hex"
"errors"
"fmt"
"io"
@@ -46,6 +48,9 @@ var (
xlVersionCurrent [4]byte
)
+//go:generate msgp -file=$GOFILE -unexported
+//go:generate stringer -type VersionType -output=xl-storage-format-v2_string.go $GOFILE
+
const (
// Breaking changes.
// Newer versions cannot be read by older software.
@@ -56,7 +61,7 @@ const (
// Bumping this is informational, but should be done
// if any change is made to the data stored, bumping this
// will allow to detect the exact version later.
- xlVersionMinor = 2
+ xlVersionMinor = 3
)
func init() {
@@ -64,35 +69,6 @@ func init() {
binary.LittleEndian.PutUint16(xlVersionCurrent[2:4], xlVersionMinor)
}
-// checkXL2V1 will check if the metadata has correct header and is a known major version.
-// The remaining payload and versions are returned.
-func checkXL2V1(buf []byte) (payload []byte, major, minor uint16, err error) {
- if len(buf) <= 8 {
- return payload, 0, 0, fmt.Errorf("xlMeta: no data")
- }
-
- if !bytes.Equal(buf[:4], xlHeader[:]) {
- return payload, 0, 0, fmt.Errorf("xlMeta: unknown XLv2 header, expected %v, got %v", xlHeader[:4], buf[:4])
- }
-
- if bytes.Equal(buf[4:8], []byte("1 ")) {
- // Set as 1,0.
- major, minor = 1, 0
- } else {
- major, minor = binary.LittleEndian.Uint16(buf[4:6]), binary.LittleEndian.Uint16(buf[6:8])
- }
- if major > xlVersionMajor {
- return buf[8:], major, minor, fmt.Errorf("xlMeta: unknown major version %d found", major)
- }
-
- return buf[8:], major, minor, nil
-}
-
-func isXL2V1Format(buf []byte) bool {
- _, _, _, err := checkXL2V1(buf)
- return err == nil
-}
-
// The []journal contains all the different versions of the object.
//
// This array can have 3 kinds of objects:
@@ -124,8 +100,6 @@ func isXL2V1Format(buf []byte) bool {
// │ └── part.1
// └── xl.meta
-//go:generate msgp -file=$GOFILE -unexported
-
// VersionType defines the type of journal type of the current entry.
type VersionType uint8
@@ -187,26 +161,26 @@ type xlMetaV2DeleteMarker struct {
// xlMetaV2Object defines the data struct for object journal type
type xlMetaV2Object struct {
- VersionID [16]byte `json:"ID" msg:"ID"` // Version ID
- DataDir [16]byte `json:"DDir" msg:"DDir"` // Data dir ID
- ErasureAlgorithm ErasureAlgo `json:"EcAlgo" msg:"EcAlgo"` // Erasure coding algorithm
- ErasureM int `json:"EcM" msg:"EcM"` // Erasure data blocks
- ErasureN int `json:"EcN" msg:"EcN"` // Erasure parity blocks
- ErasureBlockSize int64 `json:"EcBSize" msg:"EcBSize"` // Erasure block size
- ErasureIndex int `json:"EcIndex" msg:"EcIndex"` // Erasure disk index
- ErasureDist []uint8 `json:"EcDist" msg:"EcDist"` // Erasure distribution
- BitrotChecksumAlgo ChecksumAlgo `json:"CSumAlgo" msg:"CSumAlgo"` // Bitrot checksum algo
- PartNumbers []int `json:"PartNums" msg:"PartNums"` // Part Numbers
- PartETags []string `json:"PartETags" msg:"PartETags"` // Part ETags
- PartSizes []int64 `json:"PartSizes" msg:"PartSizes"` // Part Sizes
- PartActualSizes []int64 `json:"PartASizes,omitempty" msg:"PartASizes,omitempty"` // Part ActualSizes (compression)
- Size int64 `json:"Size" msg:"Size"` // Object version size
- ModTime int64 `json:"MTime" msg:"MTime"` // Object version modified time
- MetaSys map[string][]byte `json:"MetaSys,omitempty" msg:"MetaSys,omitempty"` // Object version internal metadata
- MetaUser map[string]string `json:"MetaUsr,omitempty" msg:"MetaUsr,omitempty"` // Object version metadata set by user
+ VersionID [16]byte `json:"ID" msg:"ID"` // Version ID
+ DataDir [16]byte `json:"DDir" msg:"DDir"` // Data dir ID
+ ErasureAlgorithm ErasureAlgo `json:"EcAlgo" msg:"EcAlgo"` // Erasure coding algorithm
+ ErasureM int `json:"EcM" msg:"EcM"` // Erasure data blocks
+ ErasureN int `json:"EcN" msg:"EcN"` // Erasure parity blocks
+ ErasureBlockSize int64 `json:"EcBSize" msg:"EcBSize"` // Erasure block size
+ ErasureIndex int `json:"EcIndex" msg:"EcIndex"` // Erasure disk index
+ ErasureDist []uint8 `json:"EcDist" msg:"EcDist"` // Erasure distribution
+ BitrotChecksumAlgo ChecksumAlgo `json:"CSumAlgo" msg:"CSumAlgo"` // Bitrot checksum algo
+ PartNumbers []int `json:"PartNums" msg:"PartNums"` // Part Numbers
+ PartETags []string `json:"PartETags" msg:"PartETags,allownil"` // Part ETags
+ PartSizes []int64 `json:"PartSizes" msg:"PartSizes"` // Part Sizes
+ PartActualSizes []int64 `json:"PartASizes,omitempty" msg:"PartASizes,allownil"` // Part ActualSizes (compression)
+ Size int64 `json:"Size" msg:"Size"` // Object version size
+ ModTime int64 `json:"MTime" msg:"MTime"` // Object version modified time
+ MetaSys map[string][]byte `json:"MetaSys,omitempty" msg:"MetaSys,allownil"` // Object version internal metadata
+ MetaUser map[string]string `json:"MetaUsr,omitempty" msg:"MetaUsr,allownil"` // Object version metadata set by user
}
-// xlMetaV2Version describes the jouranal entry, Type defines
+// xlMetaV2Version describes the journal entry, Type defines
// the current journal entry type other types might be nil based
// on what Type field carries, it is imperative for the caller
// to verify which journal type first before accessing rest of the fields.
@@ -217,6 +191,84 @@ type xlMetaV2Version struct {
DeleteMarker *xlMetaV2DeleteMarker `json:"DelObj,omitempty" msg:"DelObj,omitempty"`
}
+// xlFlags contains flags on the object.
+// This can be extended up to 64 bits without breaking compatibility.
+type xlFlags uint8
+
+const (
+ xlFlagFreeVersion xlFlags = 1 << iota
+ xlFlagUsesDataDir
+ xlFlagInlineData
+)
+
+func (x xlFlags) String() string {
+ var s strings.Builder
+ if x&xlFlagFreeVersion != 0 {
+ s.WriteString("FreeVersion")
+ }
+ if x&xlFlagUsesDataDir != 0 {
+ if s.Len() > 0 {
+ s.WriteByte(',')
+ }
+ s.WriteString("UsesDD")
+ }
+ if x&xlFlagInlineData != 0 {
+ if s.Len() > 0 {
+ s.WriteByte(',')
+ }
+ s.WriteString("Inline")
+ }
+ return s.String()
+}
+
+// checkXL2V1 will check if the metadata has correct header and is a known major version.
+// The remaining payload and versions are returned.
+func checkXL2V1(buf []byte) (payload []byte, major, minor uint16, err error) {
+ if len(buf) <= 8 {
+ return payload, 0, 0, fmt.Errorf("xlMeta: no data")
+ }
+
+ if !bytes.Equal(buf[:4], xlHeader[:]) {
+ return payload, 0, 0, fmt.Errorf("xlMeta: unknown XLv2 header, expected %v, got %v", xlHeader[:4], buf[:4])
+ }
+
+ if bytes.Equal(buf[4:8], []byte("1 ")) {
+ // Set as 1,0.
+ major, minor = 1, 0
+ } else {
+ major, minor = binary.LittleEndian.Uint16(buf[4:6]), binary.LittleEndian.Uint16(buf[6:8])
+ }
+ if major > xlVersionMajor {
+ return buf[8:], major, minor, fmt.Errorf("xlMeta: unknown major version %d found", major)
+ }
+
+ return buf[8:], major, minor, nil
+}
+
+func isXL2V1Format(buf []byte) bool {
+ _, _, _, err := checkXL2V1(buf)
+ return err == nil
+}
+
+//msgp:tuple xlMetaV2VersionHeader
+type xlMetaV2VersionHeader struct {
+ VersionID [16]byte
+ ModTime int64
+ Signature [4]byte
+ Type VersionType
+ Flags xlFlags
+}
+
+func (x xlMetaV2VersionHeader) String() string {
+ return fmt.Sprintf("Type: %s, VersionID: %s, Signature: %s, ModTime: %s, Flags: %s",
+ x.Type.String(),
+ hex.EncodeToString(x.VersionID[:]),
+ hex.EncodeToString(x.Signature[:]),
+ time.Unix(0, x.ModTime),
+ x.Flags.String(),
+ )
+}
+
// Valid xl meta xlMetaV2Version is valid
func (j xlMetaV2Version) Valid() bool {
if !j.Type.valid() {
@@ -239,6 +291,61 @@ func (j xlMetaV2Version) Valid() bool {
return false
}
+// header will return a shallow header of the version.
+func (j *xlMetaV2Version) header() xlMetaV2VersionHeader {
+ var flags xlFlags
+ if j.FreeVersion() {
+ flags |= xlFlagFreeVersion
+ }
+ if j.Type == ObjectType && j.ObjectV2.UsesDataDir() {
+ flags |= xlFlagUsesDataDir
+ }
+ if j.Type == ObjectType && j.ObjectV2.InlineData() {
+ flags |= xlFlagInlineData
+ }
+ return xlMetaV2VersionHeader{
+ VersionID: j.getVersionID(),
+ ModTime: j.getModTime().UnixNano(),
+ Signature: j.getSignature(),
+ Type: j.Type,
+ Flags: flags,
+ }
+}
+
+// FreeVersion returns true if x represents a free-version, false otherwise.
+func (x xlMetaV2VersionHeader) FreeVersion() bool {
+ return x.Flags&xlFlagFreeVersion != 0
+}
+
+// UsesDataDir returns true if this object version uses its data directory for
+// its contents and false otherwise.
+func (x xlMetaV2VersionHeader) UsesDataDir() bool {
+ return x.Flags&xlFlagUsesDataDir != 0
+}
+
+// InlineData returns whether inline data has been set.
+// Note that false does not mean there is no inline data,
+// only that it is unlikely.
+func (x xlMetaV2VersionHeader) InlineData() bool {
+ return x.Flags&xlFlagInlineData != 0
+}
+
+// signatureErr is a signature returned when an error occurs.
+var signatureErr = [4]byte{'e', 'r', 'r', 0}
+
+// getSignature will return a signature that is expected to be the same across all disks.
+func (j xlMetaV2Version) getSignature() [4]byte {
+ switch j.Type {
+ case ObjectType:
+ return j.ObjectV2.Signature()
+ case DeleteType:
+ return j.DeleteMarker.Signature()
+ case LegacyType:
+ return j.ObjectV1.Signature()
+ }
+ return signatureErr
+}
+
// getModTime will return the ModTime of the underlying version.
func (j xlMetaV2Version) getModTime() time.Time {
switch j.Type {
@@ -252,705 +359,35 @@ func (j xlMetaV2Version) getModTime() time.Time {
return time.Time{}
}
-// xlMetaV2 - object meta structure defines the format and list of
-// the journals for the object.
-type xlMetaV2 struct {
- Versions []xlMetaV2Version `json:"Versions" msg:"Versions"`
-
- // data will contain raw data if any.
- // data will be one or more versions indexed by versionID.
- // To remove all data set to nil.
- data xlMetaInlineData `msg:"-"`
+// getVersionID will return the versionID of the underlying version.
+func (j xlMetaV2Version) getVersionID() [16]byte {
+ switch j.Type {
+ case ObjectType:
+ return j.ObjectV2.VersionID
+ case DeleteType:
+ return j.DeleteMarker.VersionID
+ case LegacyType:
+ return [16]byte{}
+ }
+ return [16]byte{}
}
-// xlMetaInlineData is serialized data in [string][]byte pairs.
-//
-//msgp:ignore xlMetaInlineData
-type xlMetaInlineData []byte
-
-// xlMetaInlineDataVer indicates the version of the inline data structure.
-const xlMetaInlineDataVer = 1
-
-// versionOK returns whether the version is ok.
-func (x xlMetaInlineData) versionOK() bool {
- if len(x) == 0 {
- return true
+func (j *xlMetaV2Version) ToFileInfo(volume, path string) (FileInfo, error) {
+ switch j.Type {
+ case ObjectType:
+ return j.ObjectV2.ToFileInfo(volume, path)
+ case DeleteType:
+ return j.DeleteMarker.ToFileInfo(volume, path)
+ case LegacyType:
+ return j.ObjectV1.ToFileInfo(volume, path)
}
- return x[0] > 0 && x[0] <= xlMetaInlineDataVer
+ return FileInfo{}, errFileNotFound
}
-// afterVersion returns the payload after the version, if any.
-func (x xlMetaInlineData) afterVersion() []byte {
- if len(x) == 0 {
- return x
- }
- return x[1:]
-}
-
-// find the data with key s.
-// Returns nil if not for or an error occurs.
-func (x xlMetaInlineData) find(key string) []byte {
- if len(x) == 0 || !x.versionOK() {
- return nil
- }
- sz, buf, err := msgp.ReadMapHeaderBytes(x.afterVersion())
- if err != nil || sz == 0 {
- return nil
- }
- for i := uint32(0); i < sz; i++ {
- var found []byte
- found, buf, err = msgp.ReadMapKeyZC(buf)
- if err != nil || sz == 0 {
- return nil
- }
- if string(found) == key {
- val, _, _ := msgp.ReadBytesZC(buf)
- return val
- }
- // Skip it
- _, buf, err = msgp.ReadBytesZC(buf)
- if err != nil {
- return nil
- }
- }
- return nil
-}
-
-// validate checks if the data is valid.
-// It does not check integrity of the stored data.
-func (x xlMetaInlineData) validate() error {
- if len(x) == 0 {
- return nil
- }
-
- if !x.versionOK() {
- return fmt.Errorf("xlMetaInlineData: unknown version 0x%x", x[0])
- }
-
- sz, buf, err := msgp.ReadMapHeaderBytes(x.afterVersion())
- if err != nil {
- return fmt.Errorf("xlMetaInlineData: %w", err)
- }
-
- for i := uint32(0); i < sz; i++ {
- var key []byte
- key, buf, err = msgp.ReadMapKeyZC(buf)
- if err != nil {
- return fmt.Errorf("xlMetaInlineData: %w", err)
- }
- if len(key) == 0 {
- return fmt.Errorf("xlMetaInlineData: key %d is length 0", i)
- }
- _, buf, err = msgp.ReadBytesZC(buf)
- if err != nil {
- return fmt.Errorf("xlMetaInlineData: %w", err)
- }
- }
-
- return nil
-}
-
-// repair will copy all seemingly valid data entries from a corrupted set.
-// This does not ensure that data is correct, but will allow all operations to complete.
-func (x *xlMetaInlineData) repair() {
- data := *x
- if len(data) == 0 {
- return
- }
-
- if !data.versionOK() {
- *x = nil
- return
- }
-
- sz, buf, err := msgp.ReadMapHeaderBytes(data.afterVersion())
- if err != nil {
- *x = nil
- return
- }
-
- // Remove all current data
- keys := make([][]byte, 0, sz)
- vals := make([][]byte, 0, sz)
- for i := uint32(0); i < sz; i++ {
- var key, val []byte
- key, buf, err = msgp.ReadMapKeyZC(buf)
- if err != nil {
- break
- }
- if len(key) == 0 {
- break
- }
- val, buf, err = msgp.ReadBytesZC(buf)
- if err != nil {
- break
- }
- keys = append(keys, key)
- vals = append(vals, val)
- }
- x.serialize(-1, keys, vals)
-}
-
-// validate checks if the data is valid.
-// It does not check integrity of the stored data.
-func (x xlMetaInlineData) list() ([]string, error) {
- if len(x) == 0 {
- return nil, nil
- }
- if !x.versionOK() {
- return nil, errors.New("xlMetaInlineData: unknown version")
- }
-
- sz, buf, err := msgp.ReadMapHeaderBytes(x.afterVersion())
- if err != nil {
- return nil, err
- }
- keys := make([]string, 0, sz)
- for i := uint32(0); i < sz; i++ {
- var key []byte
- key, buf, err = msgp.ReadMapKeyZC(buf)
- if err != nil {
- return keys, err
- }
- if len(key) == 0 {
- return keys, fmt.Errorf("xlMetaInlineData: key %d is length 0", i)
- }
- keys = append(keys, string(key))
- // Skip data...
- _, buf, err = msgp.ReadBytesZC(buf)
- if err != nil {
- return keys, err
- }
- }
- return keys, nil
-}
-
-// serialize will serialize the provided keys and values.
-// The function will panic if keys/value slices aren't of equal length.
-// Payload size can give an indication of expected payload size.
-// If plSize is <= 0 it will be calculated.
-func (x *xlMetaInlineData) serialize(plSize int, keys [][]byte, vals [][]byte) {
- if len(keys) != len(vals) {
- panic(fmt.Errorf("xlMetaInlineData.serialize: keys/value number mismatch"))
- }
- if len(keys) == 0 {
- *x = nil
- return
- }
- if plSize <= 0 {
- plSize = 1 + msgp.MapHeaderSize
- for i := range keys {
- plSize += len(keys[i]) + len(vals[i]) + msgp.StringPrefixSize + msgp.ArrayHeaderSize
- }
- }
- payload := make([]byte, 1, plSize)
- payload[0] = xlMetaInlineDataVer
- payload = msgp.AppendMapHeader(payload, uint32(len(keys)))
- for i := range keys {
- payload = msgp.AppendStringFromBytes(payload, keys[i])
- payload = msgp.AppendBytes(payload, vals[i])
- }
- *x = payload
-}
-
-// entries returns the number of entries in the data.
-func (x xlMetaInlineData) entries() int {
- if len(x) == 0 || !x.versionOK() {
- return 0
- }
- sz, _, _ := msgp.ReadMapHeaderBytes(x.afterVersion())
- return int(sz)
-}
-
-// replace will add or replace a key/value pair.
-func (x *xlMetaInlineData) replace(key string, value []byte) {
- in := x.afterVersion()
- sz, buf, _ := msgp.ReadMapHeaderBytes(in)
- keys := make([][]byte, 0, sz+1)
- vals := make([][]byte, 0, sz+1)
-
- // Version plus header...
- plSize := 1 + msgp.MapHeaderSize
- replaced := false
- for i := uint32(0); i < sz; i++ {
- var found, foundVal []byte
- var err error
- found, buf, err = msgp.ReadMapKeyZC(buf)
- if err != nil {
- break
- }
- foundVal, buf, err = msgp.ReadBytesZC(buf)
- if err != nil {
- break
- }
- plSize += len(found) + msgp.StringPrefixSize + msgp.ArrayHeaderSize
- keys = append(keys, found)
- if string(found) == key {
- vals = append(vals, value)
- plSize += len(value)
- replaced = true
- } else {
- vals = append(vals, foundVal)
- plSize += len(foundVal)
- }
- }
-
- // Add one more.
- if !replaced {
- keys = append(keys, []byte(key))
- vals = append(vals, value)
- plSize += len(key) + len(value) + msgp.StringPrefixSize + msgp.ArrayHeaderSize
- }
-
- // Reserialize...
- x.serialize(plSize, keys, vals)
-}
-
-// rename will rename a key.
-// Returns whether the key was found.
-func (x *xlMetaInlineData) rename(oldKey, newKey string) bool {
- in := x.afterVersion()
- sz, buf, _ := msgp.ReadMapHeaderBytes(in)
- keys := make([][]byte, 0, sz)
- vals := make([][]byte, 0, sz)
-
- // Version plus header...
- plSize := 1 + msgp.MapHeaderSize
- found := false
- for i := uint32(0); i < sz; i++ {
- var foundKey, foundVal []byte
- var err error
- foundKey, buf, err = msgp.ReadMapKeyZC(buf)
- if err != nil {
- break
- }
- foundVal, buf, err = msgp.ReadBytesZC(buf)
- if err != nil {
- break
- }
- plSize += len(foundVal) + msgp.StringPrefixSize + msgp.ArrayHeaderSize
- vals = append(vals, foundVal)
- if string(foundKey) != oldKey {
- keys = append(keys, foundKey)
- plSize += len(foundKey)
- } else {
- keys = append(keys, []byte(newKey))
- plSize += len(newKey)
- found = true
- }
- }
- // If not found, just return.
- if !found {
- return false
- }
-
- // Reserialize...
- x.serialize(plSize, keys, vals)
- return true
-}
-
-// remove will remove one or more keys.
-// Returns true if any key was found.
-func (x *xlMetaInlineData) remove(keys ...string) bool {
- in := x.afterVersion()
- sz, buf, _ := msgp.ReadMapHeaderBytes(in)
- newKeys := make([][]byte, 0, sz)
- newVals := make([][]byte, 0, sz)
- var removeKey func(s []byte) bool
-
- // Copy if big number of compares...
- if len(keys) > 5 && sz > 5 {
- mKeys := make(map[string]struct{}, len(keys))
- for _, key := range keys {
- mKeys[key] = struct{}{}
- }
- removeKey = func(s []byte) bool {
- _, ok := mKeys[string(s)]
- return ok
- }
- } else {
- removeKey = func(s []byte) bool {
- for _, key := range keys {
- if key == string(s) {
- return true
- }
- }
- return false
- }
- }
-
- // Version plus header...
- plSize := 1 + msgp.MapHeaderSize
- found := false
- for i := uint32(0); i < sz; i++ {
- var foundKey, foundVal []byte
- var err error
- foundKey, buf, err = msgp.ReadMapKeyZC(buf)
- if err != nil {
- break
- }
- foundVal, buf, err = msgp.ReadBytesZC(buf)
- if err != nil {
- break
- }
- if !removeKey(foundKey) {
- plSize += msgp.StringPrefixSize + msgp.ArrayHeaderSize + len(foundKey) + len(foundVal)
- newKeys = append(newKeys, foundKey)
- newVals = append(newVals, foundVal)
- } else {
- found = true
- }
- }
- // If not found, just return.
- if !found {
- return false
- }
- // If none left...
- if len(newKeys) == 0 {
- *x = nil
- return true
- }
-
- // Reserialize...
- x.serialize(plSize, newKeys, newVals)
- return true
-}
-
-// xlMetaV2TrimData will trim any data from the metadata without unmarshalling it.
-// If any error occurs the unmodified data is returned.
-func xlMetaV2TrimData(buf []byte) []byte {
- metaBuf, min, maj, err := checkXL2V1(buf)
- if err != nil {
- return buf
- }
- if maj == 1 && min < 1 {
- // First version to carry data.
- return buf
- }
- // Skip header
- _, metaBuf, err = msgp.ReadBytesZC(metaBuf)
- if err != nil {
- logger.LogIf(GlobalContext, err)
- return buf
- }
- // Skip CRC
- if maj > 1 || min >= 2 {
- _, metaBuf, err = msgp.ReadUint32Bytes(metaBuf)
- logger.LogIf(GlobalContext, err)
- }
- // = input - current pos
- ends := len(buf) - len(metaBuf)
- if ends > len(buf) {
- return buf
- }
-
- return buf[:ends]
-}
-
-// AddLegacy adds a legacy version, is only called when no prior
-// versions exist, safe to use it by only one function in xl-storage(RenameData)
-func (z *xlMetaV2) AddLegacy(m *xlMetaV1Object) error {
- if !m.valid() {
- return errFileCorrupt
- }
- m.VersionID = nullVersionID
- m.DataDir = legacyDataDir
- z.Versions = []xlMetaV2Version{
- {
- Type: LegacyType,
- ObjectV1: m,
- },
- }
- return nil
-}
-
-// Load unmarshal and load the entire message pack.
-// Note that references to the incoming buffer may be kept as data.
-func (z *xlMetaV2) Load(buf []byte) error {
- buf, major, minor, err := checkXL2V1(buf)
- if err != nil {
- return fmt.Errorf("xlMetaV2.Load %w", err)
- }
- switch major {
- case 1:
- switch minor {
- case 0:
- _, err = z.UnmarshalMsg(buf)
- if err != nil {
- return fmt.Errorf("xlMetaV2.Load %w", err)
- }
- return nil
- case 1, 2:
- v, buf, err := msgp.ReadBytesZC(buf)
- if err != nil {
- return fmt.Errorf("xlMetaV2.Load version(%d), bufLen(%d) %w", minor, len(buf), err)
- }
- if minor >= 2 {
- if crc, nbuf, err := msgp.ReadUint32Bytes(buf); err == nil {
- // Read metadata CRC (added in v2)
- buf = nbuf
- if got := uint32(xxhash.Sum64(v)); got != crc {
- return fmt.Errorf("xlMetaV2.Load version(%d), CRC mismatch, want 0x%x, got 0x%x", minor, crc, got)
- }
- } else {
- return fmt.Errorf("xlMetaV2.Load version(%d), loading CRC: %w", minor, err)
- }
- }
-
- if _, err = z.UnmarshalMsg(v); err != nil {
- return fmt.Errorf("xlMetaV2.Load version(%d), vLen(%d), %w", minor, len(v), err)
- }
- // Add remaining data.
- z.data = buf
- if err = z.data.validate(); err != nil {
- z.data.repair()
- logger.Info("xlMetaV2.Load: data validation failed: %v. %d entries after repair", err, z.data.entries())
- }
- default:
- return errors.New("unknown minor metadata version")
- }
- default:
- return errors.New("unknown major metadata version")
- }
- return nil
-}
-
-// AppendTo will marshal the data in z and append it to the provided slice.
-func (z *xlMetaV2) AppendTo(dst []byte) ([]byte, error) {
- sz := len(xlHeader) + len(xlVersionCurrent) + msgp.ArrayHeaderSize + z.Msgsize() + len(z.data) + len(dst) + msgp.Uint32Size
- if cap(dst) < sz {
- buf := make([]byte, len(dst), sz)
- copy(buf, dst)
- dst = buf
- }
- if err := z.data.validate(); err != nil {
- return nil, err
- }
-
- dst = append(dst, xlHeader[:]...)
- dst = append(dst, xlVersionCurrent[:]...)
- // Add "bin 32" type header to always have enough space.
- // We will fill out the correct size when we know it.
- dst = append(dst, 0xc6, 0, 0, 0, 0)
- dataOffset := len(dst)
- dst, err := z.MarshalMsg(dst)
- if err != nil {
- return nil, err
- }
-
- // Update size...
- binary.BigEndian.PutUint32(dst[dataOffset-4:dataOffset], uint32(len(dst)-dataOffset))
-
- // Add CRC of metadata.
- dst = msgp.AppendUint32(dst, uint32(xxhash.Sum64(dst[dataOffset:])))
- return append(dst, z.data...), nil
-}
-
-// UpdateObjectVersion updates metadata and modTime for a given
-// versionID, NOTE: versionID must be valid and should exist -
-// and must not be a DeleteMarker or legacy object, if no
-// versionID is specified 'null' versionID is updated instead.
-//
-// It is callers responsibility to set correct versionID, this
-// function shouldn't be further extended to update immutable
-// values such as ErasureInfo, ChecksumInfo.
-//
-// Metadata is only updated to new values, existing values
-// stay as is, if you wish to update all values you should
-// update all metadata freshly before calling this function
-// in-case you wish to clear existing metadata.
-func (z *xlMetaV2) UpdateObjectVersion(fi FileInfo) error {
- if fi.VersionID == "" {
- // this means versioning is not yet
- // enabled or suspend i.e all versions
- // are basically default value i.e "null"
- fi.VersionID = nullVersionID
- }
-
- var uv uuid.UUID
- var err error
- if fi.VersionID != "" && fi.VersionID != nullVersionID {
- uv, err = uuid.Parse(fi.VersionID)
- if err != nil {
- return err
- }
- }
-
- for i, version := range z.Versions {
- if !version.Valid() {
- return errFileCorrupt
- }
- switch version.Type {
- case LegacyType:
- if version.ObjectV1.VersionID == fi.VersionID {
- return errMethodNotAllowed
- }
- case ObjectType:
- if version.ObjectV2.VersionID == uv {
- for k, v := range fi.Metadata {
- if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
- z.Versions[i].ObjectV2.MetaSys[k] = []byte(v)
- } else {
- z.Versions[i].ObjectV2.MetaUser[k] = v
- }
- }
- if !fi.ModTime.IsZero() {
- z.Versions[i].ObjectV2.ModTime = fi.ModTime.UnixNano()
- }
- return nil
- }
- case DeleteType:
- if version.DeleteMarker.VersionID == uv {
- return errMethodNotAllowed
- }
- }
- }
-
- return errFileVersionNotFound
-}
-
-// AddVersion adds a new version
-func (z *xlMetaV2) AddVersion(fi FileInfo) error {
- if fi.VersionID == "" {
- // this means versioning is not yet
- // enabled or suspend i.e all versions
- // are basically default value i.e "null"
- fi.VersionID = nullVersionID
- }
-
- var uv uuid.UUID
- var err error
- if fi.VersionID != "" && fi.VersionID != nullVersionID {
- uv, err = uuid.Parse(fi.VersionID)
- if err != nil {
- return err
- }
- }
-
- var dd uuid.UUID
- if fi.DataDir != "" {
- dd, err = uuid.Parse(fi.DataDir)
- if err != nil {
- return err
- }
- }
-
- ventry := xlMetaV2Version{}
-
- if fi.Deleted {
- ventry.Type = DeleteType
- ventry.DeleteMarker = &xlMetaV2DeleteMarker{
- VersionID: uv,
- ModTime: fi.ModTime.UnixNano(),
- MetaSys: make(map[string][]byte),
- }
- } else {
- ventry.Type = ObjectType
- ventry.ObjectV2 = &xlMetaV2Object{
- VersionID: uv,
- DataDir: dd,
- Size: fi.Size,
- ModTime: fi.ModTime.UnixNano(),
- ErasureAlgorithm: ReedSolomon,
- ErasureM: fi.Erasure.DataBlocks,
- ErasureN: fi.Erasure.ParityBlocks,
- ErasureBlockSize: fi.Erasure.BlockSize,
- ErasureIndex: fi.Erasure.Index,
- BitrotChecksumAlgo: HighwayHash,
- ErasureDist: make([]uint8, len(fi.Erasure.Distribution)),
- PartNumbers: make([]int, len(fi.Parts)),
- PartETags: make([]string, len(fi.Parts)),
- PartSizes: make([]int64, len(fi.Parts)),
- PartActualSizes: make([]int64, len(fi.Parts)),
- MetaSys: make(map[string][]byte),
- MetaUser: make(map[string]string, len(fi.Metadata)),
- }
-
- for i := range fi.Erasure.Distribution {
- ventry.ObjectV2.ErasureDist[i] = uint8(fi.Erasure.Distribution[i])
- }
-
- for i := range fi.Parts {
- ventry.ObjectV2.PartSizes[i] = fi.Parts[i].Size
- if fi.Parts[i].ETag != "" {
- ventry.ObjectV2.PartETags[i] = fi.Parts[i].ETag
- }
- ventry.ObjectV2.PartNumbers[i] = fi.Parts[i].Number
- ventry.ObjectV2.PartActualSizes[i] = fi.Parts[i].ActualSize
- }
-
- tierFVIDKey := ReservedMetadataPrefixLower + tierFVID
- tierFVMarkerKey := ReservedMetadataPrefixLower + tierFVMarker
- for k, v := range fi.Metadata {
- if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
- // Skip tierFVID, tierFVMarker keys; it's used
- // only for creating free-version.
- switch k {
- case tierFVIDKey, tierFVMarkerKey:
- continue
- }
-
- ventry.ObjectV2.MetaSys[k] = []byte(v)
- } else {
- ventry.ObjectV2.MetaUser[k] = v
- }
- }
-
- // If asked to save data.
- if len(fi.Data) > 0 || fi.Size == 0 {
- z.data.replace(fi.VersionID, fi.Data)
- }
-
- if fi.TransitionStatus != "" {
- ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionStatus] = []byte(fi.TransitionStatus)
- }
- if fi.TransitionedObjName != "" {
- ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionedObjectName] = []byte(fi.TransitionedObjName)
- }
- if fi.TransitionVersionID != "" {
- ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionedVersionID] = []byte(fi.TransitionVersionID)
- }
- if fi.TransitionTier != "" {
- ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionTier] = []byte(fi.TransitionTier)
- }
- }
-
- if !ventry.Valid() {
- return errors.New("internal error: invalid version entry generated")
- }
-
- for i, version := range z.Versions {
- if !version.Valid() {
- return errFileCorrupt
- }
- switch version.Type {
- case LegacyType:
- // This would convert legacy type into new ObjectType
- // this means that we are basically purging the `null`
- // version of the object.
- if version.ObjectV1.VersionID == fi.VersionID {
- z.Versions[i] = ventry
- return nil
- }
- case ObjectType:
- if version.ObjectV2.VersionID == uv {
- z.Versions[i] = ventry
- return nil
- }
- case DeleteType:
- // Allowing delete marker to replaced with an proper
- // object data type as well, this is not S3 complaint
- // behavior but kept here for future flexibility.
- if version.DeleteMarker.VersionID == uv {
- z.Versions[i] = ventry
- return nil
- }
- }
- }
-
- z.Versions = append(z.Versions, ventry)
- return nil
-}
+const (
+ xlHeaderVersion = 2
+ xlMetaVersion = 1
+)
func (j xlMetaV2DeleteMarker) ToFileInfo(volume, path string) (FileInfo, error) {
versionID := ""
@@ -978,6 +415,25 @@ func (j xlMetaV2DeleteMarker) ToFileInfo(volume, path string) (FileInfo, error)
return fi, nil
}
+// Signature will return a signature that is expected to be the same across all disks.
+func (j *xlMetaV2DeleteMarker) Signature() [4]byte {
+ // Shallow copy
+ c := *j
+
+ // Marshal metadata
+ crc := hashDeterministicBytes(c.MetaSys)
+ c.MetaSys = nil
+ if bts, err := c.MarshalMsg(metaDataPoolGet()); err == nil {
+ crc ^= xxhash.Sum64(bts)
+ metaDataPoolPut(bts)
+ }
+
+ // Combine upper and lower part
+ var tmp [4]byte
+ binary.LittleEndian.PutUint32(tmp[:], uint32(crc^(crc>>32)))
+ return tmp
+}
+
// UsesDataDir returns true if this object version uses its data directory for
// its contents and false otherwise.
func (j xlMetaV2Object) UsesDataDir() bool {
@@ -990,6 +446,14 @@ func (j xlMetaV2Object) UsesDataDir() bool {
return isRestoredObjectOnDisk(j.MetaUser)
}
+// InlineData returns whether inline data has been set.
+// Note that false does not mean there is no inline data,
+// only that it is unlikely.
+func (j xlMetaV2Object) InlineData() bool {
+ _, ok := j.MetaSys[ReservedMetadataPrefixLower+"inline-data"]
+ return ok
+}
+
func (j *xlMetaV2Object) SetTransition(fi FileInfo) {
j.MetaSys[ReservedMetadataPrefixLower+TransitionStatus] = []byte(fi.TransitionStatus)
j.MetaSys[ReservedMetadataPrefixLower+TransitionedObjectName] = []byte(fi.TransitionedObjName)
@@ -1003,6 +467,40 @@ func (j *xlMetaV2Object) RemoveRestoreHdrs() {
delete(j.MetaUser, xhttp.AmzRestoreRequestDate)
}
+// Signature will return a signature that is expected to be the same across all disks.
+func (j *xlMetaV2Object) Signature() [4]byte {
+ // Shallow copy
+ c := *j
+ // Zero fields that will vary across disks
+ c.ErasureIndex = 0
+
+	// Nil out zero-length slices (marshaled with msgp "allownil"), so we don't differentiate between nil and 0 len.
+ if len(c.PartETags) == 0 {
+ c.PartETags = nil
+ }
+ if len(c.PartActualSizes) == 0 {
+ c.PartActualSizes = nil
+ }
+
+ // Get a 64 bit CRC
+ crc := hashDeterministicString(c.MetaUser)
+ crc ^= hashDeterministicBytes(c.MetaSys)
+
+ // Nil fields.
+ c.MetaSys = nil
+ c.MetaUser = nil
+
+ if bts, err := c.MarshalMsg(metaDataPoolGet()); err == nil {
+ crc ^= xxhash.Sum64(bts)
+ metaDataPoolPut(bts)
+ }
+
+ // Combine upper and lower part
+ var tmp [4]byte
+ binary.LittleEndian.PutUint32(tmp[:], uint32(crc^(crc>>32)))
+ return tmp
+}
+
func (j xlMetaV2Object) ToFileInfo(volume, path string) (FileInfo, error) {
versionID := ""
var uv uuid.UUID
@@ -1021,7 +519,9 @@ func (j xlMetaV2Object) ToFileInfo(volume, path string) (FileInfo, error) {
for i := range fi.Parts {
fi.Parts[i].Number = j.PartNumbers[i]
fi.Parts[i].Size = j.PartSizes[i]
- fi.Parts[i].ETag = j.PartETags[i]
+ if len(j.PartETags) > 0 {
+ fi.Parts[i].ETag = j.PartETags[i]
+ }
fi.Parts[i].ActualSize = j.PartActualSizes[i]
}
fi.Erasure.Checksums = make([]ChecksumInfo, len(j.PartSizes))
@@ -1081,370 +581,6 @@ func (j xlMetaV2Object) ToFileInfo(volume, path string) (FileInfo, error) {
return fi, nil
}
-func (z *xlMetaV2) SharedDataDirCountStr(versionID, dataDir string) int {
- var (
- uv uuid.UUID
- ddir uuid.UUID
- err error
- )
- if versionID == nullVersionID {
- versionID = ""
- }
- if versionID != "" {
- uv, err = uuid.Parse(versionID)
- if err != nil {
- return 0
- }
- }
- ddir, err = uuid.Parse(dataDir)
- if err != nil {
- return 0
- }
- return z.SharedDataDirCount(uv, ddir)
-}
-
-func (z *xlMetaV2) SharedDataDirCount(versionID [16]byte, dataDir [16]byte) int {
- // v2 object is inlined, if it is skip dataDir share check.
- if z.data.find(uuid.UUID(versionID).String()) != nil {
- return 0
- }
- var sameDataDirCount int
- for _, version := range z.Versions {
- switch version.Type {
- case ObjectType:
- if version.ObjectV2.VersionID == versionID {
- continue
- }
- if version.ObjectV2.DataDir != dataDir {
- continue
- }
- if version.ObjectV2.UsesDataDir() {
- sameDataDirCount++
- }
- }
- }
- return sameDataDirCount
-}
-
-// DeleteVersion deletes the version specified by version id.
-// returns to the caller which dataDir to delete, also
-// indicates if this is the last version.
-func (z *xlMetaV2) DeleteVersion(fi FileInfo) (string, bool, error) {
- // This is a situation where versionId is explicitly
- // specified as "null", as we do not save "null"
- // string it is considered empty. But empty also
- // means the version which matches will be purged.
- if fi.VersionID == nullVersionID {
- fi.VersionID = ""
- }
-
- var uv uuid.UUID
- var err error
- if fi.VersionID != "" {
- uv, err = uuid.Parse(fi.VersionID)
- if err != nil {
- return "", false, errFileVersionNotFound
- }
- }
-
- var ventry xlMetaV2Version
- if fi.Deleted {
- ventry = xlMetaV2Version{
- Type: DeleteType,
- DeleteMarker: &xlMetaV2DeleteMarker{
- VersionID: uv,
- ModTime: fi.ModTime.UnixNano(),
- MetaSys: make(map[string][]byte),
- },
- }
- if !ventry.Valid() {
- return "", false, errors.New("internal error: invalid version entry generated")
- }
- }
- updateVersion := false
- if fi.VersionPurgeStatus().Empty() && (fi.DeleteMarkerReplicationStatus() == "REPLICA" || fi.DeleteMarkerReplicationStatus().Empty()) {
- updateVersion = fi.MarkDeleted
- } else {
- // for replication scenario
- if fi.Deleted && fi.VersionPurgeStatus() != Complete {
- if !fi.VersionPurgeStatus().Empty() || fi.DeleteMarkerReplicationStatus().Empty() {
- updateVersion = true
- }
- }
- // object or delete-marker versioned delete is not complete
- if !fi.VersionPurgeStatus().Empty() && fi.VersionPurgeStatus() != Complete {
- updateVersion = true
- }
- }
-
- if fi.Deleted {
- if !fi.DeleteMarkerReplicationStatus().Empty() {
- switch fi.DeleteMarkerReplicationStatus() {
- case replication.Replica:
- ventry.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicaStatus] = []byte(string(fi.ReplicationState.ReplicaStatus))
- ventry.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicaTimestamp] = []byte(fi.ReplicationState.ReplicaTimeStamp.Format(http.TimeFormat))
- default:
- ventry.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicationStatus] = []byte(fi.ReplicationState.ReplicationStatusInternal)
- ventry.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicationTimestamp] = []byte(fi.ReplicationState.ReplicationTimeStamp.Format(http.TimeFormat))
- }
- }
- if !fi.VersionPurgeStatus().Empty() {
- ventry.DeleteMarker.MetaSys[VersionPurgeStatusKey] = []byte(fi.ReplicationState.VersionPurgeStatusInternal)
- }
- for k, v := range fi.ReplicationState.ResetStatusesMap {
- ventry.DeleteMarker.MetaSys[k] = []byte(v)
- }
- }
-
- for i, version := range z.Versions {
- if !version.Valid() {
- return "", false, errFileCorrupt
- }
- switch version.Type {
- case LegacyType:
- if version.ObjectV1.VersionID == fi.VersionID {
- z.Versions = append(z.Versions[:i], z.Versions[i+1:]...)
- if fi.Deleted {
- z.Versions = append(z.Versions, ventry)
- }
- return version.ObjectV1.DataDir, len(z.Versions) == 0, nil
- }
- case DeleteType:
- if version.DeleteMarker.VersionID == uv {
- if updateVersion {
- if len(z.Versions[i].DeleteMarker.MetaSys) == 0 {
- z.Versions[i].DeleteMarker.MetaSys = make(map[string][]byte)
- }
- if !fi.DeleteMarkerReplicationStatus().Empty() {
- switch fi.DeleteMarkerReplicationStatus() {
- case replication.Replica:
- z.Versions[i].DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicaStatus] = []byte(string(fi.ReplicationState.ReplicaStatus))
- z.Versions[i].DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicaTimestamp] = []byte(fi.ReplicationState.ReplicaTimeStamp.Format(http.TimeFormat))
- default:
- z.Versions[i].DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicationStatus] = []byte(fi.ReplicationState.ReplicationStatusInternal)
- z.Versions[i].DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicationTimestamp] = []byte(fi.ReplicationState.ReplicationTimeStamp.Format(http.TimeFormat))
- }
- }
- if !fi.VersionPurgeStatus().Empty() {
- z.Versions[i].DeleteMarker.MetaSys[VersionPurgeStatusKey] = []byte(fi.ReplicationState.VersionPurgeStatusInternal)
- }
- for k, v := range fi.ReplicationState.ResetStatusesMap {
- z.Versions[i].DeleteMarker.MetaSys[k] = []byte(v)
- }
- } else {
- z.Versions = append(z.Versions[:i], z.Versions[i+1:]...)
- if fi.MarkDeleted && (fi.VersionPurgeStatus().Empty() || (fi.VersionPurgeStatus() != Complete)) {
- z.Versions = append(z.Versions, ventry)
- }
- }
- return "", len(z.Versions) == 0, nil
- }
- case ObjectType:
- if version.ObjectV2.VersionID == uv && updateVersion {
- z.Versions[i].ObjectV2.MetaSys[VersionPurgeStatusKey] = []byte(fi.ReplicationState.VersionPurgeStatusInternal)
- for k, v := range fi.ReplicationState.ResetStatusesMap {
- z.Versions[i].ObjectV2.MetaSys[k] = []byte(v)
- }
- return "", len(z.Versions) == 0, nil
- }
- }
- }
-
- for i, version := range z.Versions {
- if !version.Valid() {
- return "", false, errFileCorrupt
- }
- switch version.Type {
- case ObjectType:
- if version.ObjectV2.VersionID == uv {
- switch {
- case fi.ExpireRestored:
- z.Versions[i].ObjectV2.RemoveRestoreHdrs()
-
- case fi.TransitionStatus == lifecycle.TransitionComplete:
- z.Versions[i].ObjectV2.SetTransition(fi)
-
- default:
- z.Versions = append(z.Versions[:i], z.Versions[i+1:]...)
- // if uv has tiered content we add a
- // free-version to track it for
- // asynchronous deletion via scanner.
- if freeVersion, toFree := version.ObjectV2.InitFreeVersion(fi); toFree {
- z.Versions = append(z.Versions, freeVersion)
- }
- }
-
- if fi.Deleted {
- z.Versions = append(z.Versions, ventry)
- }
- if z.SharedDataDirCount(version.ObjectV2.VersionID, version.ObjectV2.DataDir) > 0 {
- // Found that another version references the same dataDir
- // we shouldn't remove it, and only remove the version instead
- return "", len(z.Versions) == 0, nil
- }
- return uuid.UUID(version.ObjectV2.DataDir).String(), len(z.Versions) == 0, nil
- }
- }
- }
-
- if fi.Deleted {
- z.Versions = append(z.Versions, ventry)
- return "", false, nil
- }
- return "", false, errFileVersionNotFound
-}
-
-// TotalSize returns the total size of all versions.
-func (z xlMetaV2) TotalSize() int64 {
- var total int64
- for i := range z.Versions {
- switch z.Versions[i].Type {
- case ObjectType:
- total += z.Versions[i].ObjectV2.Size
- case LegacyType:
- total += z.Versions[i].ObjectV1.Stat.Size
- }
- }
- return total
-}
-
-// ListVersions lists current versions, and current deleted
-// versions returns error for unexpected entries.
-// showPendingDeletes is set to true if ListVersions needs to list objects marked deleted
-// but waiting to be replicated
-func (z xlMetaV2) ListVersions(volume, path string) ([]FileInfo, time.Time, error) {
- versions := make([]FileInfo, 0, len(z.Versions))
- var err error
-
- for _, version := range z.Versions {
- if !version.Valid() {
- return nil, time.Time{}, errFileCorrupt
- }
- var fi FileInfo
- switch version.Type {
- case ObjectType:
- fi, err = version.ObjectV2.ToFileInfo(volume, path)
- case DeleteType:
- fi, err = version.DeleteMarker.ToFileInfo(volume, path)
- case LegacyType:
- fi, err = version.ObjectV1.ToFileInfo(volume, path)
- }
- if err != nil {
- return nil, time.Time{}, err
- }
- versions = append(versions, fi)
- }
-
- versionsSorter(versions).sort()
-
- for i := range versions {
- versions[i].NumVersions = len(versions)
- if i > 0 {
- versions[i].SuccessorModTime = versions[i-1].ModTime
- }
- }
-
- versions[0].IsLatest = true
- return versions, versions[0].ModTime, nil
-}
-
-// ToFileInfo converts xlMetaV2 into a common FileInfo datastructure
-// for consumption across callers.
-func (z xlMetaV2) ToFileInfo(volume, path, versionID string) (fi FileInfo, err error) {
- var uv uuid.UUID
- if versionID != "" && versionID != nullVersionID {
- uv, err = uuid.Parse(versionID)
- if err != nil {
- logger.LogIf(GlobalContext, fmt.Errorf("invalid versionID specified %s", versionID))
- return FileInfo{}, errFileVersionNotFound
- }
- }
-
- orderedVersions := make([]xlMetaV2Version, 0, len(z.Versions))
- for _, version := range z.Versions {
- if !version.Valid() {
- logger.LogIf(GlobalContext, fmt.Errorf("invalid version detected %#v", version))
- if versionID == "" {
- return FileInfo{}, errFileNotFound
- }
- return FileInfo{}, errFileVersionNotFound
-
- }
- // skip listing free-version unless explicitly requested via versionID
- if version.FreeVersion() && version.DeleteMarker.VersionID != uv {
- continue
- }
- orderedVersions = append(orderedVersions, version)
- }
-
- if len(orderedVersions) > 1 {
- sort.Slice(orderedVersions, func(i, j int) bool {
- return orderedVersions[i].getModTime().After(orderedVersions[j].getModTime())
- })
- }
-
- if versionID == "" {
- if len(orderedVersions) >= 1 {
- switch orderedVersions[0].Type {
- case ObjectType:
- fi, err = orderedVersions[0].ObjectV2.ToFileInfo(volume, path)
- case DeleteType:
- fi, err = orderedVersions[0].DeleteMarker.ToFileInfo(volume, path)
- case LegacyType:
- fi, err = orderedVersions[0].ObjectV1.ToFileInfo(volume, path)
- }
- fi.IsLatest = true
- fi.NumVersions = len(orderedVersions)
- return fi, err
- }
- return FileInfo{}, errFileNotFound
- }
-
- var foundIndex = -1
-
- for i := range orderedVersions {
- switch orderedVersions[i].Type {
- case ObjectType:
- if orderedVersions[i].ObjectV2.VersionID == uv {
- fi, err = orderedVersions[i].ObjectV2.ToFileInfo(volume, path)
- foundIndex = i
- break
- }
- case LegacyType:
- if orderedVersions[i].ObjectV1.VersionID == versionID {
- fi, err = orderedVersions[i].ObjectV1.ToFileInfo(volume, path)
- foundIndex = i
- break
- }
- case DeleteType:
- if orderedVersions[i].DeleteMarker.VersionID == uv {
- fi, err = orderedVersions[i].DeleteMarker.ToFileInfo(volume, path)
- foundIndex = i
- break
- }
- }
- }
- if err != nil {
- return fi, err
- }
-
- if foundIndex >= 0 {
- // A version is found, fill dynamic fields
- fi.IsLatest = foundIndex == 0
- fi.NumVersions = len(z.Versions)
- if foundIndex > 0 {
- fi.SuccessorModTime = orderedVersions[foundIndex-1].getModTime()
- }
- return fi, nil
- }
-
- if versionID == "" {
- return FileInfo{}, errFileNotFound
- }
-
- return FileInfo{}, errFileVersionNotFound
-}
-
// Read at most this much on initial read.
const metaDataReadDefault = 4 << 10
@@ -1511,7 +647,7 @@ func readXLMetaNoData(r io.Reader, size int64) ([]byte, error) {
case 0:
err = readMore(size)
return buf, err
- case 1, 2:
+ case 1, 2, 3:
sz, tmp, err := msgp.ReadBytesHeader(tmp)
if err != nil {
return nil, err
@@ -1551,3 +687,1051 @@ func readXLMetaNoData(r io.Reader, size int64) ([]byte, error) {
return nil, errors.New("unknown major metadata version")
}
}
+
+func decodeXLHeaders(buf []byte) (versions int, headerV, metaV uint8, b []byte, err error) {
+ hdrVer, buf, err := msgp.ReadUint8Bytes(buf)
+ if err != nil {
+ return 0, 0, 0, buf, err
+ }
+ metaVer, buf, err := msgp.ReadUint8Bytes(buf)
+ if err != nil {
+ return 0, 0, 0, buf, err
+ }
+ if hdrVer > xlHeaderVersion {
+		return 0, 0, 0, buf, fmt.Errorf("decodeXLHeaders: Unknown xl header version %d", hdrVer)
+ }
+ if metaVer > xlMetaVersion {
+ return 0, 0, 0, buf, fmt.Errorf("decodeXLHeaders: Unknown xl meta version %d", metaVer)
+ }
+ versions, buf, err = msgp.ReadIntBytes(buf)
+ if err != nil {
+ return 0, 0, 0, buf, err
+ }
+ if versions < 0 {
+ return 0, 0, 0, buf, fmt.Errorf("decodeXLHeaders: Negative version count %d", versions)
+ }
+ return versions, hdrVer, metaVer, buf, nil
+}
+
+// decodeVersions will decode a number of versions from a buffer
+// and perform a callback for each version in order, newest first.
+// Return errDoneForNow to stop processing and return nil.
+// Any non-nil error is returned.
+func decodeVersions(buf []byte, versions int, fn func(idx int, hdr, meta []byte) error) (err error) {
+ var tHdr, tMeta []byte // Zero copy bytes
+ for i := 0; i < versions; i++ {
+ tHdr, buf, err = msgp.ReadBytesZC(buf)
+ if err != nil {
+ return err
+ }
+ tMeta, buf, err = msgp.ReadBytesZC(buf)
+ if err != nil {
+ return err
+ }
+ if err = fn(i, tHdr, tMeta); err != nil {
+ if err == errDoneForNow {
+ err = nil
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+// isIndexedMetaV2 returns non-nil result if metadata is indexed.
+// If data doesn't validate nil is also returned.
+func isIndexedMetaV2(buf []byte) (meta xlMetaBuf, data xlMetaInlineData) {
+ buf, major, minor, err := checkXL2V1(buf)
+ if err != nil {
+ return nil, nil
+ }
+ if major != 1 || minor < 3 {
+ return nil, nil
+ }
+ meta, buf, err = msgp.ReadBytesZC(buf)
+ if err != nil {
+ return nil, nil
+ }
+ if crc, nbuf, err := msgp.ReadUint32Bytes(buf); err == nil {
+ // Read metadata CRC
+ buf = nbuf
+ if got := uint32(xxhash.Sum64(meta)); got != crc {
+ return nil, nil
+ }
+ } else {
+ return nil, nil
+ }
+ data = buf
+ if data.validate() != nil {
+ data.repair()
+ }
+
+ return meta, data
+}
+
+type xlMetaV2ShallowVersion struct {
+ header xlMetaV2VersionHeader
+ meta []byte
+}
+
+//msgp:ignore xlMetaV2 xlMetaV2ShallowVersion
+
+type xlMetaV2 struct {
+ versions []xlMetaV2ShallowVersion
+
+ // data will contain raw data if any.
+ // data will be one or more versions indexed by versionID.
+ // To remove all data set to nil.
+ data xlMetaInlineData
+
+ // metadata version.
+ metaV uint8
+}
+
+// Load all versions of the stored data.
+// Note that references to the incoming buffer will be kept.
+func (x *xlMetaV2) Load(buf []byte) error {
+ if meta, data := isIndexedMetaV2(buf); meta != nil {
+ return x.loadIndexed(meta, data)
+ }
+ // Convert older format.
+ return x.loadLegacy(buf)
+}
+
+func (x *xlMetaV2) loadIndexed(buf xlMetaBuf, data xlMetaInlineData) error {
+ versions, headerV, metaV, buf, err := decodeXLHeaders(buf)
+ if err != nil {
+ return err
+ }
+ if cap(x.versions) < versions {
+ x.versions = make([]xlMetaV2ShallowVersion, 0, versions+1)
+ }
+ x.versions = x.versions[:versions]
+ x.data = data
+ x.metaV = metaV
+ if err = x.data.validate(); err != nil {
+ x.data.repair()
+ logger.Info("xlMetaV2.loadIndexed: data validation failed: %v. %d entries after repair", err, x.data.entries())
+ }
+
+ return decodeVersions(buf, versions, func(i int, hdr, meta []byte) error {
+ ver := &x.versions[i]
+ _, err = ver.header.unmarshalV(headerV, hdr)
+ if err != nil {
+ return err
+ }
+ ver.meta = meta
+ return nil
+ })
+}
+
+// loadLegacy will load content prior to v1.3
+// Note that references to the incoming buffer will be kept.
+func (x *xlMetaV2) loadLegacy(buf []byte) error {
+ buf, major, minor, err := checkXL2V1(buf)
+ if err != nil {
+ return fmt.Errorf("xlMetaV2.Load %w", err)
+ }
+ var allMeta []byte
+ switch major {
+ case 1:
+ switch minor {
+ case 0:
+ allMeta = buf
+ case 1, 2:
+ v, buf, err := msgp.ReadBytesZC(buf)
+ if err != nil {
+ return fmt.Errorf("xlMetaV2.Load version(%d), bufLen(%d) %w", minor, len(buf), err)
+ }
+ if minor >= 2 {
+ if crc, nbuf, err := msgp.ReadUint32Bytes(buf); err == nil {
+ // Read metadata CRC (added in v2)
+ buf = nbuf
+ if got := uint32(xxhash.Sum64(v)); got != crc {
+ return fmt.Errorf("xlMetaV2.Load version(%d), CRC mismatch, want 0x%x, got 0x%x", minor, crc, got)
+ }
+ } else {
+ return fmt.Errorf("xlMetaV2.Load version(%d), loading CRC: %w", minor, err)
+ }
+ }
+
+ allMeta = v
+ // Add remaining data.
+ x.data = buf
+ if err = x.data.validate(); err != nil {
+ x.data.repair()
+ logger.Info("xlMetaV2.Load: data validation failed: %v. %d entries after repair", err, x.data.entries())
+ }
+ default:
+ return errors.New("unknown minor metadata version")
+ }
+ default:
+ return errors.New("unknown major metadata version")
+ }
+ if allMeta == nil {
+ return errCorruptedFormat
+ }
+ // bts will shrink as we decode.
+ bts := allMeta
+ var field []byte
+ var zb0001 uint32
+ zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return msgp.WrapError(err, "loadLegacy.ReadMapHeader")
+ }
+
+ var tmp xlMetaV2Version
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return msgp.WrapError(err, "loadLegacy.ReadMapKey")
+ }
+ switch msgp.UnsafeString(field) {
+ case "Versions":
+ var zb0002 uint32
+ zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return msgp.WrapError(err, "Versions")
+ }
+ if cap(x.versions) >= int(zb0002) {
+ x.versions = (x.versions)[:zb0002]
+ } else {
+ x.versions = make([]xlMetaV2ShallowVersion, zb0002, zb0002+1)
+ }
+ for za0001 := range x.versions {
+ start := len(allMeta) - len(bts)
+ bts, err = tmp.unmarshalV(1, bts)
+ if err != nil {
+ return msgp.WrapError(err, "Versions", za0001)
+ }
+ end := len(allMeta) - len(bts)
+ // We reference the marshaled data, so we don't have to re-marshal.
+ x.versions[za0001] = xlMetaV2ShallowVersion{
+ header: tmp.header(),
+ meta: allMeta[start:end],
+ }
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return msgp.WrapError(err, "loadLegacy.Skip")
+ }
+ }
+ }
+ x.metaV = 1 // Fixed for legacy conversions.
+ x.sortByModTime()
+ return nil
+}
+
+func (x *xlMetaV2) addVersion(ver xlMetaV2Version) error {
+ modTime := ver.getModTime().UnixNano()
+ if !ver.Valid() {
+ return errors.New("attempted to add invalid version")
+ }
+ encoded, err := ver.MarshalMsg(nil)
+ if err != nil {
+ return err
+ }
+ // Add space at the end.
+ // Will have -1 modtime, so it will be inserted there.
+ x.versions = append(x.versions, xlMetaV2ShallowVersion{header: xlMetaV2VersionHeader{ModTime: -1}})
+
+ // Linear search, we likely have to insert at front.
+ for i, existing := range x.versions {
+ if existing.header.ModTime <= modTime {
+ // Insert at current idx. First move current back.
+ copy(x.versions[i+1:], x.versions[i:])
+ x.versions[i] = xlMetaV2ShallowVersion{
+ header: ver.header(),
+ meta: encoded,
+ }
+ return nil
+ }
+ }
+ return fmt.Errorf("addVersion: Internal error, unable to add version")
+}
+
+// AppendTo will marshal the data in z and append it to the provided slice.
+func (x *xlMetaV2) AppendTo(dst []byte) ([]byte, error) {
+ // Header...
+ sz := len(xlHeader) + len(xlVersionCurrent) + msgp.ArrayHeaderSize + len(dst) + 3*msgp.Uint32Size
+ // Existing + Inline data
+ sz += len(dst) + len(x.data)
+ // Versions...
+ for _, ver := range x.versions {
+ sz += 32 + len(ver.meta)
+ }
+ if cap(dst) < sz {
+ buf := make([]byte, len(dst), sz)
+ copy(buf, dst)
+ dst = buf
+ }
+ if err := x.data.validate(); err != nil {
+ return nil, err
+ }
+
+ dst = append(dst, xlHeader[:]...)
+ dst = append(dst, xlVersionCurrent[:]...)
+ // Add "bin 32" type header to always have enough space.
+ // We will fill out the correct size when we know it.
+ dst = append(dst, 0xc6, 0, 0, 0, 0)
+ dataOffset := len(dst)
+
+ dst = msgp.AppendUint(dst, xlHeaderVersion)
+ dst = msgp.AppendUint(dst, xlMetaVersion)
+ dst = msgp.AppendInt(dst, len(x.versions))
+
+ tmp := metaDataPoolGet()
+ defer metaDataPoolPut(tmp)
+ for _, ver := range x.versions {
+ var err error
+
+ // Add header
+ tmp, err = ver.header.MarshalMsg(tmp[:0])
+ if err != nil {
+ return nil, err
+ }
+ dst = msgp.AppendBytes(dst, tmp)
+
+ // Add full meta
+ dst = msgp.AppendBytes(dst, ver.meta)
+ }
+
+ // Update size...
+ binary.BigEndian.PutUint32(dst[dataOffset-4:dataOffset], uint32(len(dst)-dataOffset))
+
+ // Add CRC of metadata as fixed size (5 bytes)
+ // Prior to v1.3 this was variable sized.
+ tmp = tmp[:5]
+ tmp[0] = 0xce // muint32
+ binary.BigEndian.PutUint32(tmp[1:], uint32(xxhash.Sum64(dst[dataOffset:])))
+ dst = append(dst, tmp[:5]...)
+ return append(dst, x.data...), nil
+}
+
+func (x *xlMetaV2) findVersion(key [16]byte) (idx int, ver *xlMetaV2Version, err error) {
+ for i, ver := range x.versions {
+ if key == ver.header.VersionID {
+ obj, err := x.getIdx(i)
+ return i, obj, err
+ }
+ }
+ return -1, nil, errFileVersionNotFound
+}
+
+func (x *xlMetaV2) getIdx(idx int) (ver *xlMetaV2Version, err error) {
+ if idx < 0 || idx >= len(x.versions) {
+ return nil, errFileNotFound
+ }
+ var dst xlMetaV2Version
+ _, err = dst.unmarshalV(x.metaV, x.versions[idx].meta)
+ if false {
+ if err == nil && x.versions[idx].header.VersionID != dst.getVersionID() {
+ panic(fmt.Sprintf("header: %x != object id: %x", x.versions[idx].header.VersionID, dst.getVersionID()))
+ }
+ }
+ return &dst, err
+}
+
+// setIdx will replace a version at a given index.
+// Note that versions may become re-sorted if modtime changes.
+func (x *xlMetaV2) setIdx(idx int, ver xlMetaV2Version) (err error) {
+ if idx < 0 || idx >= len(x.versions) {
+ return errFileNotFound
+ }
+ update := &x.versions[idx]
+ prevMod := update.header.ModTime
+ update.meta, err = ver.MarshalMsg(update.meta[:0:len(update.meta)])
+ if err != nil {
+ update.meta = nil
+ return err
+ }
+ update.header = ver.header()
+ if prevMod != update.header.ModTime {
+ x.sortByModTime()
+ }
+ return nil
+}
+
+// sortByModTime will sort versions by modtime in descending order,
+// meaning index 0 will be latest version.
+func (x *xlMetaV2) sortByModTime() {
+ // Quick check
+ if len(x.versions) <= 1 || sort.SliceIsSorted(x.versions, func(i, j int) bool {
+ return x.versions[i].header.ModTime > x.versions[j].header.ModTime
+ }) {
+ return
+ }
+
+ // We should sort.
+ sort.Slice(x.versions, func(i, j int) bool {
+ return x.versions[i].header.ModTime > x.versions[j].header.ModTime
+ })
+}
+
+// DeleteVersion deletes the version specified by version id.
+// returns to the caller which dataDir to delete, also
+// indicates if this is the last version.
+func (x *xlMetaV2) DeleteVersion(fi FileInfo) (string, bool, error) {
+ // This is a situation where versionId is explicitly
+ // specified as "null", as we do not save "null"
+ // string it is considered empty. But empty also
+ // means the version which matches will be purged.
+ if fi.VersionID == nullVersionID {
+ fi.VersionID = ""
+ }
+
+ var uv uuid.UUID
+ var err error
+ if fi.VersionID != "" {
+ uv, err = uuid.Parse(fi.VersionID)
+ if err != nil {
+ return "", false, errFileVersionNotFound
+ }
+ }
+
+ var ventry xlMetaV2Version
+ if fi.Deleted {
+ ventry = xlMetaV2Version{
+ Type: DeleteType,
+ DeleteMarker: &xlMetaV2DeleteMarker{
+ VersionID: uv,
+ ModTime: fi.ModTime.UnixNano(),
+ MetaSys: make(map[string][]byte),
+ },
+ }
+ if !ventry.Valid() {
+ return "", false, errors.New("internal error: invalid version entry generated")
+ }
+ }
+ updateVersion := false
+ if fi.VersionPurgeStatus().Empty() && (fi.DeleteMarkerReplicationStatus() == "REPLICA" || fi.DeleteMarkerReplicationStatus().Empty()) {
+ updateVersion = fi.MarkDeleted
+ } else {
+ // for replication scenario
+ if fi.Deleted && fi.VersionPurgeStatus() != Complete {
+ if !fi.VersionPurgeStatus().Empty() || fi.DeleteMarkerReplicationStatus().Empty() {
+ updateVersion = true
+ }
+ }
+ // object or delete-marker versioned delete is not complete
+ if !fi.VersionPurgeStatus().Empty() && fi.VersionPurgeStatus() != Complete {
+ updateVersion = true
+ }
+ }
+
+ if fi.Deleted {
+ if !fi.DeleteMarkerReplicationStatus().Empty() {
+ switch fi.DeleteMarkerReplicationStatus() {
+ case replication.Replica:
+ ventry.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicaStatus] = []byte(string(fi.ReplicationState.ReplicaStatus))
+ ventry.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicaTimestamp] = []byte(fi.ReplicationState.ReplicaTimeStamp.Format(http.TimeFormat))
+ default:
+ ventry.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicationStatus] = []byte(fi.ReplicationState.ReplicationStatusInternal)
+ ventry.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicationTimestamp] = []byte(fi.ReplicationState.ReplicationTimeStamp.Format(http.TimeFormat))
+ }
+ }
+ if !fi.VersionPurgeStatus().Empty() {
+ ventry.DeleteMarker.MetaSys[VersionPurgeStatusKey] = []byte(fi.ReplicationState.VersionPurgeStatusInternal)
+ }
+ for k, v := range fi.ReplicationState.ResetStatusesMap {
+ ventry.DeleteMarker.MetaSys[k] = []byte(v)
+ }
+ }
+
+ for i, ver := range x.versions {
+ if ver.header.VersionID != uv {
+ continue
+ }
+ switch ver.header.Type {
+ case LegacyType:
+ ver, err := x.getIdx(i)
+ if err != nil {
+ return "", false, err
+ }
+ x.versions = append(x.versions[:i], x.versions[i+1:]...)
+ if fi.Deleted {
+ err = x.addVersion(ventry)
+ }
+ return ver.ObjectV1.DataDir, len(x.versions) == 0, err
+ case DeleteType:
+ if updateVersion {
+ ver, err := x.getIdx(i)
+ if err != nil {
+ return "", false, err
+ }
+ if len(ver.DeleteMarker.MetaSys) == 0 {
+ ver.DeleteMarker.MetaSys = make(map[string][]byte)
+ }
+ if !fi.DeleteMarkerReplicationStatus().Empty() {
+ switch fi.DeleteMarkerReplicationStatus() {
+ case replication.Replica:
+ ver.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicaStatus] = []byte(string(fi.ReplicationState.ReplicaStatus))
+ ver.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicaTimestamp] = []byte(fi.ReplicationState.ReplicaTimeStamp.Format(http.TimeFormat))
+ default:
+ ver.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicationStatus] = []byte(fi.ReplicationState.ReplicationStatusInternal)
+ ver.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicationTimestamp] = []byte(fi.ReplicationState.ReplicationTimeStamp.Format(http.TimeFormat))
+ }
+ }
+ if !fi.VersionPurgeStatus().Empty() {
+ ver.DeleteMarker.MetaSys[VersionPurgeStatusKey] = []byte(fi.ReplicationState.VersionPurgeStatusInternal)
+ }
+ for k, v := range fi.ReplicationState.ResetStatusesMap {
+ ver.DeleteMarker.MetaSys[k] = []byte(v)
+ }
+ err = x.setIdx(i, *ver)
+ return "", len(x.versions) == 0, err
+ }
+ var err error
+ x.versions = append(x.versions[:i], x.versions[i+1:]...)
+ if fi.MarkDeleted && (fi.VersionPurgeStatus().Empty() || (fi.VersionPurgeStatus() != Complete)) {
+ err = x.addVersion(ventry)
+ }
+ return "", len(x.versions) == 0, err
+ case ObjectType:
+ if updateVersion {
+ ver, err := x.getIdx(i)
+ if err != nil {
+ return "", false, err
+ }
+ ver.ObjectV2.MetaSys[VersionPurgeStatusKey] = []byte(fi.ReplicationState.VersionPurgeStatusInternal)
+ for k, v := range fi.ReplicationState.ResetStatusesMap {
+ ver.ObjectV2.MetaSys[k] = []byte(v)
+ }
+ err = x.setIdx(i, *ver)
+ return "", len(x.versions) == 0, err
+ }
+ }
+ }
+
+ for i, version := range x.versions {
+ if version.header.Type != ObjectType || version.header.VersionID != uv {
+ continue
+ }
+ ver, err := x.getIdx(i)
+ if err != nil {
+ return "", false, err
+ }
+ switch {
+ case fi.ExpireRestored:
+ ver.ObjectV2.RemoveRestoreHdrs()
+ err = x.setIdx(i, *ver)
+ case fi.TransitionStatus == lifecycle.TransitionComplete:
+ ver.ObjectV2.SetTransition(fi)
+ err = x.setIdx(i, *ver)
+ default:
+ x.versions = append(x.versions[:i], x.versions[i+1:]...)
+ // if uv has tiered content we add a
+ // free-version to track it for
+ // asynchronous deletion via scanner.
+ if freeVersion, toFree := ver.ObjectV2.InitFreeVersion(fi); toFree {
+ err = x.addVersion(freeVersion)
+ }
+ }
+ logger.LogIf(context.Background(), err)
+
+ if fi.Deleted {
+ err = x.addVersion(ventry)
+ }
+ if x.SharedDataDirCount(ver.ObjectV2.VersionID, ver.ObjectV2.DataDir) > 0 {
+ // Found that another version references the same dataDir
+ // we shouldn't remove it, and only remove the version instead
+ return "", len(x.versions) == 0, nil
+ }
+ return uuid.UUID(ver.ObjectV2.DataDir).String(), len(x.versions) == 0, err
+ }
+
+ if fi.Deleted {
+ err = x.addVersion(ventry)
+ return "", false, err
+ }
+ return "", false, errFileVersionNotFound
+}
+
+// xlMetaDataDirDecoder is a shallow decoder for decoding object datadir only.
+type xlMetaDataDirDecoder struct {
+ ObjectV2 *struct {
+ DataDir [16]byte `msg:"DDir"` // Data dir ID
+ } `msg:"V2Obj,omitempty"`
+}
+
+// UpdateObjectVersion updates metadata and modTime for a given
+// versionID, NOTE: versionID must be valid and should exist -
+// and must not be a DeleteMarker or legacy object, if no
+// versionID is specified 'null' versionID is updated instead.
+//
+// It is callers responsibility to set correct versionID, this
+// function shouldn't be further extended to update immutable
+// values such as ErasureInfo, ChecksumInfo.
+//
+// Metadata is only updated to new values, existing values
+// stay as is, if you wish to update all values you should
+// update all metadata freshly before calling this function
+// in-case you wish to clear existing metadata.
+func (x *xlMetaV2) UpdateObjectVersion(fi FileInfo) error {
+ if fi.VersionID == "" {
+ // this means versioning is not yet
+ // enabled or suspend i.e all versions
+ // are basically default value i.e "null"
+ fi.VersionID = nullVersionID
+ }
+
+ var uv uuid.UUID
+ var err error
+ if fi.VersionID != "" && fi.VersionID != nullVersionID {
+ uv, err = uuid.Parse(fi.VersionID)
+ if err != nil {
+ return err
+ }
+ }
+
+ for i, version := range x.versions {
+ switch version.header.Type {
+ case LegacyType, DeleteType:
+ if version.header.VersionID == uv {
+ return errMethodNotAllowed
+ }
+ case ObjectType:
+ if version.header.VersionID == uv {
+ ver, err := x.getIdx(i)
+ if err != nil {
+ return err
+ }
+ for k, v := range fi.Metadata {
+ if len(k) > len(ReservedMetadataPrefixLower) && strings.EqualFold(k[:len(ReservedMetadataPrefixLower)], ReservedMetadataPrefixLower) {
+ ver.ObjectV2.MetaSys[k] = []byte(v)
+ } else {
+ ver.ObjectV2.MetaUser[k] = v
+ }
+ }
+ if !fi.ModTime.IsZero() {
+ ver.ObjectV2.ModTime = fi.ModTime.UnixNano()
+ }
+ return x.setIdx(i, *ver)
+ }
+ }
+ }
+
+ return errFileVersionNotFound
+}
+
+// AddVersion adds a new version
+func (x *xlMetaV2) AddVersion(fi FileInfo) error {
+ if fi.VersionID == "" {
+ // this means versioning is not yet
+ // enabled or suspend i.e all versions
+ // are basically default value i.e "null"
+ fi.VersionID = nullVersionID
+ }
+
+ var uv uuid.UUID
+ var err error
+ if fi.VersionID != "" && fi.VersionID != nullVersionID {
+ uv, err = uuid.Parse(fi.VersionID)
+ if err != nil {
+ return err
+ }
+ }
+
+ var dd uuid.UUID
+ if fi.DataDir != "" {
+ dd, err = uuid.Parse(fi.DataDir)
+ if err != nil {
+ return err
+ }
+ }
+
+ ventry := xlMetaV2Version{}
+
+ if fi.Deleted {
+ ventry.Type = DeleteType
+ ventry.DeleteMarker = &xlMetaV2DeleteMarker{
+ VersionID: uv,
+ ModTime: fi.ModTime.UnixNano(),
+ MetaSys: make(map[string][]byte),
+ }
+ } else {
+ ventry.Type = ObjectType
+ ventry.ObjectV2 = &xlMetaV2Object{
+ VersionID: uv,
+ DataDir: dd,
+ Size: fi.Size,
+ ModTime: fi.ModTime.UnixNano(),
+ ErasureAlgorithm: ReedSolomon,
+ ErasureM: fi.Erasure.DataBlocks,
+ ErasureN: fi.Erasure.ParityBlocks,
+ ErasureBlockSize: fi.Erasure.BlockSize,
+ ErasureIndex: fi.Erasure.Index,
+ BitrotChecksumAlgo: HighwayHash,
+ ErasureDist: make([]uint8, len(fi.Erasure.Distribution)),
+ PartNumbers: make([]int, len(fi.Parts)),
+ PartETags: nil,
+ PartSizes: make([]int64, len(fi.Parts)),
+ PartActualSizes: make([]int64, len(fi.Parts)),
+ MetaSys: make(map[string][]byte),
+ MetaUser: make(map[string]string, len(fi.Metadata)),
+ }
+ for i := range fi.Parts {
+ // Only add etags if any.
+ if fi.Parts[i].ETag != "" {
+ ventry.ObjectV2.PartETags = make([]string, len(fi.Parts))
+ break
+ }
+ }
+ for i := range fi.Erasure.Distribution {
+ ventry.ObjectV2.ErasureDist[i] = uint8(fi.Erasure.Distribution[i])
+ }
+
+ for i := range fi.Parts {
+ ventry.ObjectV2.PartSizes[i] = fi.Parts[i].Size
+ if len(ventry.ObjectV2.PartETags) > 0 && fi.Parts[i].ETag != "" {
+ ventry.ObjectV2.PartETags[i] = fi.Parts[i].ETag
+ }
+ ventry.ObjectV2.PartNumbers[i] = fi.Parts[i].Number
+ ventry.ObjectV2.PartActualSizes[i] = fi.Parts[i].ActualSize
+ }
+
+ tierFVIDKey := ReservedMetadataPrefixLower + tierFVID
+ tierFVMarkerKey := ReservedMetadataPrefixLower + tierFVMarker
+ for k, v := range fi.Metadata {
+ if len(k) > len(ReservedMetadataPrefixLower) && strings.EqualFold(k[:len(ReservedMetadataPrefixLower)], ReservedMetadataPrefixLower) {
+ // Skip tierFVID, tierFVMarker keys; it's used
+ // only for creating free-version.
+ switch k {
+ case tierFVIDKey, tierFVMarkerKey:
+ continue
+ }
+
+ ventry.ObjectV2.MetaSys[k] = []byte(v)
+ } else {
+ ventry.ObjectV2.MetaUser[k] = v
+ }
+ }
+
+ // If asked to save data.
+ if len(fi.Data) > 0 || fi.Size == 0 {
+ x.data.replace(fi.VersionID, fi.Data)
+ }
+
+ if fi.TransitionStatus != "" {
+ ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionStatus] = []byte(fi.TransitionStatus)
+ }
+ if fi.TransitionedObjName != "" {
+ ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionedObjectName] = []byte(fi.TransitionedObjName)
+ }
+ if fi.TransitionVersionID != "" {
+ ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionedVersionID] = []byte(fi.TransitionVersionID)
+ }
+ if fi.TransitionTier != "" {
+ ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionTier] = []byte(fi.TransitionTier)
+ }
+ }
+
+ if !ventry.Valid() {
+ return errors.New("internal error: invalid version entry generated")
+ }
+
+ // Check if we should replace first.
+ for i := range x.versions {
+ if x.versions[i].header.VersionID != uv {
+ continue
+ }
+ switch x.versions[i].header.Type {
+ case LegacyType:
+ // This would convert legacy type into new ObjectType
+ // this means that we are basically purging the `null`
+ // version of the object.
+ return x.setIdx(i, ventry)
+ case ObjectType:
+ return x.setIdx(i, ventry)
+ case DeleteType:
+ // Allowing delete marker to replaced with proper
+ // object data type as well, this is not S3 complaint
+ // behavior but kept here for future flexibility.
+ return x.setIdx(i, ventry)
+ }
+ }
+
+ // We did not find it, add it.
+ return x.addVersion(ventry)
+}
+
+func (x *xlMetaV2) SharedDataDirCount(versionID [16]byte, dataDir [16]byte) int {
+ // v2 object is inlined, if it is skip dataDir share check.
+ if x.data.entries() > 0 && x.data.find(uuid.UUID(versionID).String()) != nil {
+ return 0
+ }
+ var sameDataDirCount int
+ var decoded xlMetaDataDirDecoder
+ for _, version := range x.versions {
+ if version.header.Type != ObjectType || version.header.VersionID == versionID || !version.header.UsesDataDir() {
+ continue
+ }
+ _, err := decoded.UnmarshalMsg(version.meta)
+ if err != nil || decoded.ObjectV2 == nil || decoded.ObjectV2.DataDir != dataDir {
+ continue
+ }
+ sameDataDirCount++
+ }
+ return sameDataDirCount
+}
+
+func (x *xlMetaV2) SharedDataDirCountStr(versionID, dataDir string) int {
+ var (
+ uv uuid.UUID
+ ddir uuid.UUID
+ err error
+ )
+ if versionID == nullVersionID {
+ versionID = ""
+ }
+ if versionID != "" {
+ uv, err = uuid.Parse(versionID)
+ if err != nil {
+ return 0
+ }
+ }
+ ddir, err = uuid.Parse(dataDir)
+ if err != nil {
+ return 0
+ }
+ return x.SharedDataDirCount(uv, ddir)
+}
+
+// AddLegacy adds a legacy version, is only called when no prior
+// versions exist, safe to use it by only one function in xl-storage(RenameData)
+func (x *xlMetaV2) AddLegacy(m *xlMetaV1Object) error {
+ if !m.valid() {
+ return errFileCorrupt
+ }
+ m.VersionID = nullVersionID
+ m.DataDir = legacyDataDir
+
+ return x.addVersion(xlMetaV2Version{ObjectV1: m, Type: LegacyType})
+}
+
+// ToFileInfo converts xlMetaV2 into a common FileInfo datastructure
+// for consumption across callers.
+func (x xlMetaV2) ToFileInfo(volume, path, versionID string) (fi FileInfo, err error) {
+ var uv uuid.UUID
+ if versionID != "" && versionID != nullVersionID {
+ uv, err = uuid.Parse(versionID)
+ if err != nil {
+ logger.LogIf(GlobalContext, fmt.Errorf("invalid versionID specified %s", versionID))
+ return fi, errFileVersionNotFound
+ }
+ }
+ var succModTime int64
+ isLatest := true
+ nonFreeVersions := len(x.versions)
+ found := false
+ for _, ver := range x.versions {
+ header := &ver.header
+ // skip listing free-version unless explicitly requested via versionID
+ if header.FreeVersion() {
+ nonFreeVersions--
+ if header.VersionID != uv {
+ continue
+ }
+ }
+ if found {
+ continue
+ }
+
+ // We need a specific version, skip...
+ if versionID != "" && uv != header.VersionID {
+ isLatest = false
+ succModTime = header.ModTime
+ continue
+ }
+
+ // We found what we need.
+ found = true
+ var version xlMetaV2Version
+ if _, err := version.unmarshalV(x.metaV, ver.meta); err != nil {
+ return fi, err
+ }
+ if fi, err = version.ToFileInfo(volume, path); err != nil {
+ return fi, err
+ }
+ fi.IsLatest = isLatest
+ if succModTime != 0 {
+ fi.SuccessorModTime = time.Unix(0, succModTime)
+ }
+ }
+ if !found {
+ if versionID == "" {
+ return FileInfo{}, errFileNotFound
+ }
+
+ return FileInfo{}, errFileVersionNotFound
+ }
+ fi.NumVersions = nonFreeVersions
+ return fi, err
+}
+
+// ListVersions lists current versions, and current deleted
+// versions returns error for unexpected entries.
+// showPendingDeletes is set to true if ListVersions needs to list objects marked deleted
+// but waiting to be replicated
+func (x xlMetaV2) ListVersions(volume, path string) ([]FileInfo, error) {
+ versions := make([]FileInfo, 0, len(x.versions))
+ var err error
+
+ var dst xlMetaV2Version
+ for _, version := range x.versions {
+ _, err = dst.unmarshalV(x.metaV, version.meta)
+ if err != nil {
+ return versions, err
+ }
+ fi, err := dst.ToFileInfo(volume, path)
+ if err != nil {
+ return versions, err
+ }
+ fi.NumVersions = len(x.versions)
+ versions = append(versions, fi)
+ }
+
+ for i := range versions {
+ versions[i].NumVersions = len(versions)
+ if i > 0 {
+ versions[i].SuccessorModTime = versions[i-1].ModTime
+ }
+ }
+ if len(versions) > 0 {
+ versions[0].IsLatest = true
+ }
+ return versions, nil
+}
+
+type xlMetaBuf []byte
+
+// ToFileInfo converts xlMetaV2 into a common FileInfo datastructure
+// for consumption across callers.
+func (x xlMetaBuf) ToFileInfo(volume, path, versionID string) (fi FileInfo, err error) {
+ var uv uuid.UUID
+ if versionID != "" && versionID != nullVersionID {
+ uv, err = uuid.Parse(versionID)
+ if err != nil {
+ logger.LogIf(GlobalContext, fmt.Errorf("invalid versionID specified %s", versionID))
+ return fi, errFileVersionNotFound
+ }
+ }
+ versions, headerV, metaV, buf, err := decodeXLHeaders(x)
+ if err != nil {
+ return fi, err
+ }
+ var header xlMetaV2VersionHeader
+ var succModTime int64
+ isLatest := true
+ nonFreeVersions := versions
+ found := false
+ err = decodeVersions(buf, versions, func(idx int, hdr, meta []byte) error {
+ if _, err := header.unmarshalV(headerV, hdr); err != nil {
+ return err
+ }
+
+ // skip listing free-version unless explicitly requested via versionID
+ if header.FreeVersion() {
+ nonFreeVersions--
+ if header.VersionID != uv {
+ return nil
+ }
+ }
+ if found {
+ return nil
+ }
+
+ // We need a specific version, skip...
+ if versionID != "" && uv != header.VersionID {
+ isLatest = false
+ succModTime = header.ModTime
+ return nil
+ }
+
+ // We found what we need.
+ found = true
+ var version xlMetaV2Version
+ if _, err := version.unmarshalV(metaV, meta); err != nil {
+ return err
+ }
+ if fi, err = version.ToFileInfo(volume, path); err != nil {
+ return err
+ }
+ fi.IsLatest = isLatest
+ if succModTime != 0 {
+ fi.SuccessorModTime = time.Unix(0, succModTime)
+ }
+ return nil
+ })
+ if !found {
+ if versionID == "" {
+ return FileInfo{}, errFileNotFound
+ }
+
+ return FileInfo{}, errFileVersionNotFound
+ }
+ fi.NumVersions = nonFreeVersions
+ return fi, err
+}
+
+// ListVersions lists current versions, and current deleted
+// versions returns error for unexpected entries.
+// showPendingDeletes is set to true if ListVersions needs to list objects marked deleted
+// but waiting to be replicated
+func (x xlMetaBuf) ListVersions(volume, path string) ([]FileInfo, error) {
+ vers, _, metaV, buf, err := decodeXLHeaders(x)
+ if err != nil {
+ return nil, err
+ }
+ var succModTime time.Time
+ isLatest := true
+ dst := make([]FileInfo, 0, vers)
+ var xl xlMetaV2Version
+ err = decodeVersions(buf, vers, func(idx int, hdr, meta []byte) error {
+ if _, err := xl.unmarshalV(metaV, meta); err != nil {
+ return err
+ }
+ if !xl.Valid() {
+ return errFileCorrupt
+ }
+ fi, err := xl.ToFileInfo(volume, path)
+ if err != nil {
+ return err
+ }
+ fi.IsLatest = isLatest
+ fi.SuccessorModTime = succModTime
+ fi.NumVersions = vers
+ isLatest = false
+ succModTime = xl.getModTime()
+
+ dst = append(dst, fi)
+ return nil
+ })
+ return dst, err
+}
+
+// IsLatestDeleteMarker returns true if latest version is a deletemarker or there are no versions.
+// If any error occurs false is returned.
+func (x xlMetaBuf) IsLatestDeleteMarker() bool {
+ vers, headerV, _, buf, err := decodeXLHeaders(x)
+ if err != nil {
+ return false
+ }
+ if vers == 0 {
+ return true
+ }
+ isDeleteMarker := false
+
+ _ = decodeVersions(buf, vers, func(idx int, hdr, _ []byte) error {
+ var xl xlMetaV2VersionHeader
+ if _, err := xl.unmarshalV(headerV, hdr); err != nil {
+ return errDoneForNow
+ }
+ isDeleteMarker = xl.Type == DeleteType
+ return errDoneForNow
+
+ })
+ return isDeleteMarker
+}
diff --git a/cmd/xl-storage-format-v2_gen.go b/cmd/xl-storage-format-v2_gen.go
index 307147ca1..0ee5d38df 100644
--- a/cmd/xl-storage-format-v2_gen.go
+++ b/cmd/xl-storage-format-v2_gen.go
@@ -163,7 +163,111 @@ func (z VersionType) Msgsize() (s int) {
}
// DecodeMsg implements msgp.Decodable
-func (z *xlMetaV2) DecodeMsg(dc *msgp.Reader) (err error) {
+func (z *xlFlags) DecodeMsg(dc *msgp.Reader) (err error) {
+ {
+ var zb0001 uint8
+ zb0001, err = dc.ReadUint8()
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ (*z) = xlFlags(zb0001)
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z xlFlags) EncodeMsg(en *msgp.Writer) (err error) {
+ err = en.WriteUint8(uint8(z))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z xlFlags) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ o = msgp.AppendUint8(o, uint8(z))
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *xlFlags) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ {
+ var zb0001 uint8
+ zb0001, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ (*z) = xlFlags(zb0001)
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z xlFlags) Msgsize() (s int) {
+ s = msgp.Uint8Size
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *xlMetaBuf) DecodeMsg(dc *msgp.Reader) (err error) {
+ {
+ var zb0001 []byte
+ zb0001, err = dc.ReadBytes([]byte((*z)))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ (*z) = xlMetaBuf(zb0001)
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z xlMetaBuf) EncodeMsg(en *msgp.Writer) (err error) {
+ err = en.WriteBytes([]byte(z))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z xlMetaBuf) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ o = msgp.AppendBytes(o, []byte(z))
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *xlMetaBuf) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ {
+ var zb0001 []byte
+ zb0001, bts, err = msgp.ReadBytesBytes(bts, []byte((*z)))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ (*z) = xlMetaBuf(zb0001)
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z xlMetaBuf) Msgsize() (s int) {
+ s = msgp.BytesPrefixSize + len([]byte(z))
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *xlMetaDataDirDecoder) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
@@ -180,24 +284,48 @@ func (z *xlMetaV2) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
switch msgp.UnsafeString(field) {
- case "Versions":
- var zb0002 uint32
- zb0002, err = dc.ReadArrayHeader()
- if err != nil {
- err = msgp.WrapError(err, "Versions")
- return
- }
- if cap(z.Versions) >= int(zb0002) {
- z.Versions = (z.Versions)[:zb0002]
- } else {
- z.Versions = make([]xlMetaV2Version, zb0002)
- }
- for za0001 := range z.Versions {
- err = z.Versions[za0001].DecodeMsg(dc)
+ case "V2Obj":
+ if dc.IsNil() {
+ err = dc.ReadNil()
if err != nil {
- err = msgp.WrapError(err, "Versions", za0001)
+ err = msgp.WrapError(err, "ObjectV2")
return
}
+ z.ObjectV2 = nil
+ } else {
+ if z.ObjectV2 == nil {
+ z.ObjectV2 = new(struct {
+ DataDir [16]byte `msg:"DDir"`
+ })
+ }
+ var zb0002 uint32
+ zb0002, err = dc.ReadMapHeader()
+ if err != nil {
+ err = msgp.WrapError(err, "ObjectV2")
+ return
+ }
+ for zb0002 > 0 {
+ zb0002--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ err = msgp.WrapError(err, "ObjectV2")
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "DDir":
+ err = dc.ReadExactBytes((z.ObjectV2.DataDir)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "ObjectV2", "DataDir")
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ err = msgp.WrapError(err, "ObjectV2")
+ return
+ }
+ }
+ }
}
default:
err = dc.Skip()
@@ -211,47 +339,82 @@ func (z *xlMetaV2) DecodeMsg(dc *msgp.Reader) (err error) {
}
// EncodeMsg implements msgp.Encodable
-func (z *xlMetaV2) EncodeMsg(en *msgp.Writer) (err error) {
- // map header, size 1
- // write "Versions"
- err = en.Append(0x81, 0xa8, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
+func (z *xlMetaDataDirDecoder) EncodeMsg(en *msgp.Writer) (err error) {
+ // omitempty: check for empty values
+ zb0001Len := uint32(1)
+ var zb0001Mask uint8 /* 1 bits */
+ if z.ObjectV2 == nil {
+ zb0001Len--
+ zb0001Mask |= 0x1
+ }
+ // variable map header, size zb0001Len
+ err = en.Append(0x80 | uint8(zb0001Len))
if err != nil {
return
}
- err = en.WriteArrayHeader(uint32(len(z.Versions)))
- if err != nil {
- err = msgp.WrapError(err, "Versions")
+ if zb0001Len == 0 {
return
}
- for za0001 := range z.Versions {
- err = z.Versions[za0001].EncodeMsg(en)
+ if (zb0001Mask & 0x1) == 0 { // if not empty
+ // write "V2Obj"
+ err = en.Append(0xa5, 0x56, 0x32, 0x4f, 0x62, 0x6a)
if err != nil {
- err = msgp.WrapError(err, "Versions", za0001)
return
}
+ if z.ObjectV2 == nil {
+ err = en.WriteNil()
+ if err != nil {
+ return
+ }
+ } else {
+ // map header, size 1
+ // write "DDir"
+ err = en.Append(0x81, 0xa4, 0x44, 0x44, 0x69, 0x72)
+ if err != nil {
+ return
+ }
+ err = en.WriteBytes((z.ObjectV2.DataDir)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "ObjectV2", "DataDir")
+ return
+ }
+ }
}
return
}
// MarshalMsg implements msgp.Marshaler
-func (z *xlMetaV2) MarshalMsg(b []byte) (o []byte, err error) {
+func (z *xlMetaDataDirDecoder) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
- // map header, size 1
- // string "Versions"
- o = append(o, 0x81, 0xa8, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73)
- o = msgp.AppendArrayHeader(o, uint32(len(z.Versions)))
- for za0001 := range z.Versions {
- o, err = z.Versions[za0001].MarshalMsg(o)
- if err != nil {
- err = msgp.WrapError(err, "Versions", za0001)
- return
+ // omitempty: check for empty values
+ zb0001Len := uint32(1)
+ var zb0001Mask uint8 /* 1 bits */
+ if z.ObjectV2 == nil {
+ zb0001Len--
+ zb0001Mask |= 0x1
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len == 0 {
+ return
+ }
+ if (zb0001Mask & 0x1) == 0 { // if not empty
+ // string "V2Obj"
+ o = append(o, 0xa5, 0x56, 0x32, 0x4f, 0x62, 0x6a)
+ if z.ObjectV2 == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ // map header, size 1
+ // string "DDir"
+ o = append(o, 0x81, 0xa4, 0x44, 0x44, 0x69, 0x72)
+ o = msgp.AppendBytes(o, (z.ObjectV2.DataDir)[:])
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
-func (z *xlMetaV2) UnmarshalMsg(bts []byte) (o []byte, err error) {
+func (z *xlMetaDataDirDecoder) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
@@ -268,24 +431,47 @@ func (z *xlMetaV2) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
switch msgp.UnsafeString(field) {
- case "Versions":
- var zb0002 uint32
- zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "Versions")
- return
- }
- if cap(z.Versions) >= int(zb0002) {
- z.Versions = (z.Versions)[:zb0002]
- } else {
- z.Versions = make([]xlMetaV2Version, zb0002)
- }
- for za0001 := range z.Versions {
- bts, err = z.Versions[za0001].UnmarshalMsg(bts)
+ case "V2Obj":
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
if err != nil {
- err = msgp.WrapError(err, "Versions", za0001)
return
}
+ z.ObjectV2 = nil
+ } else {
+ if z.ObjectV2 == nil {
+ z.ObjectV2 = new(struct {
+ DataDir [16]byte `msg:"DDir"`
+ })
+ }
+ var zb0002 uint32
+ zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ObjectV2")
+ return
+ }
+ for zb0002 > 0 {
+ zb0002--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ObjectV2")
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "DDir":
+ bts, err = msgp.ReadExactBytes(bts, (z.ObjectV2.DataDir)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "ObjectV2", "DataDir")
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ObjectV2")
+ return
+ }
+ }
+ }
}
default:
bts, err = msgp.Skip(bts)
@@ -300,10 +486,12 @@ func (z *xlMetaV2) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
-func (z *xlMetaV2) Msgsize() (s int) {
- s = 1 + 9 + msgp.ArrayHeaderSize
- for za0001 := range z.Versions {
- s += z.Versions[za0001].Msgsize()
+func (z *xlMetaDataDirDecoder) Msgsize() (s int) {
+ s = 1 + 6
+ if z.ObjectV2 == nil {
+ s += msgp.NilSize
+ } else {
+ s += 1 + 5 + msgp.ArrayHeaderSize + (16 * (msgp.ByteSize))
}
return
}
@@ -673,23 +861,32 @@ func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) {
}
}
case "PartETags":
- var zb0006 uint32
- zb0006, err = dc.ReadArrayHeader()
- if err != nil {
- err = msgp.WrapError(err, "PartETags")
- return
- }
- if cap(z.PartETags) >= int(zb0006) {
- z.PartETags = (z.PartETags)[:zb0006]
- } else {
- z.PartETags = make([]string, zb0006)
- }
- for za0005 := range z.PartETags {
- z.PartETags[za0005], err = dc.ReadString()
+ if dc.IsNil() {
+ err = dc.ReadNil()
if err != nil {
- err = msgp.WrapError(err, "PartETags", za0005)
+ err = msgp.WrapError(err, "PartETags")
return
}
+ z.PartETags = nil
+ } else {
+ var zb0006 uint32
+ zb0006, err = dc.ReadArrayHeader()
+ if err != nil {
+ err = msgp.WrapError(err, "PartETags")
+ return
+ }
+ if cap(z.PartETags) >= int(zb0006) {
+ z.PartETags = (z.PartETags)[:zb0006]
+ } else {
+ z.PartETags = make([]string, zb0006)
+ }
+ for za0005 := range z.PartETags {
+ z.PartETags[za0005], err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "PartETags", za0005)
+ return
+ }
+ }
}
case "PartSizes":
var zb0007 uint32
@@ -711,23 +908,32 @@ func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) {
}
}
case "PartASizes":
- var zb0008 uint32
- zb0008, err = dc.ReadArrayHeader()
- if err != nil {
- err = msgp.WrapError(err, "PartActualSizes")
- return
- }
- if cap(z.PartActualSizes) >= int(zb0008) {
- z.PartActualSizes = (z.PartActualSizes)[:zb0008]
- } else {
- z.PartActualSizes = make([]int64, zb0008)
- }
- for za0007 := range z.PartActualSizes {
- z.PartActualSizes[za0007], err = dc.ReadInt64()
+ if dc.IsNil() {
+ err = dc.ReadNil()
if err != nil {
- err = msgp.WrapError(err, "PartActualSizes", za0007)
+ err = msgp.WrapError(err, "PartActualSizes")
return
}
+ z.PartActualSizes = nil
+ } else {
+ var zb0008 uint32
+ zb0008, err = dc.ReadArrayHeader()
+ if err != nil {
+ err = msgp.WrapError(err, "PartActualSizes")
+ return
+ }
+ if cap(z.PartActualSizes) >= int(zb0008) {
+ z.PartActualSizes = (z.PartActualSizes)[:zb0008]
+ } else {
+ z.PartActualSizes = make([]int64, zb0008)
+ }
+ for za0007 := range z.PartActualSizes {
+ z.PartActualSizes[za0007], err = dc.ReadInt64()
+ if err != nil {
+ err = msgp.WrapError(err, "PartActualSizes", za0007)
+ return
+ }
+ }
}
case "Size":
z.Size, err = dc.ReadInt64()
@@ -742,64 +948,82 @@ func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
case "MetaSys":
- var zb0009 uint32
- zb0009, err = dc.ReadMapHeader()
- if err != nil {
- err = msgp.WrapError(err, "MetaSys")
- return
- }
- if z.MetaSys == nil {
- z.MetaSys = make(map[string][]byte, zb0009)
- } else if len(z.MetaSys) > 0 {
- for key := range z.MetaSys {
- delete(z.MetaSys, key)
- }
- }
- for zb0009 > 0 {
- zb0009--
- var za0008 string
- var za0009 []byte
- za0008, err = dc.ReadString()
+ if dc.IsNil() {
+ err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "MetaSys")
return
}
- za0009, err = dc.ReadBytes(za0009)
+ z.MetaSys = nil
+ } else {
+ var zb0009 uint32
+ zb0009, err = dc.ReadMapHeader()
if err != nil {
- err = msgp.WrapError(err, "MetaSys", za0008)
+ err = msgp.WrapError(err, "MetaSys")
return
}
- z.MetaSys[za0008] = za0009
- }
- case "MetaUsr":
- var zb0010 uint32
- zb0010, err = dc.ReadMapHeader()
- if err != nil {
- err = msgp.WrapError(err, "MetaUser")
- return
- }
- if z.MetaUser == nil {
- z.MetaUser = make(map[string]string, zb0010)
- } else if len(z.MetaUser) > 0 {
- for key := range z.MetaUser {
- delete(z.MetaUser, key)
+ if z.MetaSys == nil {
+ z.MetaSys = make(map[string][]byte, zb0009)
+ } else if len(z.MetaSys) > 0 {
+ for key := range z.MetaSys {
+ delete(z.MetaSys, key)
+ }
+ }
+ for zb0009 > 0 {
+ zb0009--
+ var za0008 string
+ var za0009 []byte
+ za0008, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "MetaSys")
+ return
+ }
+ za0009, err = dc.ReadBytes(za0009)
+ if err != nil {
+ err = msgp.WrapError(err, "MetaSys", za0008)
+ return
+ }
+ z.MetaSys[za0008] = za0009
}
}
- for zb0010 > 0 {
- zb0010--
- var za0010 string
- var za0011 string
- za0010, err = dc.ReadString()
+ case "MetaUsr":
+ if dc.IsNil() {
+ err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err, "MetaUser")
return
}
- za0011, err = dc.ReadString()
+ z.MetaUser = nil
+ } else {
+ var zb0010 uint32
+ zb0010, err = dc.ReadMapHeader()
if err != nil {
- err = msgp.WrapError(err, "MetaUser", za0010)
+ err = msgp.WrapError(err, "MetaUser")
return
}
- z.MetaUser[za0010] = za0011
+ if z.MetaUser == nil {
+ z.MetaUser = make(map[string]string, zb0010)
+ } else if len(z.MetaUser) > 0 {
+ for key := range z.MetaUser {
+ delete(z.MetaUser, key)
+ }
+ }
+ for zb0010 > 0 {
+ zb0010--
+ var za0010 string
+ var za0011 string
+ za0010, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "MetaUser")
+ return
+ }
+ za0011, err = dc.ReadString()
+ if err != nil {
+ err = msgp.WrapError(err, "MetaUser", za0010)
+ return
+ }
+ z.MetaUser[za0010] = za0011
+ }
}
default:
err = dc.Skip()
@@ -814,31 +1038,9 @@ func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) {
// EncodeMsg implements msgp.Encodable
func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) {
- // omitempty: check for empty values
- zb0001Len := uint32(17)
- var zb0001Mask uint32 /* 17 bits */
- if z.PartActualSizes == nil {
- zb0001Len--
- zb0001Mask |= 0x1000
- }
- if z.MetaSys == nil {
- zb0001Len--
- zb0001Mask |= 0x8000
- }
- if z.MetaUser == nil {
- zb0001Len--
- zb0001Mask |= 0x10000
- }
- // variable map header, size zb0001Len
- err = en.WriteMapHeader(zb0001Len)
- if err != nil {
- return
- }
- if zb0001Len == 0 {
- return
- }
+ // map header, size 17
// write "ID"
- err = en.Append(0xa2, 0x49, 0x44)
+ err = en.Append(0xde, 0x0, 0x11, 0xa2, 0x49, 0x44)
if err != nil {
return
}
@@ -956,17 +1158,24 @@ func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) {
if err != nil {
return
}
- err = en.WriteArrayHeader(uint32(len(z.PartETags)))
- if err != nil {
- err = msgp.WrapError(err, "PartETags")
- return
- }
- for za0005 := range z.PartETags {
- err = en.WriteString(z.PartETags[za0005])
+ if z.PartETags == nil { // allownil: if nil
+ err = en.WriteNil()
if err != nil {
- err = msgp.WrapError(err, "PartETags", za0005)
return
}
+ } else {
+ err = en.WriteArrayHeader(uint32(len(z.PartETags)))
+ if err != nil {
+ err = msgp.WrapError(err, "PartETags")
+ return
+ }
+ for za0005 := range z.PartETags {
+ err = en.WriteString(z.PartETags[za0005])
+ if err != nil {
+ err = msgp.WrapError(err, "PartETags", za0005)
+ return
+ }
+ }
}
// write "PartSizes"
err = en.Append(0xa9, 0x50, 0x61, 0x72, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x73)
@@ -985,12 +1194,17 @@ func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) {
return
}
}
- if (zb0001Mask & 0x1000) == 0 { // if not empty
- // write "PartASizes"
- err = en.Append(0xaa, 0x50, 0x61, 0x72, 0x74, 0x41, 0x53, 0x69, 0x7a, 0x65, 0x73)
+ // write "PartASizes"
+ err = en.Append(0xaa, 0x50, 0x61, 0x72, 0x74, 0x41, 0x53, 0x69, 0x7a, 0x65, 0x73)
+ if err != nil {
+ return
+ }
+ if z.PartActualSizes == nil { // allownil: if nil
+ err = en.WriteNil()
if err != nil {
return
}
+ } else {
err = en.WriteArrayHeader(uint32(len(z.PartActualSizes)))
if err != nil {
err = msgp.WrapError(err, "PartActualSizes")
@@ -1024,12 +1238,17 @@ func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "ModTime")
return
}
- if (zb0001Mask & 0x8000) == 0 { // if not empty
- // write "MetaSys"
- err = en.Append(0xa7, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x79, 0x73)
+ // write "MetaSys"
+ err = en.Append(0xa7, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x79, 0x73)
+ if err != nil {
+ return
+ }
+ if z.MetaSys == nil { // allownil: if nil
+ err = en.WriteNil()
if err != nil {
return
}
+ } else {
err = en.WriteMapHeader(uint32(len(z.MetaSys)))
if err != nil {
err = msgp.WrapError(err, "MetaSys")
@@ -1048,12 +1267,17 @@ func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) {
}
}
}
- if (zb0001Mask & 0x10000) == 0 { // if not empty
- // write "MetaUsr"
- err = en.Append(0xa7, 0x4d, 0x65, 0x74, 0x61, 0x55, 0x73, 0x72)
+ // write "MetaUsr"
+ err = en.Append(0xa7, 0x4d, 0x65, 0x74, 0x61, 0x55, 0x73, 0x72)
+ if err != nil {
+ return
+ }
+ if z.MetaUser == nil { // allownil: if nil
+ err = en.WriteNil()
if err != nil {
return
}
+ } else {
err = en.WriteMapHeader(uint32(len(z.MetaUser)))
if err != nil {
err = msgp.WrapError(err, "MetaUser")
@@ -1078,28 +1302,9 @@ func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) {
// MarshalMsg implements msgp.Marshaler
func (z *xlMetaV2Object) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
- // omitempty: check for empty values
- zb0001Len := uint32(17)
- var zb0001Mask uint32 /* 17 bits */
- if z.PartActualSizes == nil {
- zb0001Len--
- zb0001Mask |= 0x1000
- }
- if z.MetaSys == nil {
- zb0001Len--
- zb0001Mask |= 0x8000
- }
- if z.MetaUser == nil {
- zb0001Len--
- zb0001Mask |= 0x10000
- }
- // variable map header, size zb0001Len
- o = msgp.AppendMapHeader(o, zb0001Len)
- if zb0001Len == 0 {
- return
- }
+ // map header, size 17
// string "ID"
- o = append(o, 0xa2, 0x49, 0x44)
+ o = append(o, 0xde, 0x0, 0x11, 0xa2, 0x49, 0x44)
o = msgp.AppendBytes(o, (z.VersionID)[:])
// string "DDir"
o = append(o, 0xa4, 0x44, 0x44, 0x69, 0x72)
@@ -1136,9 +1341,13 @@ func (z *xlMetaV2Object) MarshalMsg(b []byte) (o []byte, err error) {
}
// string "PartETags"
o = append(o, 0xa9, 0x50, 0x61, 0x72, 0x74, 0x45, 0x54, 0x61, 0x67, 0x73)
- o = msgp.AppendArrayHeader(o, uint32(len(z.PartETags)))
- for za0005 := range z.PartETags {
- o = msgp.AppendString(o, z.PartETags[za0005])
+ if z.PartETags == nil { // allownil: if nil
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len(z.PartETags)))
+ for za0005 := range z.PartETags {
+ o = msgp.AppendString(o, z.PartETags[za0005])
+ }
}
// string "PartSizes"
o = append(o, 0xa9, 0x50, 0x61, 0x72, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x73)
@@ -1146,9 +1355,11 @@ func (z *xlMetaV2Object) MarshalMsg(b []byte) (o []byte, err error) {
for za0006 := range z.PartSizes {
o = msgp.AppendInt64(o, z.PartSizes[za0006])
}
- if (zb0001Mask & 0x1000) == 0 { // if not empty
- // string "PartASizes"
- o = append(o, 0xaa, 0x50, 0x61, 0x72, 0x74, 0x41, 0x53, 0x69, 0x7a, 0x65, 0x73)
+ // string "PartASizes"
+ o = append(o, 0xaa, 0x50, 0x61, 0x72, 0x74, 0x41, 0x53, 0x69, 0x7a, 0x65, 0x73)
+ if z.PartActualSizes == nil { // allownil: if nil
+ o = msgp.AppendNil(o)
+ } else {
o = msgp.AppendArrayHeader(o, uint32(len(z.PartActualSizes)))
for za0007 := range z.PartActualSizes {
o = msgp.AppendInt64(o, z.PartActualSizes[za0007])
@@ -1160,18 +1371,22 @@ func (z *xlMetaV2Object) MarshalMsg(b []byte) (o []byte, err error) {
// string "MTime"
o = append(o, 0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65)
o = msgp.AppendInt64(o, z.ModTime)
- if (zb0001Mask & 0x8000) == 0 { // if not empty
- // string "MetaSys"
- o = append(o, 0xa7, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x79, 0x73)
+ // string "MetaSys"
+ o = append(o, 0xa7, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x79, 0x73)
+ if z.MetaSys == nil { // allownil: if nil
+ o = msgp.AppendNil(o)
+ } else {
o = msgp.AppendMapHeader(o, uint32(len(z.MetaSys)))
for za0008, za0009 := range z.MetaSys {
o = msgp.AppendString(o, za0008)
o = msgp.AppendBytes(o, za0009)
}
}
- if (zb0001Mask & 0x10000) == 0 { // if not empty
- // string "MetaUsr"
- o = append(o, 0xa7, 0x4d, 0x65, 0x74, 0x61, 0x55, 0x73, 0x72)
+ // string "MetaUsr"
+ o = append(o, 0xa7, 0x4d, 0x65, 0x74, 0x61, 0x55, 0x73, 0x72)
+ if z.MetaUser == nil { // allownil: if nil
+ o = msgp.AppendNil(o)
+ } else {
o = msgp.AppendMapHeader(o, uint32(len(z.MetaUser)))
for za0010, za0011 := range z.MetaUser {
o = msgp.AppendString(o, za0010)
@@ -1294,23 +1509,28 @@ func (z *xlMetaV2Object) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
case "PartETags":
- var zb0006 uint32
- zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "PartETags")
- return
- }
- if cap(z.PartETags) >= int(zb0006) {
- z.PartETags = (z.PartETags)[:zb0006]
+ if msgp.IsNil(bts) {
+ bts = bts[1:]
+ z.PartETags = nil
} else {
- z.PartETags = make([]string, zb0006)
- }
- for za0005 := range z.PartETags {
- z.PartETags[za0005], bts, err = msgp.ReadStringBytes(bts)
+ var zb0006 uint32
+ zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
- err = msgp.WrapError(err, "PartETags", za0005)
+ err = msgp.WrapError(err, "PartETags")
return
}
+ if cap(z.PartETags) >= int(zb0006) {
+ z.PartETags = (z.PartETags)[:zb0006]
+ } else {
+ z.PartETags = make([]string, zb0006)
+ }
+ for za0005 := range z.PartETags {
+ z.PartETags[za0005], bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "PartETags", za0005)
+ return
+ }
+ }
}
case "PartSizes":
var zb0007 uint32
@@ -1332,23 +1552,28 @@ func (z *xlMetaV2Object) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
case "PartASizes":
- var zb0008 uint32
- zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "PartActualSizes")
- return
- }
- if cap(z.PartActualSizes) >= int(zb0008) {
- z.PartActualSizes = (z.PartActualSizes)[:zb0008]
+ if msgp.IsNil(bts) {
+ bts = bts[1:]
+ z.PartActualSizes = nil
} else {
- z.PartActualSizes = make([]int64, zb0008)
- }
- for za0007 := range z.PartActualSizes {
- z.PartActualSizes[za0007], bts, err = msgp.ReadInt64Bytes(bts)
+ var zb0008 uint32
+ zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
- err = msgp.WrapError(err, "PartActualSizes", za0007)
+ err = msgp.WrapError(err, "PartActualSizes")
return
}
+ if cap(z.PartActualSizes) >= int(zb0008) {
+ z.PartActualSizes = (z.PartActualSizes)[:zb0008]
+ } else {
+ z.PartActualSizes = make([]int64, zb0008)
+ }
+ for za0007 := range z.PartActualSizes {
+ z.PartActualSizes[za0007], bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "PartActualSizes", za0007)
+ return
+ }
+ }
}
case "Size":
z.Size, bts, err = msgp.ReadInt64Bytes(bts)
@@ -1363,64 +1588,74 @@ func (z *xlMetaV2Object) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "MetaSys":
- var zb0009 uint32
- zb0009, bts, err = msgp.ReadMapHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "MetaSys")
- return
- }
- if z.MetaSys == nil {
- z.MetaSys = make(map[string][]byte, zb0009)
- } else if len(z.MetaSys) > 0 {
- for key := range z.MetaSys {
- delete(z.MetaSys, key)
- }
- }
- for zb0009 > 0 {
- var za0008 string
- var za0009 []byte
- zb0009--
- za0008, bts, err = msgp.ReadStringBytes(bts)
+ if msgp.IsNil(bts) {
+ bts = bts[1:]
+ z.MetaSys = nil
+ } else {
+ var zb0009 uint32
+ zb0009, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "MetaSys")
return
}
- za0009, bts, err = msgp.ReadBytesBytes(bts, za0009)
- if err != nil {
- err = msgp.WrapError(err, "MetaSys", za0008)
- return
+ if z.MetaSys == nil {
+ z.MetaSys = make(map[string][]byte, zb0009)
+ } else if len(z.MetaSys) > 0 {
+ for key := range z.MetaSys {
+ delete(z.MetaSys, key)
+ }
+ }
+ for zb0009 > 0 {
+ var za0008 string
+ var za0009 []byte
+ zb0009--
+ za0008, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "MetaSys")
+ return
+ }
+ za0009, bts, err = msgp.ReadBytesBytes(bts, za0009)
+ if err != nil {
+ err = msgp.WrapError(err, "MetaSys", za0008)
+ return
+ }
+ z.MetaSys[za0008] = za0009
}
- z.MetaSys[za0008] = za0009
}
case "MetaUsr":
- var zb0010 uint32
- zb0010, bts, err = msgp.ReadMapHeaderBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "MetaUser")
- return
- }
- if z.MetaUser == nil {
- z.MetaUser = make(map[string]string, zb0010)
- } else if len(z.MetaUser) > 0 {
- for key := range z.MetaUser {
- delete(z.MetaUser, key)
- }
- }
- for zb0010 > 0 {
- var za0010 string
- var za0011 string
- zb0010--
- za0010, bts, err = msgp.ReadStringBytes(bts)
+ if msgp.IsNil(bts) {
+ bts = bts[1:]
+ z.MetaUser = nil
+ } else {
+ var zb0010 uint32
+ zb0010, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "MetaUser")
return
}
- za0011, bts, err = msgp.ReadStringBytes(bts)
- if err != nil {
- err = msgp.WrapError(err, "MetaUser", za0010)
- return
+ if z.MetaUser == nil {
+ z.MetaUser = make(map[string]string, zb0010)
+ } else if len(z.MetaUser) > 0 {
+ for key := range z.MetaUser {
+ delete(z.MetaUser, key)
+ }
+ }
+ for zb0010 > 0 {
+ var za0010 string
+ var za0011 string
+ zb0010--
+ za0010, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "MetaUser")
+ return
+ }
+ za0011, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "MetaUser", za0010)
+ return
+ }
+ z.MetaUser[za0010] = za0011
}
- z.MetaUser[za0010] = za0011
}
default:
bts, err = msgp.Skip(bts)
@@ -1826,3 +2061,154 @@ func (z *xlMetaV2Version) Msgsize() (s int) {
}
return
}
+
+// DecodeMsg implements msgp.Decodable
+func (z *xlMetaV2VersionHeader) DecodeMsg(dc *msgp.Reader) (err error) {
+ var zb0001 uint32
+ zb0001, err = dc.ReadArrayHeader()
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 != 5 {
+ err = msgp.ArrayError{Wanted: 5, Got: zb0001}
+ return
+ }
+ err = dc.ReadExactBytes((z.VersionID)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "VersionID")
+ return
+ }
+ z.ModTime, err = dc.ReadInt64()
+ if err != nil {
+ err = msgp.WrapError(err, "ModTime")
+ return
+ }
+ err = dc.ReadExactBytes((z.Signature)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "Signature")
+ return
+ }
+ {
+ var zb0002 uint8
+ zb0002, err = dc.ReadUint8()
+ if err != nil {
+ err = msgp.WrapError(err, "Type")
+ return
+ }
+ z.Type = VersionType(zb0002)
+ }
+ {
+ var zb0003 uint8
+ zb0003, err = dc.ReadUint8()
+ if err != nil {
+ err = msgp.WrapError(err, "Flags")
+ return
+ }
+ z.Flags = xlFlags(zb0003)
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *xlMetaV2VersionHeader) EncodeMsg(en *msgp.Writer) (err error) {
+ // array header, size 5
+ err = en.Append(0x95)
+ if err != nil {
+ return
+ }
+ err = en.WriteBytes((z.VersionID)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "VersionID")
+ return
+ }
+ err = en.WriteInt64(z.ModTime)
+ if err != nil {
+ err = msgp.WrapError(err, "ModTime")
+ return
+ }
+ err = en.WriteBytes((z.Signature)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "Signature")
+ return
+ }
+ err = en.WriteUint8(uint8(z.Type))
+ if err != nil {
+ err = msgp.WrapError(err, "Type")
+ return
+ }
+ err = en.WriteUint8(uint8(z.Flags))
+ if err != nil {
+ err = msgp.WrapError(err, "Flags")
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *xlMetaV2VersionHeader) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // array header, size 5
+ o = append(o, 0x95)
+ o = msgp.AppendBytes(o, (z.VersionID)[:])
+ o = msgp.AppendInt64(o, z.ModTime)
+ o = msgp.AppendBytes(o, (z.Signature)[:])
+ o = msgp.AppendUint8(o, uint8(z.Type))
+ o = msgp.AppendUint8(o, uint8(z.Flags))
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *xlMetaV2VersionHeader) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var zb0001 uint32
+ zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 != 5 {
+ err = msgp.ArrayError{Wanted: 5, Got: zb0001}
+ return
+ }
+ bts, err = msgp.ReadExactBytes(bts, (z.VersionID)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "VersionID")
+ return
+ }
+ z.ModTime, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ModTime")
+ return
+ }
+ bts, err = msgp.ReadExactBytes(bts, (z.Signature)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "Signature")
+ return
+ }
+ {
+ var zb0002 uint8
+ zb0002, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Type")
+ return
+ }
+ z.Type = VersionType(zb0002)
+ }
+ {
+ var zb0003 uint8
+ zb0003, bts, err = msgp.ReadUint8Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Flags")
+ return
+ }
+ z.Flags = xlFlags(zb0003)
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *xlMetaV2VersionHeader) Msgsize() (s int) {
+ s = 1 + msgp.ArrayHeaderSize + (16 * (msgp.ByteSize)) + msgp.Int64Size + msgp.ArrayHeaderSize + (4 * (msgp.ByteSize)) + msgp.Uint8Size + msgp.Uint8Size
+ return
+}
diff --git a/cmd/xl-storage-format-v2_gen_test.go b/cmd/xl-storage-format-v2_gen_test.go
index 39f03c898..afcc74dae 100644
--- a/cmd/xl-storage-format-v2_gen_test.go
+++ b/cmd/xl-storage-format-v2_gen_test.go
@@ -9,8 +9,8 @@ import (
"github.com/tinylib/msgp/msgp"
)
-func TestMarshalUnmarshalxlMetaV2(t *testing.T) {
- v := xlMetaV2{}
+func TestMarshalUnmarshalxlMetaDataDirDecoder(t *testing.T) {
+ v := xlMetaDataDirDecoder{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
@@ -32,8 +32,8 @@ func TestMarshalUnmarshalxlMetaV2(t *testing.T) {
}
}
-func BenchmarkMarshalMsgxlMetaV2(b *testing.B) {
- v := xlMetaV2{}
+func BenchmarkMarshalMsgxlMetaDataDirDecoder(b *testing.B) {
+ v := xlMetaDataDirDecoder{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -41,8 +41,8 @@ func BenchmarkMarshalMsgxlMetaV2(b *testing.B) {
}
}
-func BenchmarkAppendMsgxlMetaV2(b *testing.B) {
- v := xlMetaV2{}
+func BenchmarkAppendMsgxlMetaDataDirDecoder(b *testing.B) {
+ v := xlMetaDataDirDecoder{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
@@ -53,8 +53,8 @@ func BenchmarkAppendMsgxlMetaV2(b *testing.B) {
}
}
-func BenchmarkUnmarshalxlMetaV2(b *testing.B) {
- v := xlMetaV2{}
+func BenchmarkUnmarshalxlMetaDataDirDecoder(b *testing.B) {
+ v := xlMetaDataDirDecoder{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
@@ -67,17 +67,17 @@ func BenchmarkUnmarshalxlMetaV2(b *testing.B) {
}
}
-func TestEncodeDecodexlMetaV2(t *testing.T) {
- v := xlMetaV2{}
+func TestEncodeDecodexlMetaDataDirDecoder(t *testing.T) {
+ v := xlMetaDataDirDecoder{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
- t.Log("WARNING: TestEncodeDecodexlMetaV2 Msgsize() is inaccurate")
+ t.Log("WARNING: TestEncodeDecodexlMetaDataDirDecoder Msgsize() is inaccurate")
}
- vn := xlMetaV2{}
+ vn := xlMetaDataDirDecoder{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
@@ -91,8 +91,8 @@ func TestEncodeDecodexlMetaV2(t *testing.T) {
}
}
-func BenchmarkEncodexlMetaV2(b *testing.B) {
- v := xlMetaV2{}
+func BenchmarkEncodexlMetaDataDirDecoder(b *testing.B) {
+ v := xlMetaDataDirDecoder{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
@@ -105,8 +105,8 @@ func BenchmarkEncodexlMetaV2(b *testing.B) {
en.Flush()
}
-func BenchmarkDecodexlMetaV2(b *testing.B) {
- v := xlMetaV2{}
+func BenchmarkDecodexlMetaDataDirDecoder(b *testing.B) {
+ v := xlMetaDataDirDecoder{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
@@ -460,3 +460,116 @@ func BenchmarkDecodexlMetaV2Version(b *testing.B) {
}
}
}
+
+func TestMarshalUnmarshalxlMetaV2VersionHeader(t *testing.T) {
+ v := xlMetaV2VersionHeader{}
+ bts, err := v.MarshalMsg(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func BenchmarkMarshalMsgxlMetaV2VersionHeader(b *testing.B) {
+ v := xlMetaV2VersionHeader{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgxlMetaV2VersionHeader(b *testing.B) {
+ v := xlMetaV2VersionHeader{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts, _ = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts, _ = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalxlMetaV2VersionHeader(b *testing.B) {
+ v := xlMetaV2VersionHeader{}
+ bts, _ := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestEncodeDecodexlMetaV2VersionHeader(t *testing.T) {
+ v := xlMetaV2VersionHeader{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+
+ m := v.Msgsize()
+ if buf.Len() > m {
+ t.Log("WARNING: TestEncodeDecodexlMetaV2VersionHeader Msgsize() is inaccurate")
+ }
+
+ vn := xlMetaV2VersionHeader{}
+ err := msgp.Decode(&buf, &vn)
+ if err != nil {
+ t.Error(err)
+ }
+
+ buf.Reset()
+ msgp.Encode(&buf, &v)
+ err = msgp.NewReader(&buf).Skip()
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func BenchmarkEncodexlMetaV2VersionHeader(b *testing.B) {
+ v := xlMetaV2VersionHeader{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+ b.SetBytes(int64(buf.Len()))
+ en := msgp.NewWriter(msgp.Nowhere)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.EncodeMsg(en)
+ }
+ en.Flush()
+}
+
+func BenchmarkDecodexlMetaV2VersionHeader(b *testing.B) {
+ v := xlMetaV2VersionHeader{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+ b.SetBytes(int64(buf.Len()))
+ rd := msgp.NewEndlessReader(buf.Bytes(), b)
+ dc := msgp.NewReader(rd)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ err := v.DecodeMsg(dc)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/cmd/xl-storage-format-v2_string.go b/cmd/xl-storage-format-v2_string.go
new file mode 100644
index 000000000..8ebf4ace3
--- /dev/null
+++ b/cmd/xl-storage-format-v2_string.go
@@ -0,0 +1,27 @@
+// Code generated by "stringer -type VersionType -output=xl-storage-format-v2_string.go xl-storage-format-v2.go"; DO NOT EDIT.
+
+package cmd
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[invalidVersionType-0]
+ _ = x[ObjectType-1]
+ _ = x[DeleteType-2]
+ _ = x[LegacyType-3]
+ _ = x[lastVersionType-4]
+}
+
+const _VersionType_name = "invalidVersionTypeObjectTypeDeleteTypeLegacyTypelastVersionType"
+
+var _VersionType_index = [...]uint8{0, 18, 28, 38, 48, 63}
+
+func (i VersionType) String() string {
+ if i >= VersionType(len(_VersionType_index)-1) {
+ return "VersionType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _VersionType_name[_VersionType_index[i]:_VersionType_index[i+1]]
+}
diff --git a/cmd/xl-storage-format-v2_test.go b/cmd/xl-storage-format-v2_test.go
index 4b55a4132..0c8548562 100644
--- a/cmd/xl-storage-format-v2_test.go
+++ b/cmd/xl-storage-format-v2_test.go
@@ -19,12 +19,15 @@ package cmd
import (
"bytes"
+ "sort"
"testing"
"time"
"github.com/google/uuid"
+ "github.com/klauspost/compress/zstd"
"github.com/minio/minio/internal/bucket/lifecycle"
xhttp "github.com/minio/minio/internal/http"
+ "github.com/minio/minio/internal/ioutil"
)
func TestXLV2FormatData(t *testing.T) {
@@ -341,15 +344,17 @@ func TestDeleteVersionWithSharedDataDir(t *testing.T) {
}
}
fi.TransitionStatus = tc.transitionStatus
+ fi.ModTime = fi.ModTime.Add(time.Duration(i) * time.Second)
failOnErr(i+1, xl.AddVersion(fi))
fi.ExpireRestored = tc.expireRestored
fileInfos = append(fileInfos, fi)
}
for i, tc := range testCases {
- version := xl.Versions[i]
- if actual := xl.SharedDataDirCount(version.ObjectV2.VersionID, version.ObjectV2.DataDir); actual != tc.shares {
- t.Fatalf("Test %d: For %#v, expected sharers of data directory %d got %d", i+1, version.ObjectV2, tc.shares, actual)
+ _, version, err := xl.findVersion(uuid.MustParse(tc.versionID))
+ failOnErr(i+1, err)
+ if got := xl.SharedDataDirCount(version.getVersionID(), version.ObjectV2.DataDir); got != tc.shares {
+ t.Fatalf("Test %d: For %#v, expected sharers of data directory %d got %d", i+1, version.ObjectV2.VersionID, tc.shares, got)
}
}
@@ -366,3 +371,110 @@ func TestDeleteVersionWithSharedDataDir(t *testing.T) {
count++
}
}
+
+func Benchmark_xlMetaV2Shallow_Load(b *testing.B) {
+ data, err := ioutil.ReadFile("testdata/xl.meta-v1.2.zst")
+ if err != nil {
+ b.Fatal(err)
+ }
+ dec, _ := zstd.NewReader(nil)
+ data, err = dec.DecodeAll(data, nil)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.Run("legacy", func(b *testing.B) {
+ var xl xlMetaV2
+ b.ReportAllocs()
+ b.ResetTimer()
+ b.SetBytes(855) // number of versions...
+ for i := 0; i < b.N; i++ {
+ err = xl.Load(data)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+ b.Run("indexed", func(b *testing.B) {
+ var xl xlMetaV2
+ err = xl.Load(data)
+ if err != nil {
+ b.Fatal(err)
+ }
+ data, err := xl.AppendTo(nil)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.ReportAllocs()
+ b.ResetTimer()
+ b.SetBytes(855) // number of versions...
+ for i := 0; i < b.N; i++ {
+ err = xl.Load(data)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+
+}
+
+func Test_xlMetaV2Shallow_Load(t *testing.T) {
+ // Load Legacy
+ data, err := ioutil.ReadFile("testdata/xl.meta-v1.2.zst")
+ if err != nil {
+ t.Fatal(err)
+ }
+ dec, _ := zstd.NewReader(nil)
+ data, err = dec.DecodeAll(data, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test := func(t *testing.T, xl *xlMetaV2) {
+ if len(xl.versions) != 855 {
+ t.Errorf("want %d versions, got %d", 855, len(xl.versions))
+ }
+ xl.sortByModTime()
+ if !sort.SliceIsSorted(xl.versions, func(i, j int) bool {
+ return xl.versions[i].header.ModTime > xl.versions[j].header.ModTime
+ }) {
+ t.Errorf("Contents not sorted")
+ }
+ for i := range xl.versions {
+ hdr := xl.versions[i].header
+ ver, err := xl.getIdx(i)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ gotHdr := ver.header()
+ if hdr != gotHdr {
+ t.Errorf("Header does not match, index: %+v != meta: %+v", hdr, gotHdr)
+ }
+ }
+ }
+ t.Run("load-legacy", func(t *testing.T) {
+ var xl xlMetaV2
+ err = xl.Load(data)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test(t, &xl)
+ })
+ t.Run("roundtrip", func(t *testing.T) {
+ var xl xlMetaV2
+ err = xl.Load(data)
+ if err != nil {
+ t.Fatal(err)
+ }
+ data, err = xl.AppendTo(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ xl = xlMetaV2{}
+ err = xl.Load(data)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test(t, &xl)
+ })
+}
diff --git a/cmd/xl-storage-format_test.go b/cmd/xl-storage-format_test.go
index 4fd621234..8f4f82f36 100644
--- a/cmd/xl-storage-format_test.go
+++ b/cmd/xl-storage-format_test.go
@@ -21,10 +21,14 @@ import (
"bytes"
"encoding/hex"
"encoding/json"
+ "fmt"
+ "math/rand"
"testing"
+ "time"
"github.com/dustin/go-humanize"
jsoniter "github.com/json-iterator/go"
+ xhttp "github.com/minio/minio/internal/http"
)
func TestIsXLMetaFormatValid(t *testing.T) {
@@ -317,3 +321,221 @@ func TestGetPartSizeFromIdx(t *testing.T) {
}
}
}
+
+func BenchmarkXlMetaV2Shallow(b *testing.B) {
+ fi := FileInfo{
+ Volume: "volume",
+ Name: "object-name",
+ VersionID: "756100c6-b393-4981-928a-d49bbc164741",
+ IsLatest: true,
+ Deleted: false,
+ TransitionStatus: "PENDING",
+ DataDir: "bffea160-ca7f-465f-98bc-9b4f1c3ba1ef",
+ XLV1: false,
+ ModTime: time.Now(),
+ Size: 1234456,
+ Mode: 0,
+ Metadata: map[string]string{
+ xhttp.AmzRestore: "FAILED",
+ xhttp.ContentMD5: mustGetUUID(),
+ xhttp.AmzBucketReplicationStatus: "PENDING",
+ xhttp.ContentType: "application/json",
+ },
+ Parts: []ObjectPartInfo{{
+ Number: 1,
+ Size: 1234345,
+ ActualSize: 1234345,
+ },
+ {
+ Number: 2,
+ Size: 1234345,
+ ActualSize: 1234345,
+ },
+ },
+ Erasure: ErasureInfo{
+ Algorithm: ReedSolomon.String(),
+ DataBlocks: 4,
+ ParityBlocks: 2,
+ BlockSize: 10000,
+ Index: 1,
+ Distribution: []int{1, 2, 3, 4, 5, 6, 7, 8},
+ Checksums: []ChecksumInfo{{
+ PartNumber: 1,
+ Algorithm: HighwayHash256S,
+ Hash: nil,
+ },
+ {
+ PartNumber: 2,
+ Algorithm: HighwayHash256S,
+ Hash: nil,
+ },
+ },
+ },
+ }
+ for _, size := range []int{1, 10, 1000, 100_000} {
+ b.Run(fmt.Sprint(size, "-versions"), func(b *testing.B) {
+ var xl xlMetaV2
+ ids := make([]string, size)
+ for i := 0; i < size; i++ {
+ fi.VersionID = mustGetUUID()
+ fi.DataDir = mustGetUUID()
+ ids[i] = fi.VersionID
+ fi.ModTime = fi.ModTime.Add(-time.Second)
+ xl.AddVersion(fi)
+ }
+ // Encode all. This is used for benchmarking.
+ enc, err := xl.AppendTo(nil)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.Logf("Serialized size: %d bytes", len(enc))
+ rng := rand.New(rand.NewSource(0))
+ var dump = make([]byte, len(enc))
+ b.Run("UpdateObjectVersion", func(b *testing.B) {
+ b.SetBytes(int64(size))
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ // Load...
+ xl = xlMetaV2{}
+ err := xl.Load(enc)
+ if err != nil {
+ b.Fatal(err)
+ }
+ // Update modtime for resorting...
+ fi.ModTime = fi.ModTime.Add(-time.Second)
+ // Update a random version.
+ fi.VersionID = ids[rng.Intn(size)]
+ // Update...
+ err = xl.UpdateObjectVersion(fi)
+ if err != nil {
+ b.Fatal(err)
+ }
+ // Save...
+ dump, err = xl.AppendTo(dump[:0])
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+ b.Run("DeleteVersion", func(b *testing.B) {
+ b.SetBytes(int64(size))
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ // Load...
+ xl = xlMetaV2{}
+ err := xl.Load(enc)
+ if err != nil {
+ b.Fatal(err)
+ }
+ // Update a random version.
+ fi.VersionID = ids[rng.Intn(size)]
+ // Delete...
+ _, _, err = xl.DeleteVersion(fi)
+ if err != nil {
+ b.Fatal(err)
+ }
+ // Save...
+ dump, err = xl.AppendTo(dump[:0])
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+ b.Run("AddVersion", func(b *testing.B) {
+ b.SetBytes(int64(size))
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ // Load...
+ xl = xlMetaV2{}
+ err := xl.Load(enc)
+ if err != nil {
+ b.Fatal(err)
+ }
+ // Update modtime for resorting...
+ fi.ModTime = fi.ModTime.Add(-time.Second)
+ // Update a random version.
+ fi.VersionID = mustGetUUID()
+ // Add...
+ err = xl.AddVersion(fi)
+ if err != nil {
+ b.Fatal(err)
+ }
+ // Save...
+ dump, err = xl.AppendTo(dump[:0])
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+ b.Run("ToFileInfo", func(b *testing.B) {
+ b.SetBytes(int64(size))
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ // Load...
+ xl = xlMetaV2{}
+ err := xl.Load(enc)
+ if err != nil {
+ b.Fatal(err)
+ }
+ // List...
+ _, err = xl.ToFileInfo("volume", "path", ids[rng.Intn(size)])
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+ b.Run("ListVersions", func(b *testing.B) {
+ b.SetBytes(int64(size))
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ // Load...
+ xl = xlMetaV2{}
+ err := xl.Load(enc)
+ if err != nil {
+ b.Fatal(err)
+ }
+ // List...
+ _, err = xl.ListVersions("volume", "path")
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+ b.Run("ToFileInfoNew", func(b *testing.B) {
+ b.SetBytes(int64(size))
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ buf, _ := isIndexedMetaV2(enc)
+ if buf == nil {
+ b.Fatal("buf == nil")
+ }
+ _, err = buf.ToFileInfo("volume", "path", ids[rng.Intn(size)])
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+ b.Run("ListVersionsNew", func(b *testing.B) {
+ b.SetBytes(int64(size))
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ buf, _ := isIndexedMetaV2(enc)
+ if buf == nil {
+ b.Fatal("buf == nil")
+ }
+ _, err = buf.ListVersions("volume", "path")
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+ })
+ }
+}
diff --git a/cmd/xl-storage-free-version.go b/cmd/xl-storage-free-version.go
index 44e55ffaa..3187595e1 100644
--- a/cmd/xl-storage-free-version.go
+++ b/cmd/xl-storage-free-version.go
@@ -75,7 +75,7 @@ func (j xlMetaV2Version) FreeVersion() bool {
// AddFreeVersion adds a free-version if needed for fi.VersionID version.
// Free-version will be added if fi.VersionID has transitioned.
-func (z *xlMetaV2) AddFreeVersion(fi FileInfo) error {
+func (x *xlMetaV2) AddFreeVersion(fi FileInfo) error {
var uv uuid.UUID
var err error
switch fi.VersionID {
@@ -87,19 +87,22 @@ func (z *xlMetaV2) AddFreeVersion(fi FileInfo) error {
}
}
- for _, version := range z.Versions {
- switch version.Type {
- case ObjectType:
- if version.ObjectV2.VersionID == uv {
- // if uv has tiered content we add a
- // free-version to track it for asynchronous
- // deletion via scanner.
- if freeVersion, toFree := version.ObjectV2.InitFreeVersion(fi); toFree {
- z.Versions = append(z.Versions, freeVersion)
- }
- return nil
- }
+ for i, version := range x.versions {
+ if version.header.VersionID != uv || version.header.Type != ObjectType {
+ continue
}
+ // if uv has tiered content we add a
+ // free-version to track it for asynchronous
+ // deletion via scanner.
+ ver, err := x.getIdx(i)
+ if err != nil {
+ return err
+ }
+
+ if freeVersion, toFree := ver.ObjectV2.InitFreeVersion(fi); toFree {
+ return x.addVersion(freeVersion)
+ }
+ return nil
}
return nil
}
diff --git a/cmd/xl-storage-free-version_test.go b/cmd/xl-storage-free-version_test.go
index 40e6d1e22..ea81b2c24 100644
--- a/cmd/xl-storage-free-version_test.go
+++ b/cmd/xl-storage-free-version_test.go
@@ -24,8 +24,8 @@ import (
"github.com/minio/minio/internal/bucket/lifecycle"
)
-func (z xlMetaV2) listFreeVersions(volume, path string) ([]FileInfo, error) {
- fivs, _, err := z.ListVersions(volume, path)
+func (x xlMetaV2) listFreeVersions(volume, path string) ([]FileInfo, error) {
+ fivs, err := x.ListVersions(volume, path)
if err != nil {
return nil, err
}
@@ -41,8 +41,21 @@ func (z xlMetaV2) listFreeVersions(volume, path string) ([]FileInfo, error) {
}
func TestFreeVersion(t *testing.T) {
+ fatalErr := func(err error) {
+ t.Helper()
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
// Add a version with tiered content, one with local content
xl := xlMetaV2{}
+ counter := 1
+ report := func() {
+ t.Helper()
+ // t.Logf("versions (%d): len = %d", counter, len(xl.versions))
+ counter++
+ }
fi := FileInfo{
Volume: "volume",
Name: "object-name",
@@ -77,16 +90,21 @@ func TestFreeVersion(t *testing.T) {
SuccessorModTime: time.Time{},
}
// Add a version with local content
- xl.AddVersion(fi)
+ fatalErr(xl.AddVersion(fi))
+ report()
// Add null version with tiered content
tierfi := fi
tierfi.VersionID = ""
- xl.AddVersion(tierfi)
+ fatalErr(xl.AddVersion(tierfi))
+ report()
tierfi.TransitionStatus = lifecycle.TransitionComplete
tierfi.TransitionedObjName = mustGetUUID()
tierfi.TransitionTier = "MINIOTIER-1"
- xl.DeleteVersion(tierfi)
+ var err error
+ _, _, err = xl.DeleteVersion(tierfi)
+ fatalErr(err)
+ report()
fvIDs := []string{
"00000000-0000-0000-0000-0000000000f1",
@@ -95,15 +113,20 @@ func TestFreeVersion(t *testing.T) {
// Simulate overwrite of null version
newtierfi := tierfi
newtierfi.SetTierFreeVersionID(fvIDs[0])
- xl.AddFreeVersion(newtierfi)
- xl.AddVersion(newtierfi)
+ fatalErr(xl.AddFreeVersion(newtierfi))
+ report()
+ fatalErr(xl.AddVersion(newtierfi))
+ report()
// Simulate removal of null version
newtierfi.TransitionTier = ""
newtierfi.TransitionedObjName = ""
newtierfi.TransitionStatus = ""
newtierfi.SetTierFreeVersionID(fvIDs[1])
- xl.DeleteVersion(newtierfi)
+ report()
+ _, _, err = xl.DeleteVersion(newtierfi)
+ report()
+ fatalErr(err)
// Check number of free-versions
freeVersions, err := xl.listFreeVersions(newtierfi.Volume, newtierfi.Name)
@@ -118,8 +141,10 @@ func TestFreeVersion(t *testing.T) {
freefi := newtierfi
for _, fvID := range fvIDs {
freefi.VersionID = fvID
- xl.DeleteVersion(freefi)
+ _, _, err = xl.DeleteVersion(freefi)
+ fatalErr(err)
}
+ report()
// Check number of free-versions
freeVersions, err = xl.listFreeVersions(newtierfi.Volume, newtierfi.Name)
@@ -129,11 +154,13 @@ func TestFreeVersion(t *testing.T) {
if len(freeVersions) != 0 {
t.Fatalf("Expected zero free version but got %d", len(freeVersions))
}
+ report()
// Adding a free version to a version with no tiered content.
newfi := fi
newfi.SetTierFreeVersionID("00000000-0000-0000-0000-0000000000f3")
- xl.AddFreeVersion(newfi) // this shouldn't add a free-version
+ fatalErr(xl.AddFreeVersion(newfi)) // this shouldn't add a free-version
+ report()
// Check number of free-versions
freeVersions, err = xl.listFreeVersions(newtierfi.Volume, newtierfi.Name)
diff --git a/cmd/xl-storage-meta-inline.go b/cmd/xl-storage-meta-inline.go
new file mode 100644
index 000000000..d8259c555
--- /dev/null
+++ b/cmd/xl-storage-meta-inline.go
@@ -0,0 +1,408 @@
+// Copyright (c) 2015-2021 MinIO, Inc.
+//
+// This file is part of MinIO Object Storage stack
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package cmd
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/minio/minio/internal/logger"
+ "github.com/tinylib/msgp/msgp"
+)
+
+// xlMetaInlineData is serialized data in [string][]byte pairs.
+type xlMetaInlineData []byte
+
+// xlMetaInlineDataVer indicates the version of the inline data structure.
+const xlMetaInlineDataVer = 1
+
+// versionOK returns whether the version is ok.
+func (x xlMetaInlineData) versionOK() bool {
+ if len(x) == 0 {
+ return true
+ }
+ return x[0] > 0 && x[0] <= xlMetaInlineDataVer
+}
+
+// afterVersion returns the payload after the version, if any.
+func (x xlMetaInlineData) afterVersion() []byte {
+ if len(x) == 0 {
+ return x
+ }
+ return x[1:]
+}
+
+// find the data with key s.
+// Returns nil if not found or an error occurs.
+func (x xlMetaInlineData) find(key string) []byte {
+ if len(x) == 0 || !x.versionOK() {
+ return nil
+ }
+ sz, buf, err := msgp.ReadMapHeaderBytes(x.afterVersion())
+ if err != nil || sz == 0 {
+ return nil
+ }
+ for i := uint32(0); i < sz; i++ {
+ var found []byte
+ found, buf, err = msgp.ReadMapKeyZC(buf)
+ if err != nil {
+ return nil
+ }
+ if string(found) == key {
+ val, _, _ := msgp.ReadBytesZC(buf)
+ return val
+ }
+ // Skip it
+ _, buf, err = msgp.ReadBytesZC(buf)
+ if err != nil {
+ return nil
+ }
+ }
+ return nil
+}
+
+// validate checks if the data is valid.
+// It does not check integrity of the stored data.
+func (x xlMetaInlineData) validate() error {
+ if len(x) == 0 {
+ return nil
+ }
+
+ if !x.versionOK() {
+ return fmt.Errorf("xlMetaInlineData: unknown version 0x%x", x[0])
+ }
+
+ sz, buf, err := msgp.ReadMapHeaderBytes(x.afterVersion())
+ if err != nil {
+ return fmt.Errorf("xlMetaInlineData: %w", err)
+ }
+
+ for i := uint32(0); i < sz; i++ {
+ var key []byte
+ key, buf, err = msgp.ReadMapKeyZC(buf)
+ if err != nil {
+ return fmt.Errorf("xlMetaInlineData: %w", err)
+ }
+ if len(key) == 0 {
+ return fmt.Errorf("xlMetaInlineData: key %d is length 0", i)
+ }
+ _, buf, err = msgp.ReadBytesZC(buf)
+ if err != nil {
+ return fmt.Errorf("xlMetaInlineData: %w", err)
+ }
+ }
+
+ return nil
+}
+
+// repair will copy all seemingly valid data entries from a corrupted set.
+// This does not ensure that data is correct, but will allow all operations to complete.
+func (x *xlMetaInlineData) repair() {
+ data := *x
+ if len(data) == 0 {
+ return
+ }
+
+ if !data.versionOK() {
+ *x = nil
+ return
+ }
+
+ sz, buf, err := msgp.ReadMapHeaderBytes(data.afterVersion())
+ if err != nil {
+ *x = nil
+ return
+ }
+
+ // Remove all current data
+ keys := make([][]byte, 0, sz)
+ vals := make([][]byte, 0, sz)
+ for i := uint32(0); i < sz; i++ {
+ var key, val []byte
+ key, buf, err = msgp.ReadMapKeyZC(buf)
+ if err != nil {
+ break
+ }
+ if len(key) == 0 {
+ break
+ }
+ val, buf, err = msgp.ReadBytesZC(buf)
+ if err != nil {
+ break
+ }
+ keys = append(keys, key)
+ vals = append(vals, val)
+ }
+ x.serialize(-1, keys, vals)
+}
+
+// list returns the keys of all stored data entries.
+// It does not check the integrity of the stored data.
+func (x xlMetaInlineData) list() ([]string, error) {
+ if len(x) == 0 {
+ return nil, nil
+ }
+ if !x.versionOK() {
+ return nil, errors.New("xlMetaInlineData: unknown version")
+ }
+
+ sz, buf, err := msgp.ReadMapHeaderBytes(x.afterVersion())
+ if err != nil {
+ return nil, err
+ }
+ keys := make([]string, 0, sz)
+ for i := uint32(0); i < sz; i++ {
+ var key []byte
+ key, buf, err = msgp.ReadMapKeyZC(buf)
+ if err != nil {
+ return keys, err
+ }
+ if len(key) == 0 {
+ return keys, fmt.Errorf("xlMetaInlineData: key %d is length 0", i)
+ }
+ keys = append(keys, string(key))
+ // Skip data...
+ _, buf, err = msgp.ReadBytesZC(buf)
+ if err != nil {
+ return keys, err
+ }
+ }
+ return keys, nil
+}
+
+// serialize will serialize the provided keys and values.
+// The function will panic if keys/value slices aren't of equal length.
+// Payload size can give an indication of expected payload size.
+// If plSize is <= 0 it will be calculated.
+func (x *xlMetaInlineData) serialize(plSize int, keys [][]byte, vals [][]byte) {
+ if len(keys) != len(vals) {
+ panic(fmt.Errorf("xlMetaInlineData.serialize: keys/value number mismatch"))
+ }
+ if len(keys) == 0 {
+ *x = nil
+ return
+ }
+ if plSize <= 0 {
+ plSize = 1 + msgp.MapHeaderSize
+ for i := range keys {
+ plSize += len(keys[i]) + len(vals[i]) + msgp.StringPrefixSize + msgp.ArrayHeaderSize
+ }
+ }
+ payload := make([]byte, 1, plSize)
+ payload[0] = xlMetaInlineDataVer
+ payload = msgp.AppendMapHeader(payload, uint32(len(keys)))
+ for i := range keys {
+ payload = msgp.AppendStringFromBytes(payload, keys[i])
+ payload = msgp.AppendBytes(payload, vals[i])
+ }
+ *x = payload
+}
+
+// entries returns the number of entries in the data.
+func (x xlMetaInlineData) entries() int {
+ if len(x) == 0 || !x.versionOK() {
+ return 0
+ }
+ sz, _, _ := msgp.ReadMapHeaderBytes(x.afterVersion())
+ return int(sz)
+}
+
+// replace will add or replace a key/value pair.
+func (x *xlMetaInlineData) replace(key string, value []byte) {
+ in := x.afterVersion()
+ sz, buf, _ := msgp.ReadMapHeaderBytes(in)
+ keys := make([][]byte, 0, sz+1)
+ vals := make([][]byte, 0, sz+1)
+
+ // Version plus header...
+ plSize := 1 + msgp.MapHeaderSize
+ replaced := false
+ for i := uint32(0); i < sz; i++ {
+ var found, foundVal []byte
+ var err error
+ found, buf, err = msgp.ReadMapKeyZC(buf)
+ if err != nil {
+ break
+ }
+ foundVal, buf, err = msgp.ReadBytesZC(buf)
+ if err != nil {
+ break
+ }
+ plSize += len(found) + msgp.StringPrefixSize + msgp.ArrayHeaderSize
+ keys = append(keys, found)
+ if string(found) == key {
+ vals = append(vals, value)
+ plSize += len(value)
+ replaced = true
+ } else {
+ vals = append(vals, foundVal)
+ plSize += len(foundVal)
+ }
+ }
+
+ // Add one more.
+ if !replaced {
+ keys = append(keys, []byte(key))
+ vals = append(vals, value)
+ plSize += len(key) + len(value) + msgp.StringPrefixSize + msgp.ArrayHeaderSize
+ }
+
+ // Reserialize...
+ x.serialize(plSize, keys, vals)
+}
+
+// rename will rename a key.
+// Returns whether the key was found.
+func (x *xlMetaInlineData) rename(oldKey, newKey string) bool {
+ in := x.afterVersion()
+ sz, buf, _ := msgp.ReadMapHeaderBytes(in)
+ keys := make([][]byte, 0, sz)
+ vals := make([][]byte, 0, sz)
+
+ // Version plus header...
+ plSize := 1 + msgp.MapHeaderSize
+ found := false
+ for i := uint32(0); i < sz; i++ {
+ var foundKey, foundVal []byte
+ var err error
+ foundKey, buf, err = msgp.ReadMapKeyZC(buf)
+ if err != nil {
+ break
+ }
+ foundVal, buf, err = msgp.ReadBytesZC(buf)
+ if err != nil {
+ break
+ }
+ plSize += len(foundVal) + msgp.StringPrefixSize + msgp.ArrayHeaderSize
+ vals = append(vals, foundVal)
+ if string(foundKey) != oldKey {
+ keys = append(keys, foundKey)
+ plSize += len(foundKey)
+ } else {
+ keys = append(keys, []byte(newKey))
+ plSize += len(newKey)
+ found = true
+ }
+ }
+ // If not found, just return.
+ if !found {
+ return false
+ }
+
+ // Reserialize...
+ x.serialize(plSize, keys, vals)
+ return true
+}
+
+// remove will remove one or more keys.
+// Returns true if any key was found.
+func (x *xlMetaInlineData) remove(keys ...string) bool {
+ in := x.afterVersion()
+ sz, buf, _ := msgp.ReadMapHeaderBytes(in)
+ newKeys := make([][]byte, 0, sz)
+ newVals := make([][]byte, 0, sz)
+ var removeKey func(s []byte) bool
+
+ // Copy if big number of compares...
+ if len(keys) > 5 && sz > 5 {
+ mKeys := make(map[string]struct{}, len(keys))
+ for _, key := range keys {
+ mKeys[key] = struct{}{}
+ }
+ removeKey = func(s []byte) bool {
+ _, ok := mKeys[string(s)]
+ return ok
+ }
+ } else {
+ removeKey = func(s []byte) bool {
+ for _, key := range keys {
+ if key == string(s) {
+ return true
+ }
+ }
+ return false
+ }
+ }
+
+ // Version plus header...
+ plSize := 1 + msgp.MapHeaderSize
+ found := false
+ for i := uint32(0); i < sz; i++ {
+ var foundKey, foundVal []byte
+ var err error
+ foundKey, buf, err = msgp.ReadMapKeyZC(buf)
+ if err != nil {
+ break
+ }
+ foundVal, buf, err = msgp.ReadBytesZC(buf)
+ if err != nil {
+ break
+ }
+ if !removeKey(foundKey) {
+ plSize += msgp.StringPrefixSize + msgp.ArrayHeaderSize + len(foundKey) + len(foundVal)
+ newKeys = append(newKeys, foundKey)
+ newVals = append(newVals, foundVal)
+ } else {
+ found = true
+ }
+ }
+ // If not found, just return.
+ if !found {
+ return false
+ }
+ // If none left...
+ if len(newKeys) == 0 {
+ *x = nil
+ return true
+ }
+
+ // Reserialize...
+ x.serialize(plSize, newKeys, newVals)
+ return true
+}
+
+// xlMetaV2TrimData will trim any data from the metadata without unmarshalling it.
+// If any error occurs the unmodified data is returned.
+func xlMetaV2TrimData(buf []byte) []byte {
+ metaBuf, min, maj, err := checkXL2V1(buf)
+ if err != nil {
+ return buf
+ }
+ if maj == 1 && min < 1 {
+ // First version to carry data.
+ return buf
+ }
+ // Skip header
+ _, metaBuf, err = msgp.ReadBytesZC(metaBuf)
+ if err != nil {
+ logger.LogIf(GlobalContext, err)
+ return buf
+ }
+ // Skip CRC
+ if maj > 1 || min >= 2 {
+ _, metaBuf, err = msgp.ReadUint32Bytes(metaBuf)
+ logger.LogIf(GlobalContext, err)
+ }
+ // = input - current pos
+ ends := len(buf) - len(metaBuf)
+ if ends > len(buf) {
+ return buf
+ }
+
+ return buf[:ends]
+}
diff --git a/cmd/xl-storage.go b/cmd/xl-storage.go
index f8b352749..9f6c9b93d 100644
--- a/cmd/xl-storage.go
+++ b/cmd/xl-storage.go
@@ -978,7 +978,7 @@ func (s *xlStorage) DeleteVersion(ctx context.Context, volume, path string, fi F
}
var xlMeta xlMetaV2
- if err = xlMeta.Load(buf); err != nil {
+ if err := xlMeta.Load(buf); err != nil {
return err
}
@@ -1044,6 +1044,7 @@ func (s *xlStorage) UpdateMetadata(ctx context.Context, volume, path string, fi
}
return err
}
+ defer metaDataPoolPut(buf)
if !isXL2V1Format(buf) {
return errFileVersionNotFound
@@ -1059,12 +1060,13 @@ func (s *xlStorage) UpdateMetadata(ctx context.Context, volume, path string, fi
return err
}
- buf, err = xlMeta.AppendTo(nil)
+ wbuf, err := xlMeta.AppendTo(metaDataPoolGet())
if err != nil {
return err
}
+ defer metaDataPoolPut(wbuf)
- return s.WriteAll(ctx, volume, pathJoin(path, xlStorageFormatFile), buf)
+ return s.WriteAll(ctx, volume, pathJoin(path, xlStorageFormatFile), wbuf)
}
// WriteMetadata - writes FileInfo metadata for path at `xl.meta`
diff --git a/docs/debugging/xl-meta/main.go b/docs/debugging/xl-meta/main.go
index 8c5d6e8b8..55da4ab53 100644
--- a/docs/debugging/xl-meta/main.go
+++ b/docs/debugging/xl-meta/main.go
@@ -114,6 +114,48 @@ FLAGS:
return nil, err
}
data = b
+ case 3:
+ v, b, err := msgp.ReadBytesZC(b)
+ if err != nil {
+ return nil, err
+ }
+ if _, nbuf, err := msgp.ReadUint32Bytes(b); err == nil {
+ // Read metadata CRC (added in v2, ignore if not found)
+ b = nbuf
+ }
+
+ nVers, v, err := decodeXLHeaders(v)
+ if err != nil {
+ return nil, err
+ }
+ var versions = struct {
+ Versions []json.RawMessage
+ Headers []json.RawMessage
+ }{
+ Versions: make([]json.RawMessage, nVers),
+ Headers: make([]json.RawMessage, nVers),
+ }
+ err = decodeVersions(v, nVers, func(idx int, hdr, meta []byte) error {
+ var buf bytes.Buffer
+ if _, err := msgp.UnmarshalAsJSON(&buf, hdr); err != nil {
+ return err
+ }
+ versions.Headers[idx] = buf.Bytes()
+ buf = bytes.Buffer{}
+ if _, err := msgp.UnmarshalAsJSON(&buf, meta); err != nil {
+ return err
+ }
+ versions.Versions[idx] = buf.Bytes()
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ enc := json.NewEncoder(buf)
+ if err := enc.Encode(versions); err != nil {
+ return nil, err
+ }
+ data = b
default:
return nil, fmt.Errorf("unknown metadata version %d", minor)
}
@@ -416,3 +458,54 @@ func (x xlMetaInlineData) files(fn func(name string, data []byte)) error {
return nil
}
+
+const (
+ xlHeaderVersion = 2
+ xlMetaVersion = 1
+)
+
+func decodeXLHeaders(buf []byte) (versions int, b []byte, err error) {
+ hdrVer, buf, err := msgp.ReadUintBytes(buf)
+ if err != nil {
+ return 0, buf, err
+ }
+ metaVer, buf, err := msgp.ReadUintBytes(buf)
+ if err != nil {
+ return 0, buf, err
+ }
+ if hdrVer > xlHeaderVersion {
+ return 0, buf, fmt.Errorf("decodeXLHeaders: Unknown xl header version %d", hdrVer)
+ }
+ if metaVer > xlMetaVersion {
+ return 0, buf, fmt.Errorf("decodeXLHeaders: Unknown xl meta version %d", metaVer)
+ }
+ versions, buf, err = msgp.ReadIntBytes(buf)
+ if err != nil {
+ return 0, buf, err
+ }
+ if versions < 0 {
+ return 0, buf, fmt.Errorf("decodeXLHeaders: Negative version count %d", versions)
+ }
+ return versions, buf, nil
+}
+
+// decodeVersions will decode a number of versions from a buffer
+// and perform a callback for each version in order, newest first.
+// Any non-nil error is returned.
+func decodeVersions(buf []byte, versions int, fn func(idx int, hdr, meta []byte) error) (err error) {
+ var tHdr, tMeta []byte // Zero copy bytes
+ for i := 0; i < versions; i++ {
+ tHdr, buf, err = msgp.ReadBytesZC(buf)
+ if err != nil {
+ return err
+ }
+ tMeta, buf, err = msgp.ReadBytesZC(buf)
+ if err != nil {
+ return err
+ }
+ if err = fn(i, tHdr, tMeta); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/go.mod b/go.mod
index d00c79535..d7dc13b0c 100644
--- a/go.mod
+++ b/go.mod
@@ -81,6 +81,7 @@ require (
github.com/valyala/bytebufferpool v1.0.0
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c
github.com/yargevad/filepathx v1.0.0
+ github.com/zeebo/xxh3 v1.0.0
go.etcd.io/etcd/api/v3 v3.5.0
go.etcd.io/etcd/client/v3 v3.5.0
go.uber.org/atomic v1.9.0
diff --git a/go.sum b/go.sum
index 0d7e987a4..ed747054f 100644
--- a/go.sum
+++ b/go.sum
@@ -1527,6 +1527,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/zeebo/xxh3 v1.0.0 h1:6eLPZCVXpsGnhv8RiWBEJs5kenm2W1CMwon19/l8ODc=
+github.com/zeebo/xxh3 v1.0.0/go.mod h1:8VHV24/3AZLn3b6Mlp/KuC33LWH687Wq6EnziEB+rsA=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=