2021-04-18 15:41:13 -04:00
|
|
|
// Copyright (c) 2015-2021 MinIO, Inc.
|
|
|
|
//
|
|
|
|
// This file is part of MinIO Object Storage stack
|
|
|
|
//
|
|
|
|
// This program is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Affero General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// This program is distributed in the hope that it will be useful
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Affero General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Affero General Public License
|
|
|
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
2020-06-12 23:04:01 -04:00
|
|
|
|
|
|
|
package cmd
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
2021-11-18 15:15:22 -05:00
|
|
|
"context"
|
2021-03-29 20:00:55 -04:00
|
|
|
"encoding/binary"
|
2021-11-18 15:15:22 -05:00
|
|
|
"encoding/hex"
|
2020-06-23 13:20:31 -04:00
|
|
|
"errors"
|
2020-06-12 23:04:01 -04:00
|
|
|
"fmt"
|
2021-05-21 12:10:54 -04:00
|
|
|
"io"
|
2021-09-18 16:31:35 -04:00
|
|
|
"net/http"
|
2020-09-09 21:11:24 -04:00
|
|
|
"sort"
|
2020-06-12 23:04:01 -04:00
|
|
|
"strings"
|
2021-08-12 17:27:22 -04:00
|
|
|
"sync"
|
2020-06-12 23:04:01 -04:00
|
|
|
"time"
|
|
|
|
|
2021-04-08 20:29:54 -04:00
|
|
|
"github.com/cespare/xxhash/v2"
|
2020-06-12 23:04:01 -04:00
|
|
|
"github.com/google/uuid"
|
2021-12-02 14:29:16 -05:00
|
|
|
jsoniter "github.com/json-iterator/go"
|
2021-06-01 17:59:40 -04:00
|
|
|
"github.com/minio/minio/internal/bucket/lifecycle"
|
2021-09-18 16:31:35 -04:00
|
|
|
"github.com/minio/minio/internal/bucket/replication"
|
2021-06-01 17:59:40 -04:00
|
|
|
xhttp "github.com/minio/minio/internal/http"
|
|
|
|
"github.com/minio/minio/internal/logger"
|
2021-04-08 20:29:54 -04:00
|
|
|
"github.com/tinylib/msgp/msgp"
|
2020-06-12 23:04:01 -04:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// xlHeader is the 4-byte magic prefix ("XL2 ") identifying the
	// xl.meta v2 on-disk format; checked by checkXL2V1.
	xlHeader = [4]byte{'X', 'L', '2', ' '}

	// xlVersionCurrent holds the (major, minor) version currently written,
	// encoded as two little-endian uint16s; populated in init().
	xlVersionCurrent [4]byte
)
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
//go:generate msgp -file=$GOFILE -unexported
|
2021-12-02 14:29:16 -05:00
|
|
|
//go:generate stringer -type VersionType,ErasureAlgo -output=xl-storage-format-v2_string.go $GOFILE
|
2021-11-18 15:15:22 -05:00
|
|
|
|
2021-03-29 20:00:55 -04:00
|
|
|
// On-disk format version of xl.meta, serialized into xlVersionCurrent.
const (
	// Breaking changes.
	// Newer versions cannot be read by older software.
	// This will prevent downgrades to incompatible versions.
	xlVersionMajor = 1

	// Non breaking changes.
	// Bumping this is informational, but should be done
	// if any change is made to the data stored, bumping this
	// will allow to detect the exact version later.
	xlVersionMinor = 3
)
|
|
|
|
|
2021-03-29 20:00:55 -04:00
|
|
|
func init() {
|
|
|
|
binary.LittleEndian.PutUint16(xlVersionCurrent[0:2], xlVersionMajor)
|
|
|
|
binary.LittleEndian.PutUint16(xlVersionCurrent[2:4], xlVersionMinor)
|
|
|
|
}
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
// The []journal contains all the different versions of the object.
|
|
|
|
//
|
|
|
|
// This array can have 3 kinds of objects:
|
|
|
|
//
|
|
|
|
// ``object``: If the object is uploaded the usual way: putobject, multipart-put, copyobject
|
|
|
|
//
|
|
|
|
// ``delete``: This is the delete-marker
|
|
|
|
//
|
|
|
|
// ``legacyObject``: This is the legacy object in xlV1 format, preserved until it's overwritten
|
|
|
|
//
|
|
|
|
// The most recently updated element in the array is considered the latest version.
|
|
|
|
|
2021-06-30 22:32:07 -04:00
|
|
|
// In addition to these we have a special kind called free-version. This is represented
|
|
|
|
// using a delete-marker and MetaSys entries. It's used to track tiered content of a
|
|
|
|
// deleted/overwritten version. This version is visible _only_ to the scanner routine, for subsequent deletion.
|
|
|
|
// This kind of tracking is necessary since a version's tiered content is deleted asynchronously.
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
// Backend directory tree structure:
|
|
|
|
// disk1/
|
|
|
|
// └── bucket
|
|
|
|
// └── object
|
|
|
|
// ├── a192c1d5-9bd5-41fd-9a90-ab10e165398d
|
|
|
|
// │ └── part.1
|
|
|
|
// ├── c06e0436-f813-447e-ae5e-f2564df9dfd4
|
|
|
|
// │ └── part.1
|
|
|
|
// ├── df433928-2dcf-47b1-a786-43efa0f6b424
|
|
|
|
// │ └── part.1
|
|
|
|
// ├── legacy
|
|
|
|
// │ └── part.1
|
|
|
|
// └── xl.meta
|
|
|
|
|
|
|
|
// VersionType defines the type of journal type of the current entry.
type VersionType uint8

// List of different types of journal type
const (
	invalidVersionType VersionType = 0
	ObjectType         VersionType = 1
	DeleteType         VersionType = 2
	LegacyType         VersionType = 3
	lastVersionType    VersionType = 4
)

// valid reports whether v is one of the defined journal types,
// excluding the invalid zero value and the lastVersionType sentinel.
func (v VersionType) valid() bool {
	switch v {
	case ObjectType, DeleteType, LegacyType:
		return true
	}
	return false
}
|
|
|
|
|
|
|
|
// ErasureAlgo defines common type of different erasure algorithms
type ErasureAlgo uint8

// List of currently supported erasure coding algorithms
const (
	invalidErasureAlgo ErasureAlgo = 0
	ReedSolomon        ErasureAlgo = 1
	lastErasureAlgo    ErasureAlgo = 2
)

// valid reports whether a names a supported erasure algorithm,
// excluding the invalid zero value and the sentinel.
func (a ErasureAlgo) valid() bool {
	switch a {
	case ReedSolomon:
		return true
	}
	return false
}
|
|
|
|
|
|
|
|
// ChecksumAlgo defines common type of different checksum algorithms
type ChecksumAlgo uint8

// List of currently supported checksum algorithms
const (
	invalidChecksumAlgo ChecksumAlgo = 0
	HighwayHash         ChecksumAlgo = 1
	lastChecksumAlgo    ChecksumAlgo = 2
)

// valid reports whether c names a supported checksum algorithm,
// excluding the invalid zero value and the sentinel.
func (c ChecksumAlgo) valid() bool {
	switch c {
	case HighwayHash:
		return true
	}
	return false
}
|
|
|
|
|
|
|
|
// xlMetaV2DeleteMarker defines the data struct for the delete marker journal type.
// Field tags are consumed by msgp code generation; do not change them without
// bumping the format version.
type xlMetaV2DeleteMarker struct {
	VersionID [16]byte          `json:"ID" msg:"ID"`                               // Version ID for delete marker
	ModTime   int64             `json:"MTime" msg:"MTime"`                         // Object delete marker modified time (UnixNano)
	MetaSys   map[string][]byte `json:"MetaSys,omitempty" msg:"MetaSys,omitempty"` // Delete marker internal metadata
}
|
|
|
|
|
|
|
|
// xlMetaV2Object defines the data struct for object journal type.
// Field tags are consumed by msgp code generation; "allownil" fields may be
// serialized as nil and must be length-checked before indexing.
type xlMetaV2Object struct {
	VersionID          [16]byte          `json:"ID" msg:"ID"`                                    // Version ID
	DataDir            [16]byte          `json:"DDir" msg:"DDir"`                                // Data dir ID
	ErasureAlgorithm   ErasureAlgo       `json:"EcAlgo" msg:"EcAlgo"`                            // Erasure coding algorithm
	ErasureM           int               `json:"EcM" msg:"EcM"`                                  // Erasure data blocks
	ErasureN           int               `json:"EcN" msg:"EcN"`                                  // Erasure parity blocks
	ErasureBlockSize   int64             `json:"EcBSize" msg:"EcBSize"`                          // Erasure block size
	ErasureIndex       int               `json:"EcIndex" msg:"EcIndex"`                          // Erasure disk index (varies per disk; zeroed in Signature)
	ErasureDist        []uint8           `json:"EcDist" msg:"EcDist"`                            // Erasure distribution
	BitrotChecksumAlgo ChecksumAlgo      `json:"CSumAlgo" msg:"CSumAlgo"`                        // Bitrot checksum algo
	PartNumbers        []int             `json:"PartNums" msg:"PartNums"`                        // Part Numbers
	PartETags          []string          `json:"PartETags" msg:"PartETags,allownil"`             // Part ETags
	PartSizes          []int64           `json:"PartSizes" msg:"PartSizes"`                      // Part Sizes
	PartActualSizes    []int64           `json:"PartASizes,omitempty" msg:"PartASizes,allownil"` // Part ActualSizes (compression)
	Size               int64             `json:"Size" msg:"Size"`                                // Object version size
	ModTime            int64             `json:"MTime" msg:"MTime"`                              // Object version modified time (UnixNano)
	MetaSys            map[string][]byte `json:"MetaSys,omitempty" msg:"MetaSys,allownil"`       // Object version internal metadata
	MetaUser           map[string]string `json:"MetaUsr,omitempty" msg:"MetaUsr,allownil"`       // Object version metadata set by user
}
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
// xlMetaV2Version describes the journal entry, Type defines
// the current journal entry type other types might be nil based
// on what Type field carries, it is imperative for the caller
// to verify which journal type first before accessing rest of the fields.
type xlMetaV2Version struct {
	Type         VersionType           `json:"Type" msg:"Type"`
	ObjectV1     *xlMetaV1Object       `json:"V1Obj,omitempty" msg:"V1Obj,omitempty"`   // Set when Type == LegacyType
	ObjectV2     *xlMetaV2Object       `json:"V2Obj,omitempty" msg:"V2Obj,omitempty"`   // Set when Type == ObjectType
	DeleteMarker *xlMetaV2DeleteMarker `json:"DelObj,omitempty" msg:"DelObj,omitempty"` // Set when Type == DeleteType
}
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
// xlFlags contains flags on the object.
// This can be extended up to 64 bits without breaking compatibility.
type xlFlags uint8

const (
	xlFlagFreeVersion xlFlags = 1 << iota
	xlFlagUsesDataDir
	xlFlagInlineData
)

// String returns the names of the flags set in x as a
// comma-separated list, in declaration order.
func (x xlFlags) String() string {
	names := make([]string, 0, 3)
	if x&xlFlagFreeVersion != 0 {
		names = append(names, "FreeVersion")
	}
	if x&xlFlagUsesDataDir != 0 {
		names = append(names, "UsesDD")
	}
	if x&xlFlagInlineData != 0 {
		names = append(names, "Inline")
	}
	return strings.Join(names, ",")
}
|
|
|
|
|
|
|
|
// checkXL2V1 will check if the metadata has correct header and is a known major version.
// The remaining payload and versions are returned.
// On an unknown (too-new) major version the payload is still returned along
// with the error, so callers may inspect it.
func checkXL2V1(buf []byte) (payload []byte, major, minor uint16, err error) {
	// A valid buffer holds at least the 4-byte magic plus 4 version bytes.
	if len(buf) <= 8 {
		return payload, 0, 0, fmt.Errorf("xlMeta: no data")
	}

	if !bytes.Equal(buf[:4], xlHeader[:]) {
		return payload, 0, 0, fmt.Errorf("xlMeta: unknown XLv2 header, expected %v, got %v", xlHeader[:4], buf[:4])
	}

	// Legacy v1.0 wrote an ASCII version string instead of binary uint16s.
	// NOTE(review): buf[4:8] is 4 bytes, so this literal must be exactly 4
	// bytes ("1" padded with spaces) — confirm the padding was not lost in
	// whitespace mangling of this file.
	if bytes.Equal(buf[4:8], []byte("1 ")) {
		// Set as 1,0.
		major, minor = 1, 0
	} else {
		major, minor = binary.LittleEndian.Uint16(buf[4:6]), binary.LittleEndian.Uint16(buf[6:8])
	}
	if major > xlVersionMajor {
		return buf[8:], major, minor, fmt.Errorf("xlMeta: unknown major version %d found", major)
	}

	return buf[8:], major, minor, nil
}
|
|
|
|
|
|
|
|
func isXL2V1Format(buf []byte) bool {
|
|
|
|
_, _, _, err := checkXL2V1(buf)
|
|
|
|
return err == nil
|
|
|
|
}
|
|
|
|
|
|
|
|
//msgp:tuple xlMetaV2VersionHeader

// xlMetaV2VersionHeader is the shallow, fixed-size summary of one version,
// serialized as a msgp tuple (field order is part of the wire format).
type xlMetaV2VersionHeader struct {
	VersionID [16]byte    // Version ID; zero for legacy/null versions
	ModTime   int64       // Modified time in UnixNano
	Signature [4]byte     // Content signature, expected equal across disks
	Type      VersionType // Journal entry type
	Flags     xlFlags     // Free-version / data-dir / inline-data flags
}
|
|
|
|
|
|
|
|
// String returns a human-readable summary of the version header,
// with VersionID and Signature hex-encoded.
func (x xlMetaV2VersionHeader) String() string {
	return fmt.Sprintf("Type: %s, VersionID: %s, Signature: %s, ModTime: %s, Flags: %s",
		x.Type.String(),
		hex.EncodeToString(x.VersionID[:]),
		hex.EncodeToString(x.Signature[:]),
		time.Unix(0, x.ModTime),
		x.Flags.String(),
	)
}
|
|
|
|
|
2021-12-02 14:29:16 -05:00
|
|
|
// matchesNotStrict returns whether x and o have both have non-zero version,
|
|
|
|
// their versions match and their type match.
|
|
|
|
func (x xlMetaV2VersionHeader) matchesNotStrict(o xlMetaV2VersionHeader) bool {
|
|
|
|
return x.VersionID != [16]byte{} &&
|
|
|
|
x.VersionID == o.VersionID &&
|
|
|
|
x.Type == o.Type
|
|
|
|
}
|
|
|
|
|
|
|
|
// sortsBefore can be used as a tiebreaker for stable sorting/selecting.
// Returns false on ties.
// Precedence of the comparison cascade (highest first): ModTime, Type,
// Signature, VersionID, Flags. The order of these checks defines the
// sort semantics — do not reorder.
func (x xlMetaV2VersionHeader) sortsBefore(o xlMetaV2VersionHeader) bool {
	if x == o {
		return false
	}
	// Prefer newest modtime.
	if x.ModTime != o.ModTime {
		return x.ModTime > o.ModTime
	}

	// The following doesn't make too much sense, but we want sort to be consistent nonetheless.
	// Prefer lower types
	if x.Type != o.Type {
		return x.Type < o.Type
	}
	// Consistent sort on signature
	if v := bytes.Compare(x.Signature[:], o.Signature[:]); v != 0 {
		return v > 0
	}
	// On ID mismatch
	if v := bytes.Compare(x.VersionID[:], o.VersionID[:]); v != 0 {
		return v > 0
	}
	// Flags
	if x.Flags != o.Flags {
		return x.Flags > o.Flags
	}
	return false
}
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
// Valid xl meta xlMetaV2Version is valid.
// It checks that the type is known and that the payload pointer matching
// the type is present and internally consistent.
func (j xlMetaV2Version) Valid() bool {
	if !j.Type.valid() {
		return false
	}
	switch j.Type {
	case LegacyType:
		return j.ObjectV1 != nil &&
			j.ObjectV1.valid()
	case ObjectType:
		// Object versions additionally need valid erasure parameters
		// and a positive modification time.
		return j.ObjectV2 != nil &&
			j.ObjectV2.ErasureAlgorithm.valid() &&
			j.ObjectV2.BitrotChecksumAlgo.valid() &&
			isXLMetaErasureInfoValid(j.ObjectV2.ErasureM, j.ObjectV2.ErasureN) &&
			j.ObjectV2.ModTime > 0
	case DeleteType:
		return j.DeleteMarker != nil &&
			j.DeleteMarker.ModTime > 0
	}
	return false
}
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
// header will return a shallow header of the version:
// its ID, modtime, signature, type and derived flags.
func (j *xlMetaV2Version) header() xlMetaV2VersionHeader {
	var flags xlFlags
	if j.FreeVersion() {
		flags |= xlFlagFreeVersion
	}
	// Data-dir and inline-data flags only apply to object versions.
	if j.Type == ObjectType && j.ObjectV2.UsesDataDir() {
		flags |= xlFlagUsesDataDir
	}
	if j.Type == ObjectType && j.ObjectV2.InlineData() {
		flags |= xlFlagInlineData
	}
	return xlMetaV2VersionHeader{
		VersionID: j.getVersionID(),
		ModTime:   j.getModTime().UnixNano(),
		Signature: j.getSignature(),
		Type:      j.Type,
		Flags:     flags,
	}
}
|
|
|
|
|
|
|
|
// FreeVersion returns true if x represents a free-version, false otherwise.
|
|
|
|
func (x xlMetaV2VersionHeader) FreeVersion() bool {
|
|
|
|
return x.Flags&xlFlagFreeVersion != 0
|
|
|
|
}
|
|
|
|
|
|
|
|
// UsesDataDir returns true if this object version uses its data directory for
|
|
|
|
// its contents and false otherwise.
|
|
|
|
func (x xlMetaV2VersionHeader) UsesDataDir() bool {
|
|
|
|
return x.Flags&xlFlagUsesDataDir != 0
|
|
|
|
}
|
|
|
|
|
|
|
|
// InlineData returns whether inline data has been set.
|
|
|
|
// Note that false does not mean there is no inline data,
|
|
|
|
// only that it is unlikely.
|
|
|
|
func (x xlMetaV2VersionHeader) InlineData() bool {
|
|
|
|
return x.Flags&xlFlagInlineData != 0
|
|
|
|
}
|
|
|
|
|
|
|
|
// signatureErr is a signature returned when an error occurs
// (e.g. getSignature hits an unknown version type).
var signatureErr = [4]byte{'e', 'r', 'r', 0}
|
|
|
|
|
|
|
|
// getSignature will return a signature that is expected to be the same across all disks.
// It dispatches to the Signature method of the payload matching j.Type;
// unknown types yield the sentinel signatureErr.
func (j xlMetaV2Version) getSignature() [4]byte {
	switch j.Type {
	case ObjectType:
		return j.ObjectV2.Signature()
	case DeleteType:
		return j.DeleteMarker.Signature()
	case LegacyType:
		return j.ObjectV1.Signature()
	}
	return signatureErr
}
|
|
|
|
|
2021-09-03 15:31:32 -04:00
|
|
|
// getModTime will return the ModTime of the underlying version.
// Object and delete-marker versions store UnixNano int64s; legacy
// versions carry a time.Time directly. Unknown types return the zero time.
func (j xlMetaV2Version) getModTime() time.Time {
	switch j.Type {
	case ObjectType:
		return time.Unix(0, j.ObjectV2.ModTime)
	case DeleteType:
		return time.Unix(0, j.DeleteMarker.ModTime)
	case LegacyType:
		return j.ObjectV1.Stat.ModTime
	}
	return time.Time{}
}
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
// getVersionID will return the versionID of the underlying version.
|
|
|
|
func (j xlMetaV2Version) getVersionID() [16]byte {
|
|
|
|
switch j.Type {
|
|
|
|
case ObjectType:
|
|
|
|
return j.ObjectV2.VersionID
|
|
|
|
case DeleteType:
|
|
|
|
return j.DeleteMarker.VersionID
|
|
|
|
case LegacyType:
|
|
|
|
return [16]byte{}
|
2021-03-29 20:00:55 -04:00
|
|
|
}
|
2021-11-18 15:15:22 -05:00
|
|
|
return [16]byte{}
|
2021-03-29 20:00:55 -04:00
|
|
|
}
|
|
|
|
|
2021-12-02 14:29:16 -05:00
|
|
|
// ToFileInfo returns FileInfo of the underlying type.
// Unknown types report errFileNotFound.
func (j *xlMetaV2Version) ToFileInfo(volume, path string) (FileInfo, error) {
	switch j.Type {
	case ObjectType:
		return j.ObjectV2.ToFileInfo(volume, path)
	case DeleteType:
		return j.DeleteMarker.ToFileInfo(volume, path)
	case LegacyType:
		return j.ObjectV1.ToFileInfo(volume, path)
	}
	return FileInfo{}, errFileNotFound
}
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
const (
	// xlHeaderVersion is the layout version of the shallow version
	// headers; compared against the decoded value in decodeXLHeaders.
	xlHeaderVersion = 2
	// xlMetaVersion is the layout version of the per-version metadata.
	xlMetaVersion = 1
)
|
2021-04-03 12:03:42 -04:00
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
// ToFileInfo converts the delete-marker journal entry into a FileInfo
// for volume/path. The returned FileInfo always has Deleted set; for
// free versions the tier/transition metadata is copied over so the
// remote (tiered) content can later be deleted.
func (j xlMetaV2DeleteMarker) ToFileInfo(volume, path string) (FileInfo, error) {
	versionID := ""
	var uv uuid.UUID
	// check if the version is not "null"
	if j.VersionID != uv {
		versionID = uuid.UUID(j.VersionID).String()
	}
	fi := FileInfo{
		Volume:    volume,
		Name:      path,
		ModTime:   time.Unix(0, j.ModTime).UTC(),
		VersionID: versionID,
		Deleted:   true,
	}
	fi.ReplicationState = GetInternalReplicationState(j.MetaSys)

	if j.FreeVersion() {
		fi.SetTierFreeVersion()
		// Missing keys yield empty strings (nil map lookup is safe).
		fi.TransitionTier = string(j.MetaSys[ReservedMetadataPrefixLower+TransitionTier])
		fi.TransitionedObjName = string(j.MetaSys[ReservedMetadataPrefixLower+TransitionedObjectName])
		fi.TransitionVersionID = string(j.MetaSys[ReservedMetadataPrefixLower+TransitionedVersionID])
	}

	return fi, nil
}
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
// Signature will return a signature that is expected to be the same across all disks.
// MetaSys is hashed separately (via hashDeterministicBytes) and nilled out
// before marshaling, then both hashes are folded into a 4-byte value.
func (j *xlMetaV2DeleteMarker) Signature() [4]byte {
	// Shallow copy
	c := *j

	// Marshal metadata
	crc := hashDeterministicBytes(c.MetaSys)
	c.MetaSys = nil
	// Marshal errors are deliberately ignored; the metadata hash alone
	// then forms the signature.
	if bts, err := c.MarshalMsg(metaDataPoolGet()); err == nil {
		crc ^= xxhash.Sum64(bts)
		metaDataPoolPut(bts)
	}

	// Combine upper and lower part
	var tmp [4]byte
	binary.LittleEndian.PutUint32(tmp[:], uint32(crc^(crc>>32)))
	return tmp
}
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
// UsesDataDir returns true if this object version uses its data directory for
// its contents and false otherwise.
func (j xlMetaV2Object) UsesDataDir() bool {
	// Skip if this version is not transitioned, i.e it uses its data directory.
	if !bytes.Equal(j.MetaSys[ReservedMetadataPrefixLower+TransitionStatus], []byte(lifecycle.TransitionComplete)) {
		return true
	}

	// Check if this transitioned object has been restored on disk.
	return isRestoredObjectOnDisk(j.MetaUser)
}
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
// InlineData returns whether inline data has been set.
// Note that false does not mean there is no inline data,
// only that it is unlikely.
func (j xlMetaV2Object) InlineData() bool {
	// Presence of the marker key alone signals inline data; its value is unused.
	_, ok := j.MetaSys[ReservedMetadataPrefixLower+"inline-data"]
	return ok
}
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
func (j *xlMetaV2Object) SetTransition(fi FileInfo) {
|
|
|
|
j.MetaSys[ReservedMetadataPrefixLower+TransitionStatus] = []byte(fi.TransitionStatus)
|
|
|
|
j.MetaSys[ReservedMetadataPrefixLower+TransitionedObjectName] = []byte(fi.TransitionedObjName)
|
|
|
|
j.MetaSys[ReservedMetadataPrefixLower+TransitionedVersionID] = []byte(fi.TransitionVersionID)
|
|
|
|
j.MetaSys[ReservedMetadataPrefixLower+TransitionTier] = []byte(fi.TransitionTier)
|
2021-03-29 20:00:55 -04:00
|
|
|
}
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
func (j *xlMetaV2Object) RemoveRestoreHdrs() {
|
|
|
|
delete(j.MetaUser, xhttp.AmzRestore)
|
|
|
|
delete(j.MetaUser, xhttp.AmzRestoreExpiryDays)
|
|
|
|
delete(j.MetaUser, xhttp.AmzRestoreRequestDate)
|
2021-03-29 20:00:55 -04:00
|
|
|
}
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
// Signature will return a signature that is expected to be the same across all disks.
// Disk-varying fields (ErasureIndex) are zeroed, the metadata maps are hashed
// separately and nilled, and the remainder is marshaled and hashed; the 64-bit
// result is folded into 4 bytes.
func (j *xlMetaV2Object) Signature() [4]byte {
	// Shallow copy
	c := *j
	// Zero fields that will vary across disks
	c.ErasureIndex = 0

	// Nil 0 size allownil, so we don't differentiate between nil and 0 len.
	if len(c.PartETags) == 0 {
		c.PartETags = nil
	}
	if len(c.PartActualSizes) == 0 {
		c.PartActualSizes = nil
	}

	// Get a 64 bit CRC
	crc := hashDeterministicString(c.MetaUser)
	crc ^= hashDeterministicBytes(c.MetaSys)

	// Nil fields.
	c.MetaSys = nil
	c.MetaUser = nil

	// Marshal errors are deliberately ignored; the map hashes alone
	// then form the signature.
	if bts, err := c.MarshalMsg(metaDataPoolGet()); err == nil {
		crc ^= xxhash.Sum64(bts)
		metaDataPoolPut(bts)
	}

	// Combine upper and lower part
	var tmp [4]byte
	binary.LittleEndian.PutUint32(tmp[:], uint32(crc^(crc>>32)))
	return tmp
}
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
func (j xlMetaV2Object) ToFileInfo(volume, path string) (FileInfo, error) {
|
|
|
|
versionID := ""
|
|
|
|
var uv uuid.UUID
|
|
|
|
// check if the version is not "null"
|
2021-05-07 12:11:05 -04:00
|
|
|
if j.VersionID != uv {
|
2020-06-12 23:04:01 -04:00
|
|
|
versionID = uuid.UUID(j.VersionID).String()
|
|
|
|
}
|
|
|
|
fi := FileInfo{
|
|
|
|
Volume: volume,
|
|
|
|
Name: path,
|
2020-07-04 15:25:53 -04:00
|
|
|
Size: j.Size,
|
|
|
|
ModTime: time.Unix(0, j.ModTime).UTC(),
|
2020-06-12 23:04:01 -04:00
|
|
|
VersionID: versionID,
|
|
|
|
}
|
|
|
|
fi.Parts = make([]ObjectPartInfo, len(j.PartNumbers))
|
|
|
|
for i := range fi.Parts {
|
2020-08-24 15:11:20 -04:00
|
|
|
fi.Parts[i].Number = j.PartNumbers[i]
|
|
|
|
fi.Parts[i].Size = j.PartSizes[i]
|
2021-11-18 15:15:22 -05:00
|
|
|
if len(j.PartETags) > 0 {
|
|
|
|
fi.Parts[i].ETag = j.PartETags[i]
|
|
|
|
}
|
2020-08-24 15:11:20 -04:00
|
|
|
fi.Parts[i].ActualSize = j.PartActualSizes[i]
|
2020-06-12 23:04:01 -04:00
|
|
|
}
|
|
|
|
fi.Erasure.Checksums = make([]ChecksumInfo, len(j.PartSizes))
|
|
|
|
for i := range fi.Parts {
|
|
|
|
fi.Erasure.Checksums[i].PartNumber = fi.Parts[i].Number
|
|
|
|
switch j.BitrotChecksumAlgo {
|
|
|
|
case HighwayHash:
|
|
|
|
fi.Erasure.Checksums[i].Algorithm = HighwayHash256S
|
|
|
|
fi.Erasure.Checksums[i].Hash = []byte{}
|
|
|
|
default:
|
|
|
|
return FileInfo{}, fmt.Errorf("unknown BitrotChecksumAlgo: %v", j.BitrotChecksumAlgo)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fi.Metadata = make(map[string]string, len(j.MetaUser)+len(j.MetaSys))
|
|
|
|
for k, v := range j.MetaUser {
|
2020-08-11 11:29:29 -04:00
|
|
|
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
|
2021-02-03 23:41:33 -05:00
|
|
|
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
|
2020-08-11 11:29:29 -04:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
fi.Metadata[k] = v
|
|
|
|
}
|
|
|
|
for k, v := range j.MetaSys {
|
2021-02-03 23:41:33 -05:00
|
|
|
switch {
|
2021-09-18 16:31:35 -04:00
|
|
|
case strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower), equals(k, VersionPurgeStatusKey):
|
2021-01-20 16:12:12 -05:00
|
|
|
fi.Metadata[k] = string(v)
|
|
|
|
}
|
2020-06-12 23:04:01 -04:00
|
|
|
}
|
2021-09-18 16:31:35 -04:00
|
|
|
fi.ReplicationState = getInternalReplicationState(fi.Metadata)
|
|
|
|
replStatus := fi.ReplicationState.CompositeReplicationStatus()
|
|
|
|
if replStatus != "" {
|
|
|
|
fi.Metadata[xhttp.AmzBucketReplicationStatus] = string(replStatus)
|
|
|
|
}
|
2020-06-12 23:04:01 -04:00
|
|
|
fi.Erasure.Algorithm = j.ErasureAlgorithm.String()
|
|
|
|
fi.Erasure.Index = j.ErasureIndex
|
|
|
|
fi.Erasure.BlockSize = j.ErasureBlockSize
|
|
|
|
fi.Erasure.DataBlocks = j.ErasureM
|
|
|
|
fi.Erasure.ParityBlocks = j.ErasureN
|
|
|
|
fi.Erasure.Distribution = make([]int, len(j.ErasureDist))
|
|
|
|
for i := range j.ErasureDist {
|
|
|
|
fi.Erasure.Distribution[i] = int(j.ErasureDist[i])
|
|
|
|
}
|
|
|
|
fi.DataDir = uuid.UUID(j.DataDir).String()
|
2020-11-12 15:12:09 -05:00
|
|
|
|
2021-04-19 13:30:42 -04:00
|
|
|
if st, ok := j.MetaSys[ReservedMetadataPrefixLower+TransitionStatus]; ok {
|
|
|
|
fi.TransitionStatus = string(st)
|
|
|
|
}
|
|
|
|
if o, ok := j.MetaSys[ReservedMetadataPrefixLower+TransitionedObjectName]; ok {
|
|
|
|
fi.TransitionedObjName = string(o)
|
|
|
|
}
|
2021-11-18 15:15:22 -05:00
|
|
|
if rv, ok := j.MetaSys[ReservedMetadataPrefixLower+TransitionedVersionID]; ok {
|
|
|
|
fi.TransitionVersionID = string(rv)
|
|
|
|
}
|
|
|
|
if sc, ok := j.MetaSys[ReservedMetadataPrefixLower+TransitionTier]; ok {
|
|
|
|
fi.TransitionTier = string(sc)
|
|
|
|
}
|
|
|
|
return fi, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read at most this much on initial read.
const metaDataReadDefault = 4 << 10

// metaDataPool recycles small metadata buffers between reads.
var metaDataPool = sync.Pool{New: func() interface{} { return make([]byte, 0, metaDataReadDefault) }}

// metaDataPoolGet will return a byte slice with capacity at least
// metaDataReadDefault. It will be length 0.
func metaDataPoolGet() []byte {
	buf := metaDataPool.Get().([]byte)
	return buf[:0]
}

// metaDataPoolPut will put an unused small buffer back into the pool.
func metaDataPoolPut(buf []byte) {
	c := cap(buf)
	// Only recycle buffers in the expected size band; undersized ones are
	// useless and oversized ones would pin large allocations in the pool.
	if c >= metaDataReadDefault && c < metaDataReadDefault*4 {
		metaDataPool.Put(buf)
	}
}
|
|
|
|
|
|
|
|
// readXLMetaNoData will load the metadata, but skip data segments.
// This should only be used when data is never interesting.
// If data is not xlv2, it is returned in full.
// It reads up to metaDataReadDefault bytes up front and fetches more
// lazily (via readMore) once the header reveals how much is needed.
func readXLMetaNoData(r io.Reader, size int64) ([]byte, error) {
	initial := size
	hasFull := true
	if initial > metaDataReadDefault {
		initial = metaDataReadDefault
		hasFull = false
	}

	buf := metaDataPoolGet()[:initial]
	_, err := io.ReadFull(r, buf)
	if err != nil {
		return nil, fmt.Errorf("readXLMetaNoData.ReadFull: %w", err)
	}
	// readMore grows buf to n bytes total, reading the extra from r.
	// Fails with ErrUnexpectedEOF if n exceeds the declared size or if
	// we already read everything (hasFull) yet still need more.
	readMore := func(n int64) error {
		has := int64(len(buf))
		if has >= n {
			return nil
		}
		if hasFull || n > size {
			return io.ErrUnexpectedEOF
		}
		extra := n - has
		if int64(cap(buf)) >= n {
			// Extend since we have enough space.
			buf = buf[:n]
		} else {
			buf = append(buf, make([]byte, extra)...)
		}
		_, err := io.ReadFull(r, buf[has:])
		if err != nil {
			if errors.Is(err, io.EOF) {
				// Returned if we read nothing.
				return fmt.Errorf("readXLMetaNoData.readMore: %w", io.ErrUnexpectedEOF)
			}
			return fmt.Errorf("readXLMetaNoData.readMore: %w", err)
		}
		return nil
	}
	tmp, major, minor, err := checkXL2V1(buf)
	if err != nil {
		// Not XLv2 (or unknown version): return the whole stream.
		err = readMore(size)
		return buf, err
	}
	switch major {
	case 1:
		switch minor {
		case 0:
			// v1.0 has no length header; read everything.
			err = readMore(size)
			return buf, err
		case 1, 2, 3:
			// Metadata is a msgp bin blob; its header tells us how much
			// of the stream is metadata (the rest is inline data we skip).
			sz, tmp, err := msgp.ReadBytesHeader(tmp)
			if err != nil {
				return nil, err
			}
			want := int64(sz) + int64(len(buf)-len(tmp))

			// v1.1 does not have CRC.
			if minor < 2 {
				if err := readMore(want); err != nil {
					return nil, err
				}
				return buf[:want], nil
			}

			// CRC is variable length, so we need to truncate exactly that.
			wantMax := want + msgp.Uint32Size
			if wantMax > size {
				wantMax = size
			}
			if err := readMore(wantMax); err != nil {
				return nil, err
			}

			// Decode the CRC to find its encoded length, then include it.
			tmp = buf[want:]
			_, after, err := msgp.ReadUint32Bytes(tmp)
			if err != nil {
				return nil, err
			}
			want += int64(len(tmp) - len(after))

			return buf[:want], err

		default:
			return nil, errors.New("unknown minor metadata version")
		}
	default:
		return nil, errors.New("unknown major metadata version")
	}
}
|
|
|
|
|
|
|
|
func decodeXLHeaders(buf []byte) (versions int, headerV, metaV uint8, b []byte, err error) {
|
|
|
|
hdrVer, buf, err := msgp.ReadUint8Bytes(buf)
|
|
|
|
if err != nil {
|
|
|
|
return 0, 0, 0, buf, err
|
|
|
|
}
|
|
|
|
metaVer, buf, err := msgp.ReadUint8Bytes(buf)
|
|
|
|
if err != nil {
|
|
|
|
return 0, 0, 0, buf, err
|
|
|
|
}
|
|
|
|
if hdrVer > xlHeaderVersion {
|
|
|
|
return 0, 0, 0, buf, fmt.Errorf("decodeXLHeaders: Unknown xl header version %d", metaVer)
|
|
|
|
}
|
|
|
|
if metaVer > xlMetaVersion {
|
|
|
|
return 0, 0, 0, buf, fmt.Errorf("decodeXLHeaders: Unknown xl meta version %d", metaVer)
|
|
|
|
}
|
|
|
|
versions, buf, err = msgp.ReadIntBytes(buf)
|
|
|
|
if err != nil {
|
|
|
|
return 0, 0, 0, buf, err
|
|
|
|
}
|
|
|
|
if versions < 0 {
|
|
|
|
return 0, 0, 0, buf, fmt.Errorf("decodeXLHeaders: Negative version count %d", versions)
|
|
|
|
}
|
|
|
|
return versions, hdrVer, metaVer, buf, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// decodeVersions will decode a number of versions from a buffer
// and perform a callback for each version in order, newest first.
// Return errDoneForNow to stop processing and return nil.
// Any non-nil error is returned.
func decodeVersions(buf []byte, versions int, fn func(idx int, hdr, meta []byte) error) (err error) {
	var tHdr, tMeta []byte // Zero copy bytes
	for i := 0; i < versions; i++ {
		// Each version is a (header, meta) pair of msgp bin blobs.
		tHdr, buf, err = msgp.ReadBytesZC(buf)
		if err != nil {
			return err
		}
		tMeta, buf, err = msgp.ReadBytesZC(buf)
		if err != nil {
			return err
		}
		if err = fn(i, tHdr, tMeta); err != nil {
			if err == errDoneForNow {
				// Early-exit sentinel: not an error.
				err = nil
			}
			return err
		}
	}
	return nil
}
|
|
|
|
|
|
|
|
// isIndexedMetaV2 returns non-nil result if metadata is indexed.
|
|
|
|
// If data doesn't validate nil is also returned.
|
|
|
|
func isIndexedMetaV2(buf []byte) (meta xlMetaBuf, data xlMetaInlineData) {
|
|
|
|
buf, major, minor, err := checkXL2V1(buf)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
if major != 1 || minor < 3 {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
meta, buf, err = msgp.ReadBytesZC(buf)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
if crc, nbuf, err := msgp.ReadUint32Bytes(buf); err == nil {
|
|
|
|
// Read metadata CRC
|
|
|
|
buf = nbuf
|
|
|
|
if got := uint32(xxhash.Sum64(meta)); got != crc {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
data = buf
|
|
|
|
if data.validate() != nil {
|
|
|
|
data.repair()
|
|
|
|
}
|
|
|
|
|
|
|
|
return meta, data
|
|
|
|
}
|
|
|
|
|
|
|
|
// xlMetaV2ShallowVersion is a partially decoded version entry:
// the fixed-size header is unmarshaled for cheap inspection/sorting,
// while the full version metadata is kept as raw msgpack bytes and
// decoded lazily (see getIdx).
type xlMetaV2ShallowVersion struct {
	header xlMetaV2VersionHeader // decoded header (version id, modtime, type, flags)
	meta   []byte                // raw marshaled xlMetaV2Version bytes
}
|
|
|
|
|
|
|
|
//msgp:ignore xlMetaV2 xlMetaV2ShallowVersion
|
|
|
|
|
|
|
|
// xlMetaV2 is the in-memory representation of an xl.meta file.
// Versions are kept shallow-decoded (headers only) and are ordered
// newest first; full version metadata is unmarshaled on demand.
type xlMetaV2 struct {
	versions []xlMetaV2ShallowVersion // all versions, sorted by sortByModTime (newest first)

	// data will contain raw data if any.
	// data will be one or more versions indexed by versionID.
	// To remove all data set to nil.
	data xlMetaInlineData

	// metadata version.
	metaV uint8
}
|
|
|
|
|
2021-12-02 14:29:16 -05:00
|
|
|
// LoadOrConvert will load the metadata in the buffer.
|
|
|
|
// If this is a legacy format, it will automatically be converted to XLV2.
|
|
|
|
func (x *xlMetaV2) LoadOrConvert(buf []byte) error {
|
|
|
|
if isXL2V1Format(buf) {
|
|
|
|
return x.Load(buf)
|
|
|
|
}
|
|
|
|
|
|
|
|
xlMeta := &xlMetaV1Object{}
|
2022-01-02 12:15:06 -05:00
|
|
|
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
2021-12-02 14:29:16 -05:00
|
|
|
if err := json.Unmarshal(buf, xlMeta); err != nil {
|
|
|
|
return errFileCorrupt
|
|
|
|
}
|
|
|
|
if len(x.versions) > 0 {
|
|
|
|
x.versions = x.versions[:0]
|
|
|
|
}
|
|
|
|
x.data = nil
|
|
|
|
x.metaV = xlMetaVersion
|
|
|
|
return x.AddLegacy(xlMeta)
|
|
|
|
}
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
// Load all versions of the stored data.
|
|
|
|
// Note that references to the incoming buffer will be kept.
|
|
|
|
func (x *xlMetaV2) Load(buf []byte) error {
|
|
|
|
if meta, data := isIndexedMetaV2(buf); meta != nil {
|
|
|
|
return x.loadIndexed(meta, data)
|
|
|
|
}
|
|
|
|
// Convert older format.
|
|
|
|
return x.loadLegacy(buf)
|
|
|
|
}
|
|
|
|
|
|
|
|
// loadIndexed loads an indexed (v1.3+) metadata buffer into x.
// Both buf and data are retained by reference; callers must not
// reuse the underlying buffers afterwards.
func (x *xlMetaV2) loadIndexed(buf xlMetaBuf, data xlMetaInlineData) error {
	versions, headerV, metaV, buf, err := decodeXLHeaders(buf)
	if err != nil {
		return err
	}
	// Reuse the existing slice when it is big enough; the +1 leaves
	// headroom for a subsequent addVersion without reallocating.
	if cap(x.versions) < versions {
		x.versions = make([]xlMetaV2ShallowVersion, 0, versions+1)
	}
	x.versions = x.versions[:versions]
	x.data = data
	x.metaV = metaV
	if err = x.data.validate(); err != nil {
		// Inline data is damaged: drop unreadable entries and keep going.
		x.data.repair()
		logger.Info("xlMetaV2.loadIndexed: data validation failed: %v. %d entries after repair", err, x.data.entries())
	}

	// Decode only headers eagerly; keep raw per-version metadata for lazy decode.
	return decodeVersions(buf, versions, func(i int, hdr, meta []byte) error {
		ver := &x.versions[i]
		_, err = ver.header.unmarshalV(headerV, hdr)
		if err != nil {
			return err
		}
		ver.meta = meta
		return nil
	})
}
|
|
|
|
|
|
|
|
// loadLegacy will load content prior to v1.3
// Note that references to the incoming buffer will be kept.
func (x *xlMetaV2) loadLegacy(buf []byte) error {
	buf, major, minor, err := checkXL2V1(buf)
	if err != nil {
		return fmt.Errorf("xlMetaV2.Load %w", err)
	}
	var allMeta []byte
	switch major {
	case 1:
		switch minor {
		case 0:
			// v1.0: the whole remainder is the metadata blob.
			allMeta = buf
		case 1, 2:
			// v1.1/v1.2: metadata is a length-prefixed bin blob.
			v, buf, err := msgp.ReadBytesZC(buf)
			if err != nil {
				return fmt.Errorf("xlMetaV2.Load version(%d), bufLen(%d) %w", minor, len(buf), err)
			}
			if minor >= 2 {
				if crc, nbuf, err := msgp.ReadUint32Bytes(buf); err == nil {
					// Read metadata CRC (added in v2)
					buf = nbuf
					if got := uint32(xxhash.Sum64(v)); got != crc {
						return fmt.Errorf("xlMetaV2.Load version(%d), CRC mismatch, want 0x%x, got 0x%x", minor, crc, got)
					}
				} else {
					return fmt.Errorf("xlMetaV2.Load version(%d), loading CRC: %w", minor, err)
				}
			}

			allMeta = v
			// Add remaining data.
			x.data = buf
			if err = x.data.validate(); err != nil {
				// Best-effort: repair damaged inline data rather than failing the load.
				x.data.repair()
				logger.Info("xlMetaV2.Load: data validation failed: %v. %d entries after repair", err, x.data.entries())
			}
		default:
			return errors.New("unknown minor metadata version")
		}
	default:
		return errors.New("unknown major metadata version")
	}
	if allMeta == nil {
		return errCorruptedFormat
	}
	// bts will shrink as we decode.
	bts := allMeta
	var field []byte
	var zb0001 uint32
	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		return msgp.WrapError(err, "loadLegacy.ReadMapHeader")
	}

	// Hand-rolled msgp decode of the legacy top-level map; only the
	// "Versions" key is consumed, anything else is skipped.
	var tmp xlMetaV2Version
	for zb0001 > 0 {
		zb0001--
		field, bts, err = msgp.ReadMapKeyZC(bts)
		if err != nil {
			return msgp.WrapError(err, "loadLegacy.ReadMapKey")
		}
		switch msgp.UnsafeString(field) {
		case "Versions":
			var zb0002 uint32
			zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
			if err != nil {
				return msgp.WrapError(err, "Versions")
			}
			if cap(x.versions) >= int(zb0002) {
				x.versions = (x.versions)[:zb0002]
			} else {
				x.versions = make([]xlMetaV2ShallowVersion, zb0002, zb0002+1)
			}
			for za0001 := range x.versions {
				// Track the byte span this version occupies inside allMeta
				// so its raw encoding can be referenced without re-marshaling.
				start := len(allMeta) - len(bts)
				bts, err = tmp.unmarshalV(1, bts)
				if err != nil {
					return msgp.WrapError(err, "Versions", za0001)
				}
				end := len(allMeta) - len(bts)
				// We reference the marshaled data, so we don't have to re-marshal.
				x.versions[za0001] = xlMetaV2ShallowVersion{
					header: tmp.header(),
					meta:   allMeta[start:end],
				}
			}
		default:
			bts, err = msgp.Skip(bts)
			if err != nil {
				return msgp.WrapError(err, "loadLegacy.Skip")
			}
		}
	}
	x.metaV = 1 // Fixed for legacy conversions.
	x.sortByModTime()
	return nil
}
|
|
|
|
|
2021-12-02 14:29:16 -05:00
|
|
|
// latestModtime returns the modtime of the latest version.
|
|
|
|
func (x *xlMetaV2) latestModtime() time.Time {
|
|
|
|
if x == nil || len(x.versions) == 0 {
|
|
|
|
return time.Time{}
|
|
|
|
}
|
|
|
|
return time.Unix(0, x.versions[0].header.ModTime)
|
|
|
|
}
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
// addVersion marshals ver once and inserts it into x.versions so that
// descending modtime order is preserved (index 0 stays the newest).
func (x *xlMetaV2) addVersion(ver xlMetaV2Version) error {
	modTime := ver.getModTime().UnixNano()
	if !ver.Valid() {
		return errors.New("attempted to add invalid version")
	}
	encoded, err := ver.MarshalMsg(nil)
	if err != nil {
		return err
	}
	// Add space at the end.
	// Will have -1 modtime, so it will be inserted there.
	x.versions = append(x.versions, xlMetaV2ShallowVersion{header: xlMetaV2VersionHeader{ModTime: -1}})

	// Linear search, we likely have to insert at front.
	for i, existing := range x.versions {
		if existing.header.ModTime <= modTime {
			// Insert at current idx. First move current back.
			copy(x.versions[i+1:], x.versions[i:])
			x.versions[i] = xlMetaV2ShallowVersion{
				header: ver.header(),
				meta:   encoded,
			}
			return nil
		}
	}
	// Unreachable: the -1 sentinel appended above always matches.
	return fmt.Errorf("addVersion: Internal error, unable to add version")
}
|
|
|
|
|
|
|
|
// AppendTo will marshal the data in z and append it to the provided slice.
// Layout: xlHeader | xlVersionCurrent | bin32 size | headers+versions | CRC | inline data.
func (x *xlMetaV2) AppendTo(dst []byte) ([]byte, error) {
	// Header...
	sz := len(xlHeader) + len(xlVersionCurrent) + msgp.ArrayHeaderSize + len(dst) + 3*msgp.Uint32Size
	// Existing + Inline data
	sz += len(dst) + len(x.data)
	// Versions...
	for _, ver := range x.versions {
		sz += 32 + len(ver.meta)
	}
	// Grow dst up-front so the appends below do not reallocate.
	if cap(dst) < sz {
		buf := make([]byte, len(dst), sz)
		copy(buf, dst)
		dst = buf
	}
	if err := x.data.validate(); err != nil {
		return nil, err
	}

	dst = append(dst, xlHeader[:]...)
	dst = append(dst, xlVersionCurrent[:]...)
	// Add "bin 32" type header to always have enough space.
	// We will fill out the correct size when we know it.
	dst = append(dst, 0xc6, 0, 0, 0, 0)
	dataOffset := len(dst)

	dst = msgp.AppendUint(dst, xlHeaderVersion)
	dst = msgp.AppendUint(dst, xlMetaVersion)
	dst = msgp.AppendInt(dst, len(x.versions))

	// Scratch buffer from the pool for per-version header marshaling.
	tmp := metaDataPoolGet()
	defer metaDataPoolPut(tmp)
	for _, ver := range x.versions {
		var err error

		// Add header
		tmp, err = ver.header.MarshalMsg(tmp[:0])
		if err != nil {
			return nil, err
		}
		dst = msgp.AppendBytes(dst, tmp)

		// Add full meta
		dst = msgp.AppendBytes(dst, ver.meta)
	}

	// Update size...
	// Back-patch the bin32 length reserved above, now that it is known.
	binary.BigEndian.PutUint32(dst[dataOffset-4:dataOffset], uint32(len(dst)-dataOffset))

	// Add CRC of metadata as fixed size (5 bytes)
	// Prior to v1.3 this was variable sized.
	tmp = tmp[:5]
	tmp[0] = 0xce // muint32
	binary.BigEndian.PutUint32(tmp[1:], uint32(xxhash.Sum64(dst[dataOffset:])))
	dst = append(dst, tmp[:5]...)
	return append(dst, x.data...), nil
}
|
|
|
|
|
|
|
|
func (x *xlMetaV2) findVersion(key [16]byte) (idx int, ver *xlMetaV2Version, err error) {
|
|
|
|
for i, ver := range x.versions {
|
|
|
|
if key == ver.header.VersionID {
|
|
|
|
obj, err := x.getIdx(i)
|
|
|
|
return i, obj, err
|
|
|
|
}
|
2021-04-19 13:30:42 -04:00
|
|
|
}
|
2021-11-18 15:15:22 -05:00
|
|
|
return -1, nil, errFileVersionNotFound
|
2020-06-12 23:04:01 -04:00
|
|
|
}
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
// getIdx fully decodes the version stored at idx.
// The result is a copy; use setIdx to persist any modification.
func (x *xlMetaV2) getIdx(idx int) (ver *xlMetaV2Version, err error) {
	if idx < 0 || idx >= len(x.versions) {
		return nil, errFileNotFound
	}
	var dst xlMetaV2Version
	_, err = dst.unmarshalV(x.metaV, x.versions[idx].meta)
	// Debug-only consistency check, intentionally disabled in normal builds:
	// verifies the shallow header's version id matches the decoded object's.
	if false {
		if err == nil && x.versions[idx].header.VersionID != dst.getVersionID() {
			panic(fmt.Sprintf("header: %x != object id: %x", x.versions[idx].header.VersionID, dst.getVersionID()))
		}
	}
	return &dst, err
}
|
|
|
|
|
|
|
|
// setIdx will replace a version at a given index.
// Note that versions may become re-sorted if modtime changes.
func (x *xlMetaV2) setIdx(idx int, ver xlMetaV2Version) (err error) {
	if idx < 0 || idx >= len(x.versions) {
		return errFileNotFound
	}
	update := &x.versions[idx]
	prevMod := update.header.ModTime
	// Marshal into the existing buffer. The full slice expression caps
	// capacity at len so that a grow allocates fresh memory instead of
	// stomping bytes that may be aliased elsewhere (zero-copy loads).
	update.meta, err = ver.MarshalMsg(update.meta[:0:len(update.meta)])
	if err != nil {
		// Leave no stale bytes behind on failure.
		update.meta = nil
		return err
	}
	update.header = ver.header()
	// Keep newest-first ordering if the modification time moved.
	if prevMod != update.header.ModTime {
		x.sortByModTime()
	}
	return nil
}
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
// sortByModTime will sort versions by modtime in descending order,
|
|
|
|
// meaning index 0 will be latest version.
|
|
|
|
func (x *xlMetaV2) sortByModTime() {
|
|
|
|
// Quick check
|
|
|
|
if len(x.versions) <= 1 || sort.SliceIsSorted(x.versions, func(i, j int) bool {
|
2021-12-02 14:29:16 -05:00
|
|
|
return x.versions[i].header.sortsBefore(x.versions[j].header)
|
2021-11-18 15:15:22 -05:00
|
|
|
}) {
|
|
|
|
return
|
2021-04-15 11:44:05 -04:00
|
|
|
}
|
2021-11-18 15:15:22 -05:00
|
|
|
|
|
|
|
// We should sort.
|
|
|
|
sort.Slice(x.versions, func(i, j int) bool {
|
2021-12-02 14:29:16 -05:00
|
|
|
return x.versions[i].header.sortsBefore(x.versions[j].header)
|
2021-11-18 15:15:22 -05:00
|
|
|
})
|
2021-04-15 11:44:05 -04:00
|
|
|
}
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
// DeleteVersion deletes the version specified by version id.
// returns to the caller which dataDir to delete, also
// indicates if this is the last version.
func (x *xlMetaV2) DeleteVersion(fi FileInfo) (string, bool, error) {
	// This is a situation where versionId is explicitly
	// specified as "null", as we do not save "null"
	// string it is considered empty. But empty also
	// means the version which matches will be purged.
	if fi.VersionID == nullVersionID {
		fi.VersionID = ""
	}

	var uv uuid.UUID
	var err error
	if fi.VersionID != "" {
		uv, err = uuid.Parse(fi.VersionID)
		if err != nil {
			return "", false, errFileVersionNotFound
		}
	}

	// Pre-build the delete marker that may replace the deleted version.
	var ventry xlMetaV2Version
	if fi.Deleted {
		ventry = xlMetaV2Version{
			Type: DeleteType,
			DeleteMarker: &xlMetaV2DeleteMarker{
				VersionID: uv,
				ModTime:   fi.ModTime.UnixNano(),
				MetaSys:   make(map[string][]byte),
			},
		}
		if !ventry.Valid() {
			return "", false, errors.New("internal error: invalid version entry generated")
		}
	}
	// updateVersion: true when an existing version should be updated
	// in place (replication bookkeeping) rather than removed.
	updateVersion := false
	if fi.VersionPurgeStatus().Empty() && (fi.DeleteMarkerReplicationStatus() == "REPLICA" || fi.DeleteMarkerReplicationStatus().Empty()) {
		updateVersion = fi.MarkDeleted
	} else {
		// for replication scenario
		if fi.Deleted && fi.VersionPurgeStatus() != Complete {
			if !fi.VersionPurgeStatus().Empty() || fi.DeleteMarkerReplicationStatus().Empty() {
				updateVersion = true
			}
		}
		// object or delete-marker versioned delete is not complete
		if !fi.VersionPurgeStatus().Empty() && fi.VersionPurgeStatus() != Complete {
			updateVersion = true
		}
	}

	// Carry replication state into the new delete marker's system metadata.
	if fi.Deleted {
		if !fi.DeleteMarkerReplicationStatus().Empty() {
			switch fi.DeleteMarkerReplicationStatus() {
			case replication.Replica:
				ventry.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicaStatus] = []byte(string(fi.ReplicationState.ReplicaStatus))
				ventry.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicaTimestamp] = []byte(fi.ReplicationState.ReplicaTimeStamp.Format(http.TimeFormat))
			default:
				ventry.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicationStatus] = []byte(fi.ReplicationState.ReplicationStatusInternal)
				ventry.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicationTimestamp] = []byte(fi.ReplicationState.ReplicationTimeStamp.Format(http.TimeFormat))
			}
		}
		if !fi.VersionPurgeStatus().Empty() {
			ventry.DeleteMarker.MetaSys[VersionPurgeStatusKey] = []byte(fi.ReplicationState.VersionPurgeStatusInternal)
		}
		for k, v := range fi.ReplicationState.ResetStatusesMap {
			ventry.DeleteMarker.MetaSys[k] = []byte(v)
		}
	}

	// First pass: handle legacy objects, delete markers, and
	// replication-only updates of object versions.
	for i, ver := range x.versions {
		if ver.header.VersionID != uv {
			continue
		}
		switch ver.header.Type {
		case LegacyType:
			ver, err := x.getIdx(i)
			if err != nil {
				return "", false, err
			}
			x.versions = append(x.versions[:i], x.versions[i+1:]...)
			if fi.Deleted {
				err = x.addVersion(ventry)
			}
			return ver.ObjectV1.DataDir, len(x.versions) == 0, err
		case DeleteType:
			if updateVersion {
				// Update the existing delete marker's replication metadata in place.
				ver, err := x.getIdx(i)
				if err != nil {
					return "", false, err
				}
				if len(ver.DeleteMarker.MetaSys) == 0 {
					ver.DeleteMarker.MetaSys = make(map[string][]byte)
				}
				if !fi.DeleteMarkerReplicationStatus().Empty() {
					switch fi.DeleteMarkerReplicationStatus() {
					case replication.Replica:
						ver.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicaStatus] = []byte(string(fi.ReplicationState.ReplicaStatus))
						ver.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicaTimestamp] = []byte(fi.ReplicationState.ReplicaTimeStamp.Format(http.TimeFormat))
					default:
						ver.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicationStatus] = []byte(fi.ReplicationState.ReplicationStatusInternal)
						ver.DeleteMarker.MetaSys[ReservedMetadataPrefixLower+ReplicationTimestamp] = []byte(fi.ReplicationState.ReplicationTimeStamp.Format(http.TimeFormat))
					}
				}
				if !fi.VersionPurgeStatus().Empty() {
					ver.DeleteMarker.MetaSys[VersionPurgeStatusKey] = []byte(fi.ReplicationState.VersionPurgeStatusInternal)
				}
				for k, v := range fi.ReplicationState.ResetStatusesMap {
					ver.DeleteMarker.MetaSys[k] = []byte(v)
				}
				err = x.setIdx(i, *ver)
				return "", len(x.versions) == 0, err
			}
			// Remove the delete marker; possibly replace with a new one.
			var err error
			x.versions = append(x.versions[:i], x.versions[i+1:]...)
			if fi.MarkDeleted && (fi.VersionPurgeStatus().Empty() || (fi.VersionPurgeStatus() != Complete)) {
				err = x.addVersion(ventry)
			}
			return "", len(x.versions) == 0, err
		case ObjectType:
			if updateVersion && !fi.Deleted {
				// Replication bookkeeping only; the object version itself stays.
				ver, err := x.getIdx(i)
				if err != nil {
					return "", false, err
				}
				ver.ObjectV2.MetaSys[VersionPurgeStatusKey] = []byte(fi.ReplicationState.VersionPurgeStatusInternal)
				for k, v := range fi.ReplicationState.ResetStatusesMap {
					ver.ObjectV2.MetaSys[k] = []byte(v)
				}
				err = x.setIdx(i, *ver)
				return "", len(x.versions) == 0, err
			}
		}
	}

	// Second pass: actually remove (or transition/restore-expire) the
	// matching object version.
	for i, version := range x.versions {
		if version.header.Type != ObjectType || version.header.VersionID != uv {
			continue
		}
		ver, err := x.getIdx(i)
		if err != nil {
			return "", false, err
		}
		switch {
		case fi.ExpireRestored:
			ver.ObjectV2.RemoveRestoreHdrs()
			err = x.setIdx(i, *ver)
		case fi.TransitionStatus == lifecycle.TransitionComplete:
			ver.ObjectV2.SetTransition(fi)
			err = x.setIdx(i, *ver)
		default:
			x.versions = append(x.versions[:i], x.versions[i+1:]...)
			// if uv has tiered content we add a
			// free-version to track it for
			// asynchronous deletion via scanner.
			if freeVersion, toFree := ver.ObjectV2.InitFreeVersion(fi); toFree {
				err = x.addVersion(freeVersion)
			}
		}
		logger.LogIf(context.Background(), err)

		if fi.Deleted {
			err = x.addVersion(ventry)
		}
		if x.SharedDataDirCount(ver.ObjectV2.VersionID, ver.ObjectV2.DataDir) > 0 {
			// Found that another version references the same dataDir
			// we shouldn't remove it, and only remove the version instead
			return "", len(x.versions) == 0, nil
		}
		return uuid.UUID(ver.ObjectV2.DataDir).String(), len(x.versions) == 0, err
	}

	// No matching version: a delete request still records its marker.
	if fi.Deleted {
		err = x.addVersion(ventry)
		return "", false, err
	}
	return "", false, errFileVersionNotFound
}
|
|
|
|
|
|
|
|
// xlMetaDataDirDecoder is a shallow decoder for decoding object datadir only.
// It avoids unmarshaling the full xlMetaV2Version when only the data
// directory id is needed (see SharedDataDirCount).
type xlMetaDataDirDecoder struct {
	ObjectV2 *struct {
		DataDir [16]byte `msg:"DDir"` // Data dir ID
	} `msg:"V2Obj,omitempty"`
}
|
|
|
|
|
|
|
|
// UpdateObjectVersion updates metadata and modTime for a given
// versionID, NOTE: versionID must be valid and should exist -
// and must not be a DeleteMarker or legacy object, if no
// versionID is specified 'null' versionID is updated instead.
//
// It is callers responsibility to set correct versionID, this
// function shouldn't be further extended to update immutable
// values such as ErasureInfo, ChecksumInfo.
//
// Metadata is only updated to new values, existing values
// stay as is, if you wish to update all values you should
// update all metadata freshly before calling this function
// in-case you wish to clear existing metadata.
func (x *xlMetaV2) UpdateObjectVersion(fi FileInfo) error {
	if fi.VersionID == "" {
		// this means versioning is not yet
		// enabled or suspend i.e all versions
		// are basically default value i.e "null"
		fi.VersionID = nullVersionID
	}

	var uv uuid.UUID
	var err error
	if fi.VersionID != "" && fi.VersionID != nullVersionID {
		uv, err = uuid.Parse(fi.VersionID)
		if err != nil {
			return err
		}
	}

	for i, version := range x.versions {
		switch version.header.Type {
		case LegacyType, DeleteType:
			// Updating delete markers or legacy objects is not supported.
			if version.header.VersionID == uv {
				return errMethodNotAllowed
			}
		case ObjectType:
			if version.header.VersionID == uv {
				ver, err := x.getIdx(i)
				if err != nil {
					return err
				}
				// Keys under the reserved prefix go to system metadata,
				// everything else to user metadata.
				for k, v := range fi.Metadata {
					if len(k) > len(ReservedMetadataPrefixLower) && strings.EqualFold(k[:len(ReservedMetadataPrefixLower)], ReservedMetadataPrefixLower) {
						ver.ObjectV2.MetaSys[k] = []byte(v)
					} else {
						ver.ObjectV2.MetaUser[k] = v
					}
				}
				if !fi.ModTime.IsZero() {
					ver.ObjectV2.ModTime = fi.ModTime.UnixNano()
				}
				return x.setIdx(i, *ver)
			}
		}
	}

	return errFileVersionNotFound
}
|
|
|
|
|
|
|
|
// AddVersion adds a new version
// built from fi, replacing any existing version with the same id.
func (x *xlMetaV2) AddVersion(fi FileInfo) error {
	if fi.VersionID == "" {
		// this means versioning is not yet
		// enabled or suspend i.e all versions
		// are basically default value i.e "null"
		fi.VersionID = nullVersionID
	}

	var uv uuid.UUID
	var err error
	if fi.VersionID != "" && fi.VersionID != nullVersionID {
		uv, err = uuid.Parse(fi.VersionID)
		if err != nil {
			return err
		}
	}

	var dd uuid.UUID
	if fi.DataDir != "" {
		dd, err = uuid.Parse(fi.DataDir)
		if err != nil {
			return err
		}
	}

	ventry := xlMetaV2Version{}

	if fi.Deleted {
		// Delete markers carry only id, modtime and system metadata.
		ventry.Type = DeleteType
		ventry.DeleteMarker = &xlMetaV2DeleteMarker{
			VersionID: uv,
			ModTime:   fi.ModTime.UnixNano(),
			MetaSys:   make(map[string][]byte),
		}
	} else {
		ventry.Type = ObjectType
		ventry.ObjectV2 = &xlMetaV2Object{
			VersionID:          uv,
			DataDir:            dd,
			Size:               fi.Size,
			ModTime:            fi.ModTime.UnixNano(),
			ErasureAlgorithm:   ReedSolomon,
			ErasureM:           fi.Erasure.DataBlocks,
			ErasureN:           fi.Erasure.ParityBlocks,
			ErasureBlockSize:   fi.Erasure.BlockSize,
			ErasureIndex:       fi.Erasure.Index,
			BitrotChecksumAlgo: HighwayHash,
			ErasureDist:        make([]uint8, len(fi.Erasure.Distribution)),
			PartNumbers:        make([]int, len(fi.Parts)),
			PartETags:          nil,
			PartSizes:          make([]int64, len(fi.Parts)),
			PartActualSizes:    make([]int64, len(fi.Parts)),
			MetaSys:            make(map[string][]byte),
			MetaUser:           make(map[string]string, len(fi.Metadata)),
		}
		for i := range fi.Parts {
			// Only add etags if any.
			if fi.Parts[i].ETag != "" {
				ventry.ObjectV2.PartETags = make([]string, len(fi.Parts))
				break
			}
		}
		for i := range fi.Erasure.Distribution {
			ventry.ObjectV2.ErasureDist[i] = uint8(fi.Erasure.Distribution[i])
		}

		for i := range fi.Parts {
			ventry.ObjectV2.PartSizes[i] = fi.Parts[i].Size
			if len(ventry.ObjectV2.PartETags) > 0 && fi.Parts[i].ETag != "" {
				ventry.ObjectV2.PartETags[i] = fi.Parts[i].ETag
			}
			ventry.ObjectV2.PartNumbers[i] = fi.Parts[i].Number
			ventry.ObjectV2.PartActualSizes[i] = fi.Parts[i].ActualSize
		}

		tierFVIDKey := ReservedMetadataPrefixLower + tierFVID
		tierFVMarkerKey := ReservedMetadataPrefixLower + tierFVMarker
		for k, v := range fi.Metadata {
			if len(k) > len(ReservedMetadataPrefixLower) && strings.EqualFold(k[:len(ReservedMetadataPrefixLower)], ReservedMetadataPrefixLower) {
				// Skip tierFVID, tierFVMarker keys; it's used
				// only for creating free-version.
				switch k {
				case tierFVIDKey, tierFVMarkerKey:
					continue
				}

				ventry.ObjectV2.MetaSys[k] = []byte(v)
			} else {
				ventry.ObjectV2.MetaUser[k] = v
			}
		}

		// If asked to save data.
		// Note: fi.Size == 0 also stores (empty) inline data for zero-byte objects.
		if len(fi.Data) > 0 || fi.Size == 0 {
			x.data.replace(fi.VersionID, fi.Data)
		}

		// Transition (tiering) state, stored as system metadata.
		if fi.TransitionStatus != "" {
			ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionStatus] = []byte(fi.TransitionStatus)
		}
		if fi.TransitionedObjName != "" {
			ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionedObjectName] = []byte(fi.TransitionedObjName)
		}
		if fi.TransitionVersionID != "" {
			ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionedVersionID] = []byte(fi.TransitionVersionID)
		}
		if fi.TransitionTier != "" {
			ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionTier] = []byte(fi.TransitionTier)
		}
	}

	if !ventry.Valid() {
		return errors.New("internal error: invalid version entry generated")
	}

	// Check if we should replace first.
	for i := range x.versions {
		if x.versions[i].header.VersionID != uv {
			continue
		}
		switch x.versions[i].header.Type {
		case LegacyType:
			// This would convert legacy type into new ObjectType
			// this means that we are basically purging the `null`
			// version of the object.
			return x.setIdx(i, ventry)
		case ObjectType:
			return x.setIdx(i, ventry)
		case DeleteType:
			// Allowing delete marker to replaced with proper
			// object data type as well, this is not S3 complaint
			// behavior but kept here for future flexibility.
			return x.setIdx(i, ventry)
		}
	}

	// We did not find it, add it.
	return x.addVersion(ventry)
}
|
|
|
|
|
|
|
|
func (x *xlMetaV2) SharedDataDirCount(versionID [16]byte, dataDir [16]byte) int {
|
|
|
|
// v2 object is inlined, if it is skip dataDir share check.
|
|
|
|
if x.data.entries() > 0 && x.data.find(uuid.UUID(versionID).String()) != nil {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
var sameDataDirCount int
|
|
|
|
var decoded xlMetaDataDirDecoder
|
|
|
|
for _, version := range x.versions {
|
|
|
|
if version.header.Type != ObjectType || version.header.VersionID == versionID || !version.header.UsesDataDir() {
|
|
|
|
continue
|
2020-08-03 19:21:10 -04:00
|
|
|
}
|
2021-11-18 15:15:22 -05:00
|
|
|
_, err := decoded.UnmarshalMsg(version.meta)
|
|
|
|
if err != nil || decoded.ObjectV2 == nil || decoded.ObjectV2.DataDir != dataDir {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
sameDataDirCount++
|
|
|
|
}
|
|
|
|
return sameDataDirCount
|
|
|
|
}
|
2021-04-19 13:30:42 -04:00
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
func (x *xlMetaV2) SharedDataDirCountStr(versionID, dataDir string) int {
|
|
|
|
var (
|
|
|
|
uv uuid.UUID
|
|
|
|
ddir uuid.UUID
|
|
|
|
err error
|
|
|
|
)
|
|
|
|
if versionID == nullVersionID {
|
|
|
|
versionID = ""
|
|
|
|
}
|
|
|
|
if versionID != "" {
|
|
|
|
uv, err = uuid.Parse(versionID)
|
|
|
|
if err != nil {
|
|
|
|
return 0
|
2020-08-03 19:21:10 -04:00
|
|
|
}
|
|
|
|
}
|
2021-11-18 15:15:22 -05:00
|
|
|
ddir, err = uuid.Parse(dataDir)
|
|
|
|
if err != nil {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
return x.SharedDataDirCount(uv, ddir)
|
|
|
|
}
|
2020-08-03 19:21:10 -04:00
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
// AddLegacy adds a legacy version, is only called when no prior
|
|
|
|
// versions exist, safe to use it by only one function in xl-storage(RenameData)
|
|
|
|
func (x *xlMetaV2) AddLegacy(m *xlMetaV1Object) error {
|
|
|
|
if !m.valid() {
|
|
|
|
return errFileCorrupt
|
2020-09-02 03:19:03 -04:00
|
|
|
}
|
2021-11-18 15:15:22 -05:00
|
|
|
m.VersionID = nullVersionID
|
|
|
|
|
|
|
|
return x.addVersion(xlMetaV2Version{ObjectV1: m, Type: LegacyType})
|
2020-06-12 23:04:01 -04:00
|
|
|
}
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
// ToFileInfo converts xlMetaV2 into a common FileInfo datastructure
// for consumption across callers.
func (x xlMetaV2) ToFileInfo(volume, path, versionID string) (fi FileInfo, err error) {
	var uv uuid.UUID
	if versionID != "" && versionID != nullVersionID {
		uv, err = uuid.Parse(versionID)
		if err != nil {
			logger.LogIf(GlobalContext, fmt.Errorf("invalid versionID specified %s", versionID))
			return fi, errFileVersionNotFound
		}
	}
	var succModTime int64
	isLatest := true
	nonFreeVersions := len(x.versions)
	found := false
	// Versions iterate newest first; the loop keeps running after a match
	// only to finish counting non-free versions.
	for _, ver := range x.versions {
		header := &ver.header
		// skip listing free-version unless explicitly requested via versionID
		if header.FreeVersion() {
			nonFreeVersions--
			if header.VersionID != uv {
				continue
			}
		}
		if found {
			continue
		}

		// We need a specific version, skip...
		if versionID != "" && uv != header.VersionID {
			isLatest = false
			// Track the successor's modtime for the eventual match.
			succModTime = header.ModTime
			continue
		}

		// We found what we need.
		found = true
		var version xlMetaV2Version
		if _, err := version.unmarshalV(x.metaV, ver.meta); err != nil {
			return fi, err
		}
		if fi, err = version.ToFileInfo(volume, path); err != nil {
			return fi, err
		}
		fi.IsLatest = isLatest
		if succModTime != 0 {
			fi.SuccessorModTime = time.Unix(0, succModTime)
		}
	}
	if !found {
		if versionID == "" {
			return FileInfo{}, errFileNotFound
		}

		return FileInfo{}, errFileVersionNotFound
	}
	fi.NumVersions = nonFreeVersions
	return fi, err
}
|
|
|
|
|
|
|
|
// ListVersions lists current versions, and current deleted
|
|
|
|
// versions returns error for unexpected entries.
|
2020-11-19 21:43:58 -05:00
|
|
|
// showPendingDeletes is set to true if ListVersions needs to list objects marked deleted
|
|
|
|
// but waiting to be replicated
|
2021-11-18 15:15:22 -05:00
|
|
|
func (x xlMetaV2) ListVersions(volume, path string) ([]FileInfo, error) {
|
|
|
|
versions := make([]FileInfo, 0, len(x.versions))
|
2021-02-05 14:59:08 -05:00
|
|
|
var err error
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
var dst xlMetaV2Version
|
|
|
|
for _, version := range x.versions {
|
|
|
|
_, err = dst.unmarshalV(x.metaV, version.meta)
|
|
|
|
if err != nil {
|
|
|
|
return versions, err
|
2020-06-12 23:04:01 -04:00
|
|
|
}
|
2021-11-18 15:15:22 -05:00
|
|
|
fi, err := dst.ToFileInfo(volume, path)
|
2020-06-12 23:04:01 -04:00
|
|
|
if err != nil {
|
2021-11-18 15:15:22 -05:00
|
|
|
return versions, err
|
2020-06-12 23:04:01 -04:00
|
|
|
}
|
2021-11-18 15:15:22 -05:00
|
|
|
fi.NumVersions = len(x.versions)
|
2020-07-04 15:25:53 -04:00
|
|
|
versions = append(versions, fi)
|
2020-06-12 23:04:01 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
for i := range versions {
|
2021-02-05 14:59:08 -05:00
|
|
|
versions[i].NumVersions = len(versions)
|
|
|
|
if i > 0 {
|
|
|
|
versions[i].SuccessorModTime = versions[i-1].ModTime
|
2020-06-12 23:04:01 -04:00
|
|
|
}
|
|
|
|
}
|
2021-11-18 15:15:22 -05:00
|
|
|
if len(versions) > 0 {
|
|
|
|
versions[0].IsLatest = true
|
|
|
|
}
|
|
|
|
return versions, nil
|
2020-06-12 23:04:01 -04:00
|
|
|
}
|
|
|
|
|
2021-12-02 14:29:16 -05:00
|
|
|
// mergeXLV2Versions will merge all versions, typically from different disks
// that have at least quorum entries in all metas.
// Quorum must be the minimum number of matching metadata files.
// Quorum should be > 1 and <= len(versions).
// If strict is set to false, entries that match type
// and version ID per matchesNotStrict are counted toward quorum even
// when their full headers (signatures) differ.
func mergeXLV2Versions(quorum int, strict bool, versions ...[]xlMetaV2ShallowVersion) (merged []xlMetaV2ShallowVersion) {
	if len(versions) < quorum || len(versions) == 0 {
		return nil
	}
	if len(versions) == 1 {
		return versions[0]
	}
	if quorum == 1 {
		// No need for non-strict checks if quorum is 1.
		strict = true
	}
	// Our result
	merged = make([]xlMetaV2ShallowVersion, 0, len(versions[0]))
	tops := make([]xlMetaV2ShallowVersion, len(versions))
	// Each pass of this loop selects at most one version (the latest
	// among the current heads) and truncates it from every input stream.
	// The loop ends when fewer than quorum streams still have entries.
	for {
		// Step 1 create slice with all top versions.
		tops = tops[:0]
		var topSig [4]byte
		var topID [16]byte
		consistent := true // Are all signatures consistent (shortcut)
		for _, vers := range versions {
			if len(vers) == 0 {
				consistent = false
				continue
			}
			ver := vers[0]
			if len(tops) == 0 {
				consistent = true
				topSig = ver.header.Signature
				topID = ver.header.VersionID
			} else {
				consistent = consistent && topSig == ver.header.Signature && topID == ver.header.VersionID
			}
			tops = append(tops, vers[0])
		}

		// Check if done...
		if len(tops) < quorum {
			// We couldn't gather enough for quorum
			break
		}

		var latest xlMetaV2ShallowVersion
		var latestCount int
		if consistent {
			// All had the same signature, easy.
			latest = tops[0]
			latestCount = len(tops)
			merged = append(merged, latest)
		} else {
			// Find latest.
			for i, ver := range tops {
				// Exact header match with the current candidate.
				if ver.header == latest.header {
					latestCount++
					continue
				}
				// ver is newer than the current candidate (or is the
				// first entry): it becomes the new candidate.
				if i == 0 || ver.header.sortsBefore(latest.header) {
					if i == 0 {
						latestCount = 1
					} else if !strict && ver.header.matchesNotStrict(latest.header) {
						// Non-strict match carries the previous count forward.
						latestCount++
					} else {
						latestCount = 1
					}
					latest = ver
					continue
				}

				// Mismatch, but older.
				if !strict && ver.header.matchesNotStrict(latest.header) {
					// If non-nil version ID and it matches, assume match, but keep newest.
					if ver.header.sortsBefore(latest.header) {
						latest = ver
					}
					latestCount++
				}
			}
			if latestCount >= quorum {
				merged = append(merged, latest)
			}
		}

		// Remove from all streams up until latest modtime or if selected.
		for i, vers := range versions {
			for _, ver := range vers {
				// Truncate later modtimes, not selected.
				if ver.header.ModTime > latest.header.ModTime {
					versions[i] = versions[i][1:]
					continue
				}
				// Truncate matches
				if ver.header == latest.header {
					versions[i] = versions[i][1:]
					continue
				}

				// Truncate non-empty version and type matches
				if latest.header.VersionID == ver.header.VersionID {
					versions[i] = versions[i][1:]
					continue
				}
				// Skip versions with version id we already emitted.
				// NOTE(review): this `continue` only continues the inner
				// scan over `merged`, not the enclosing version loop —
				// after the scan finishes, control still reaches the
				// `break` below. Confirm this early-exit is intended.
				for _, mergedV := range merged {
					if ver.header.VersionID == mergedV.header.VersionID {
						versions[i] = versions[i][1:]
						continue
					}
				}
				// Keep top entry (and remaining)...
				break
			}
		}
	}
	// Sanity check. Enable if duplicates show up.
	if false {
		found := make(map[[16]byte]struct{})
		for _, ver := range merged {
			if _, ok := found[ver.header.VersionID]; ok {
				panic("found dupe")
			}
			found[ver.header.VersionID] = struct{}{}
		}
	}
	return merged
}
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
// xlMetaBuf is a raw, still-encoded xl.meta buffer. Its methods decode
// only as much of the payload as each operation needs.
type xlMetaBuf []byte
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
// ToFileInfo converts xlMetaV2 into a common FileInfo datastructure
// for consumption across callers.
//
// Buffer-based variant: decodes version headers lazily from the raw
// xl.meta payload instead of requiring a fully-parsed xlMetaV2.
// Behavior mirrors xlMetaV2.ToFileInfo.
func (x xlMetaBuf) ToFileInfo(volume, path, versionID string) (fi FileInfo, err error) {
	var uv uuid.UUID
	// Empty and null version IDs keep uv as the zero UUID.
	if versionID != "" && versionID != nullVersionID {
		uv, err = uuid.Parse(versionID)
		if err != nil {
			logger.LogIf(GlobalContext, fmt.Errorf("invalid versionID specified %s", versionID))
			return fi, errFileVersionNotFound
		}
	}
	versions, headerV, metaV, buf, err := decodeXLHeaders(x)
	if err != nil {
		return fi, err
	}
	var header xlMetaV2VersionHeader
	var succModTime int64
	isLatest := true
	nonFreeVersions := versions
	found := false
	// The callback captures found/isLatest/succModTime; it keeps
	// returning nil after a match so free versions are still counted.
	err = decodeVersions(buf, versions, func(idx int, hdr, meta []byte) error {
		if _, err := header.unmarshalV(headerV, hdr); err != nil {
			return err
		}

		// skip listing free-version unless explicitly requested via versionID
		if header.FreeVersion() {
			nonFreeVersions--
			if header.VersionID != uv {
				return nil
			}
		}
		if found {
			return nil
		}

		// We need a specific version, skip...
		if versionID != "" && uv != header.VersionID {
			isLatest = false
			succModTime = header.ModTime
			return nil
		}

		// We found what we need.
		found = true
		var version xlMetaV2Version
		// Full decode only for the selected version.
		if _, err := version.unmarshalV(metaV, meta); err != nil {
			return err
		}
		if fi, err = version.ToFileInfo(volume, path); err != nil {
			return err
		}
		fi.IsLatest = isLatest
		if succModTime != 0 {
			fi.SuccessorModTime = time.Unix(0, succModTime)
		}
		return nil
	})
	if !found {
		// NOTE(review): a decode error from decodeVersions is dropped
		// here in favor of not-found errors — confirm intended.
		if versionID == "" {
			return FileInfo{}, errFileNotFound
		}

		return FileInfo{}, errFileVersionNotFound
	}
	fi.NumVersions = nonFreeVersions
	return fi, err
}
|
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
// ListVersions lists current versions, and current deleted
|
|
|
|
// versions returns error for unexpected entries.
|
|
|
|
// showPendingDeletes is set to true if ListVersions needs to list objects marked deleted
|
|
|
|
// but waiting to be replicated
|
|
|
|
func (x xlMetaBuf) ListVersions(volume, path string) ([]FileInfo, error) {
|
|
|
|
vers, _, metaV, buf, err := decodeXLHeaders(x)
|
2021-05-21 12:10:54 -04:00
|
|
|
if err != nil {
|
2021-11-18 15:15:22 -05:00
|
|
|
return nil, err
|
2021-05-21 12:10:54 -04:00
|
|
|
}
|
2021-11-18 15:15:22 -05:00
|
|
|
var succModTime time.Time
|
|
|
|
isLatest := true
|
|
|
|
dst := make([]FileInfo, 0, vers)
|
|
|
|
var xl xlMetaV2Version
|
|
|
|
err = decodeVersions(buf, vers, func(idx int, hdr, meta []byte) error {
|
|
|
|
if _, err := xl.unmarshalV(metaV, meta); err != nil {
|
|
|
|
return err
|
2021-05-21 12:10:54 -04:00
|
|
|
}
|
2021-11-18 15:15:22 -05:00
|
|
|
if !xl.Valid() {
|
|
|
|
return errFileCorrupt
|
2021-05-21 12:10:54 -04:00
|
|
|
}
|
2021-11-18 15:15:22 -05:00
|
|
|
fi, err := xl.ToFileInfo(volume, path)
|
2021-05-21 12:10:54 -04:00
|
|
|
if err != nil {
|
2021-11-18 15:15:22 -05:00
|
|
|
return err
|
2021-05-21 12:10:54 -04:00
|
|
|
}
|
2021-11-18 15:15:22 -05:00
|
|
|
fi.IsLatest = isLatest
|
|
|
|
fi.SuccessorModTime = succModTime
|
|
|
|
fi.NumVersions = vers
|
|
|
|
isLatest = false
|
|
|
|
succModTime = xl.getModTime()
|
|
|
|
|
|
|
|
dst = append(dst, fi)
|
2021-05-21 12:10:54 -04:00
|
|
|
return nil
|
2021-11-18 15:15:22 -05:00
|
|
|
})
|
|
|
|
return dst, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// IsLatestDeleteMarker returns true if latest version is a deletemarker or there are no versions.
|
|
|
|
// If any error occurs false is returned.
|
|
|
|
func (x xlMetaBuf) IsLatestDeleteMarker() bool {
|
|
|
|
vers, headerV, _, buf, err := decodeXLHeaders(x)
|
2021-05-21 12:10:54 -04:00
|
|
|
if err != nil {
|
2021-11-18 15:15:22 -05:00
|
|
|
return false
|
2021-05-21 12:10:54 -04:00
|
|
|
}
|
2021-11-18 15:15:22 -05:00
|
|
|
if vers == 0 {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
isDeleteMarker := false
|
2021-05-21 12:10:54 -04:00
|
|
|
|
2021-11-18 15:15:22 -05:00
|
|
|
_ = decodeVersions(buf, vers, func(idx int, hdr, _ []byte) error {
|
|
|
|
var xl xlMetaV2VersionHeader
|
|
|
|
if _, err := xl.unmarshalV(headerV, hdr); err != nil {
|
|
|
|
return errDoneForNow
|
2021-05-21 12:10:54 -04:00
|
|
|
}
|
2021-11-18 15:15:22 -05:00
|
|
|
isDeleteMarker = xl.Type == DeleteType
|
|
|
|
return errDoneForNow
|
|
|
|
})
|
|
|
|
return isDeleteMarker
|
2021-05-21 12:10:54 -04:00
|
|
|
}
|