mirror of https://github.com/minio/minio.git
fix: encrypt checksums in metadata (#15620)
parent dec942beb6
commit 8e4a45ec41
@@ -704,16 +704,17 @@ func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) Initi
 
 // generates CompleteMultipartUploadResponse for given bucket, key, location and ETag.
 func generateCompleteMultpartUploadResponse(bucket, key, location string, oi ObjectInfo) CompleteMultipartUploadResponse {
+	cs := oi.decryptChecksums()
 	c := CompleteMultipartUploadResponse{
 		Location: location,
 		Bucket:   bucket,
 		Key:      key,
 		// AWS S3 quotes the ETag in XML, make sure we are compatible here.
 		ETag:           "\"" + oi.ETag + "\"",
-		ChecksumSHA1:   oi.Checksum[hash.ChecksumSHA1.String()],
-		ChecksumSHA256: oi.Checksum[hash.ChecksumSHA256.String()],
-		ChecksumCRC32:  oi.Checksum[hash.ChecksumCRC32.String()],
-		ChecksumCRC32C: oi.Checksum[hash.ChecksumCRC32C.String()],
+		ChecksumSHA1:   cs[hash.ChecksumSHA1.String()],
+		ChecksumSHA256: cs[hash.ChecksumSHA256.String()],
+		ChecksumCRC32:  cs[hash.ChecksumCRC32.String()],
+		ChecksumCRC32C: cs[hash.ChecksumCRC32C.String()],
 	}
 	return c
 }
@@ -19,6 +19,7 @@ package cmd
 
 import (
 	"bufio"
+	"bytes"
 	"context"
 	"crypto/hmac"
 	"crypto/rand"
@@ -37,6 +38,7 @@ import (
 	"github.com/minio/minio/internal/crypto"
 	"github.com/minio/minio/internal/etag"
 	"github.com/minio/minio/internal/fips"
+	"github.com/minio/minio/internal/hash"
 	"github.com/minio/minio/internal/hash/sha256"
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/kms"
@@ -1052,3 +1054,60 @@ func deriveClientKey(clientKey [32]byte, bucket, object string) [32]byte {
 	mac.Sum(key[:0])
 	return key
 }
+
+type (
+	objectMetaEncryptFn func(baseKey string, data []byte) []byte
+	objectMetaDecryptFn func(baseKey string, data []byte) ([]byte, error)
+)
+
+// metadataEncrypter returns a function that will read data from input,
+// encrypt it using the provided key and return the result.
+// 0 sized inputs are passed through.
+func metadataEncrypter(key crypto.ObjectKey) objectMetaEncryptFn {
+	return func(baseKey string, data []byte) []byte {
+		if len(data) == 0 {
+			return data
+		}
+		var buffer bytes.Buffer
+		mac := hmac.New(sha256.New, key[:])
+		mac.Write([]byte(baseKey))
+		if _, err := sio.Encrypt(&buffer, bytes.NewReader(data), sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()}); err != nil {
+			logger.CriticalIf(context.Background(), errors.New("unable to encrypt using object key"))
+		}
+		return buffer.Bytes()
+	}
+}
+
+// metadataDecrypter reverses metadataEncrypter.
+func (o *ObjectInfo) metadataDecrypter() objectMetaDecryptFn {
+	return func(baseKey string, input []byte) ([]byte, error) {
+		if len(input) == 0 {
+			return input, nil
+		}
+
+		key, err := decryptObjectInfo(nil, o.Bucket, o.Name, o.UserDefined)
+		if err != nil {
+			return nil, err
+		}
+		mac := hmac.New(sha256.New, key)
+		mac.Write([]byte(baseKey))
+		return sio.DecryptBuffer(nil, input, sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()})
+	}
+}
+
+// decryptChecksums will attempt to decode checksums and return it/them if set.
+func (o *ObjectInfo) decryptChecksums() map[string]string {
+	data := o.Checksum
+	if len(data) == 0 {
+		return nil
+	}
+	if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted {
+		decrypted, err := o.metadataDecrypter()("object-checksum", data)
+		if err != nil {
+			logger.LogIf(GlobalContext, err)
+			return nil
+		}
+		data = decrypted
+	}
+	return hash.ReadCheckSums(data)
+}
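
The helpers above derive a purpose-bound key by HMAC-ing the object key with a baseKey string and then run the payload through sio (DARE). Below is a minimal standalone sketch of that round trip, assuming a 32-byte object key; the names sealMeta/openMeta and the omission of an explicit CipherSuites value are illustrative choices for the example, not MinIO's API.

package main

import (
	"bytes"
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha256"
	"fmt"

	"github.com/minio/sio"
)

// sealMeta derives a purpose-bound key with HMAC(objectKey, baseKey) and
// encrypts the payload with sio, mirroring metadataEncrypter above.
func sealMeta(objectKey [32]byte, baseKey string, plaintext []byte) ([]byte, error) {
	mac := hmac.New(sha256.New, objectKey[:])
	mac.Write([]byte(baseKey)) // e.g. "object-checksum" or "compression-index"
	var buf bytes.Buffer
	if _, err := sio.Encrypt(&buf, bytes.NewReader(plaintext), sio.Config{Key: mac.Sum(nil)}); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// openMeta reverses sealMeta, mirroring metadataDecrypter above.
func openMeta(objectKey [32]byte, baseKey string, sealed []byte) ([]byte, error) {
	mac := hmac.New(sha256.New, objectKey[:])
	mac.Write([]byte(baseKey))
	return sio.DecryptBuffer(nil, sealed, sio.Config{Key: mac.Sum(nil)})
}

func main() {
	var key [32]byte
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}
	sealed, err := sealMeta(key, "object-checksum", []byte("example checksum blob"))
	if err != nil {
		panic(err)
	}
	plain, err := openMeta(key, "object-checksum", sealed)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plain)) // example checksum blob
}

Because the baseKey is folded into the HMAC, a blob sealed under "object-checksum" cannot be opened under "compression-index", which is why both call sites pass an explicit label.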
@@ -603,7 +603,7 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
 			return pi, InvalidArgument{
 				Bucket: bucket,
 				Object: fi.Name,
-				Err:    fmt.Errorf("checksum missing"),
+				Err:    fmt.Errorf("checksum missing, want %s, got %s", cs, r.ContentCRCType().String()),
 			}
 		}
 	}
@@ -1170,7 +1170,10 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 	}
 	if checksumType.IsSet() {
 		cs := hash.NewChecksumFromData(checksumType, checksumCombined)
-		fi.Checksum = map[string]string{cs.Type.String(): cs.Encoded}
+		fi.Checksum = cs.AppendTo(nil)
+		if opts.EncryptFn != nil {
+			fi.Checksum = opts.EncryptFn("object-checksum", fi.Checksum)
+		}
 	}
 	delete(fi.Metadata, hash.MinIOMultipartChecksum) // Not needed in final object.
 
@@ -962,7 +962,10 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 	}
 
 	fi.DataDir = mustGetUUID()
-	fi.Checksum = opts.WantChecksum.AsMap()
+	fi.Checksum = opts.WantChecksum.AppendTo(nil)
+	if opts.EncryptFn != nil {
+		fi.Checksum = opts.EncryptFn("object-checksum", fi.Checksum)
+	}
 	uniqueID := mustGetUUID()
 	tempObj := uniqueID
 
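
Both write paths above follow the same shape: encode the checksum with AppendTo, then seal it only when the caller installed an EncryptFn (i.e. the object is SSE-encrypted). A toy, self-contained sketch of that conditional, with made-up names standing in for the MinIO types:

package main

import "fmt"

// objectMetaEncryptFn matches the function type introduced in this commit.
type objectMetaEncryptFn func(baseKey string, data []byte) []byte

type putOpts struct {
	EncryptFn objectMetaEncryptFn // nil for unencrypted objects
}

// storeChecksum only shows the "encode, then maybe seal" ordering used by
// putObject and CompleteMultipartUpload; it performs no real encryption.
func storeChecksum(encoded []byte, opts putOpts) []byte {
	if opts.EncryptFn != nil {
		return opts.EncryptFn("object-checksum", encoded)
	}
	return encoded
}

func main() {
	encoded := []byte{0x04, 0xDE, 0xAD, 0xBE, 0xEF} // stand-in for cs.AppendTo(nil)
	fmt.Println(storeChecksum(encoded, putOpts{}))  // stored as-is
	fmt.Println(storeChecksum(encoded, putOpts{
		EncryptFn: func(baseKey string, data []byte) []byte {
			return append([]byte(baseKey+":sealed:"), data...) // stand-in for real sealing
		},
	}))
}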
@@ -179,8 +179,9 @@ type ObjectInfo struct {
 	// The modtime of the successor object version if any
 	SuccessorModTime time.Time
 
-	// User-Defined object tags
-	Checksum map[string]string
+	// Checksums added on upload.
+	// Encoded, maybe encrypted.
+	Checksum []byte
 }
 
 // ArchiveInfo returns any saved zip archive meta information
@@ -78,6 +78,9 @@ type ObjectOptions struct {
 	// Use the maximum parity (N/2), used when saving server configuration files
 	MaxParity bool
 
+	// Provides a per object encryption function, allowing metadata encryption.
+	EncryptFn objectMetaEncryptFn
+
 	// SkipDecommissioned set to 'true' if the call requires skipping the pool being decommissioned.
 	// mainly set for certain WRITE operations.
 	SkipDecommissioned bool
@@ -20,7 +20,6 @@ package cmd
 import (
 	"bytes"
 	"context"
-	"crypto/hmac"
 	"encoding/hex"
 	"errors"
 	"fmt"
@@ -44,15 +43,12 @@ import (
 	"github.com/minio/minio/internal/config/dns"
 	"github.com/minio/minio/internal/config/storageclass"
 	"github.com/minio/minio/internal/crypto"
-	"github.com/minio/minio/internal/fips"
 	"github.com/minio/minio/internal/hash"
-	"github.com/minio/minio/internal/hash/sha256"
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/ioutil"
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/pkg/trie"
 	"github.com/minio/pkg/wildcard"
-	"github.com/minio/sio"
 )
 
 const (
@@ -720,7 +716,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
 			}
 			// Decompression reader.
 			var dopts []s2.ReaderOption
-			if off > 0 {
+			if off > 0 || decOff > 0 {
 				// We are not starting at the beginning, so ignore stream identifiers.
 				dopts = append(dopts, s2.ReaderIgnoreStreamIdentifier())
 			}
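
For context, s2.ReaderIgnoreStreamIdentifier comes from github.com/klauspost/compress/s2. A small self-contained sketch (payload and variable names invented for illustration) of the two ways a reader is built in the hunk above:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/s2"
)

func main() {
	var buf bytes.Buffer
	w := s2.NewWriter(&buf)
	w.Write([]byte("hello, s2 stream"))
	w.Close()

	// Reading from offset zero: the stream identifier block is present, so the
	// default reader options work.
	full, _ := io.ReadAll(s2.NewReader(bytes.NewReader(buf.Bytes())))
	fmt.Println(string(full))

	// A reader that starts mid-stream (off > 0 || decOff > 0 above) is built
	// with the option instead, since the identifier block will not be seen.
	_ = s2.NewReader(bytes.NewReader(nil), s2.ReaderIgnoreStreamIdentifier())
}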
@@ -845,6 +841,8 @@ func (g *GetObjectReader) Close() error {
 	return nil
 }
 
+// compressionIndexEncrypter returns a function that will read data from input,
+// encrypt it using the provided key and return the result.
 func compressionIndexEncrypter(key crypto.ObjectKey, input func() []byte) func() []byte {
 	var data []byte
 	var fetched bool
@@ -853,31 +851,13 @@ func compressionIndexEncrypter(key crypto.ObjectKey, input func() []byte) func()
 			data = input()
 			fetched = true
 		}
-		if len(data) == 0 {
-			return data
-		}
-		var buffer bytes.Buffer
-		mac := hmac.New(sha256.New, key[:])
-		mac.Write([]byte("compression-index"))
-		if _, err := sio.Encrypt(&buffer, bytes.NewReader(data), sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()}); err != nil {
-			logger.CriticalIf(context.Background(), errors.New("unable to encrypt compression index using object key"))
-		}
-		return buffer.Bytes()
+		return metadataEncrypter(key)("compression-index", data)
 	}
 }
 
+// compressionIndexDecrypt reverses compressionIndexEncrypter.
 func (o *ObjectInfo) compressionIndexDecrypt(input []byte) ([]byte, error) {
-	if len(input) == 0 {
-		return input, nil
-	}
-
-	key, err := decryptObjectInfo(nil, o.Bucket, o.Name, o.UserDefined)
-	if err != nil {
-		return nil, err
-	}
-	mac := hmac.New(sha256.New, key)
-	mac.Write([]byte("compression-index"))
-	return sio.DecryptBuffer(nil, input, sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()})
+	return o.metadataDecrypter()("compression-index", input)
 }
 
 // SealMD5CurrFn seals md5sum with object encryption key and returns sealed
@@ -267,7 +267,7 @@ func setPutObjHeaders(w http.ResponseWriter, objInfo ObjectInfo, delete bool) {
 			lc.SetPredictionHeaders(w, objInfo.ToLifecycleOpts())
 		}
 	}
-	hash.AddChecksumHeader(w, objInfo.Checksum)
+	hash.AddChecksumHeader(w, objInfo.decryptChecksums())
 }
 
 func deleteObjectVersions(ctx context.Context, o ObjectLayer, bucket string, toDel []ObjectToDelete) {
@@ -520,7 +520,7 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
 	}
 
 	if r.Header.Get(xhttp.AmzChecksumMode) == "ENABLED" {
-		hash.AddChecksumHeader(w, objInfo.Checksum)
+		hash.AddChecksumHeader(w, objInfo.decryptChecksums())
 	}
 
 	if err = setObjectHeaders(w, objInfo, rs, opts); err != nil {
@@ -788,7 +788,7 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
 	}
 
 	if r.Header.Get(xhttp.AmzChecksumMode) == "ENABLED" {
-		hash.AddChecksumHeader(w, objInfo.Checksum)
+		hash.AddChecksumHeader(w, objInfo.decryptChecksums())
 	}
 
 	// Set standard object headers.
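
hash.AddChecksumHeader is MinIO-internal; roughly, it publishes the decrypted map as x-amz-checksum-<algo> response headers. A hand-rolled approximation for orientation only (the helper name and lowercase mapping are assumptions, not MinIO's code):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

// addChecksumHeaders is an illustrative stand-in: emit one x-amz-checksum-*
// header per entry of the decrypted checksum map.
func addChecksumHeaders(w http.ResponseWriter, cs map[string]string) {
	for algo, value := range cs {
		// e.g. "SHA256" -> "x-amz-checksum-sha256"
		w.Header().Set("x-amz-checksum-"+strings.ToLower(algo), value)
	}
}

func main() {
	rec := httptest.NewRecorder()
	addChecksumHeaders(rec, map[string]string{"SHA256": "n4bQgYhMfWWaL+qgxVrQFaO/TxsrC4Is0V1sFbDwCgg="})
	fmt.Println(rec.Header())
}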
@@ -1850,6 +1850,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 			if opts.IndexCB != nil {
 				opts.IndexCB = compressionIndexEncrypter(objectEncryptionKey, opts.IndexCB)
 			}
+			opts.EncryptFn = metadataEncrypter(objectEncryptionKey)
 		}
 	}
 
@@ -389,8 +389,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 		return
 	}
 
-	rawReader := hashReader
-	pReader := NewPutObjReader(rawReader)
+	pReader := NewPutObjReader(hashReader)
 
 	_, isEncrypted := crypto.IsEncrypted(mi.UserDefined)
 	var objectEncryptionKey crypto.ObjectKey
@@ -446,14 +445,21 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 			return
 		}
+		if err := hashReader.AddChecksum(r, true); err != nil {
+			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
+			return
+		}
+
 		pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 			return
 		}
+
 		if idxCb != nil {
 			idxCb = compressionIndexEncrypter(objectEncryptionKey, idxCb)
 		}
+		opts.EncryptFn = metadataEncrypter(objectEncryptionKey)
 	}
 	opts.IndexCB = idxCb
 
@@ -231,8 +231,7 @@ type FileInfo struct {
 	DiskMTime time.Time `msg:"dmt"`
 
 	// Combined checksum when object was uploaded.
-	// Format is type:base64(checksum).
-	Checksum map[string]string `msg:"cs,allownil"`
+	Checksum []byte `msg:"cs,allownil"`
 }
 
 // Equals checks if fi(FileInfo) matches ofi(FileInfo)
@@ -778,43 +778,10 @@ func (z *FileInfo) DecodeMsg(dc *msgp.Reader) (err error) {
 		err = msgp.WrapError(err, "DiskMTime")
 		return
 	}
-	if dc.IsNil() {
-		err = dc.ReadNil()
-		if err != nil {
-			err = msgp.WrapError(err)
-			return
-		}
-		z.Checksum = nil
-	} else {
-		var zb0004 uint32
-		zb0004, err = dc.ReadMapHeader()
-		if err != nil {
-			err = msgp.WrapError(err, "Checksum")
-			return
-		}
-		if z.Checksum == nil {
-			z.Checksum = make(map[string]string, zb0004)
-		} else if len(z.Checksum) > 0 {
-			for key := range z.Checksum {
-				delete(z.Checksum, key)
-			}
-		}
-		for zb0004 > 0 {
-			zb0004--
-			var za0004 string
-			var za0005 string
-			za0004, err = dc.ReadString()
-			if err != nil {
-				err = msgp.WrapError(err, "Checksum")
-				return
-			}
-			za0005, err = dc.ReadString()
-			if err != nil {
-				err = msgp.WrapError(err, "Checksum", za0004)
-				return
-			}
-			z.Checksum[za0004] = za0005
-		}
-	}
+	z.Checksum, err = dc.ReadBytes(z.Checksum)
+	if err != nil {
+		err = msgp.WrapError(err, "Checksum")
+		return
+	}
 	return
 }
@@ -980,29 +947,10 @@ func (z *FileInfo) EncodeMsg(en *msgp.Writer) (err error) {
 		err = msgp.WrapError(err, "DiskMTime")
 		return
 	}
-	if z.Checksum == nil { // allownil: if nil
-		err = en.WriteNil()
-		if err != nil {
-			return
-		}
-	} else {
-		err = en.WriteMapHeader(uint32(len(z.Checksum)))
-		if err != nil {
-			err = msgp.WrapError(err, "Checksum")
-			return
-		}
-		for za0004, za0005 := range z.Checksum {
-			err = en.WriteString(za0004)
-			if err != nil {
-				err = msgp.WrapError(err, "Checksum")
-				return
-			}
-			err = en.WriteString(za0005)
-			if err != nil {
-				err = msgp.WrapError(err, "Checksum", za0004)
-				return
-			}
-		}
-	}
+	err = en.WriteBytes(z.Checksum)
+	if err != nil {
+		err = msgp.WrapError(err, "Checksum")
+		return
+	}
 	return
 }
@@ -1058,15 +1006,7 @@ func (z *FileInfo) MarshalMsg(b []byte) (o []byte, err error) {
 	o = msgp.AppendBool(o, z.Fresh)
 	o = msgp.AppendInt(o, z.Idx)
 	o = msgp.AppendTime(o, z.DiskMTime)
-	if z.Checksum == nil { // allownil: if nil
-		o = msgp.AppendNil(o)
-	} else {
-		o = msgp.AppendMapHeader(o, uint32(len(z.Checksum)))
-		for za0004, za0005 := range z.Checksum {
-			o = msgp.AppendString(o, za0004)
-			o = msgp.AppendString(o, za0005)
-		}
-	}
+	o = msgp.AppendBytes(o, z.Checksum)
 	return
 }
 
@@ -1254,39 +1194,10 @@ func (z *FileInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
 		err = msgp.WrapError(err, "DiskMTime")
 		return
 	}
-	if msgp.IsNil(bts) {
-		bts = bts[1:]
-		z.Checksum = nil
-	} else {
-		var zb0004 uint32
-		zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
-		if err != nil {
-			err = msgp.WrapError(err, "Checksum")
-			return
-		}
-		if z.Checksum == nil {
-			z.Checksum = make(map[string]string, zb0004)
-		} else if len(z.Checksum) > 0 {
-			for key := range z.Checksum {
-				delete(z.Checksum, key)
-			}
-		}
-		for zb0004 > 0 {
-			var za0004 string
-			var za0005 string
-			zb0004--
-			za0004, bts, err = msgp.ReadStringBytes(bts)
-			if err != nil {
-				err = msgp.WrapError(err, "Checksum")
-				return
-			}
-			za0005, bts, err = msgp.ReadStringBytes(bts)
-			if err != nil {
-				err = msgp.WrapError(err, "Checksum", za0004)
-				return
-			}
-			z.Checksum[za0004] = za0005
-		}
-	}
+	z.Checksum, bts, err = msgp.ReadBytesBytes(bts, z.Checksum)
+	if err != nil {
+		err = msgp.WrapError(err, "Checksum")
+		return
+	}
 	o = bts
 	return
@@ -1305,13 +1216,7 @@ func (z *FileInfo) Msgsize() (s int) {
 	for za0003 := range z.Parts {
 		s += z.Parts[za0003].Msgsize()
 	}
-	s += z.Erasure.Msgsize() + msgp.BoolSize + z.ReplicationState.Msgsize() + msgp.BytesPrefixSize + len(z.Data) + msgp.IntSize + msgp.TimeSize + msgp.BoolSize + msgp.IntSize + msgp.TimeSize + msgp.MapHeaderSize
-	if z.Checksum != nil {
-		for za0004, za0005 := range z.Checksum {
-			_ = za0005
-			s += msgp.StringPrefixSize + len(za0004) + msgp.StringPrefixSize + len(za0005)
-		}
-	}
+	s += z.Erasure.Msgsize() + msgp.BoolSize + z.ReplicationState.Msgsize() + msgp.BytesPrefixSize + len(z.Data) + msgp.IntSize + msgp.TimeSize + msgp.BoolSize + msgp.IntSize + msgp.TimeSize + msgp.BytesPrefixSize + len(z.Checksum)
 	return
 }
 
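
The regenerated msgp code above swaps the map encoding for a plain bin field, so the round trip reduces to the two primitives shown in the generated functions. A standalone sketch using only those primitives (the sample bytes are arbitrary):

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	checksum := []byte{0x04, 0xDE, 0xAD, 0xBE, 0xEF} // encoded checksum blob
	buf := msgp.AppendBytes(nil, checksum)           // marshal, as in MarshalMsg

	out, rest, err := msgp.ReadBytesBytes(buf, nil) // unmarshal, as in UnmarshalMsg
	fmt.Println(out, len(rest), err)
}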
@@ -36,7 +36,6 @@ import (
 	jsoniter "github.com/json-iterator/go"
 	"github.com/minio/minio/internal/bucket/lifecycle"
 	"github.com/minio/minio/internal/bucket/replication"
-	"github.com/minio/minio/internal/hash"
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/logger"
 	"github.com/tinylib/msgp/msgp"
@@ -640,7 +639,7 @@ func (j xlMetaV2Object) ToFileInfo(volume, path string) (FileInfo, error) {
 		fi.TransitionTier = string(sc)
 	}
 	if crcs := j.MetaSys[ReservedMetadataPrefixLower+"crc"]; len(crcs) > 0 {
-		fi.Checksum = hash.ReadCheckSums(crcs)
+		fi.Checksum = crcs
 	}
 	return fi, nil
 }
@@ -1541,14 +1540,7 @@ func (x *xlMetaV2) AddVersion(fi FileInfo) error {
 			ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionTier] = []byte(fi.TransitionTier)
 		}
 		if len(fi.Checksum) > 0 {
-			res := make([]byte, 0, len(fi.Checksum)*40)
-			for k, v := range fi.Checksum {
-				crc := hash.NewChecksumString(k, v)
-				if crc.Valid() {
-					res = crc.AppendTo(res)
-				}
-			}
-			ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+"crc"] = res
+			ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+"crc"] = fi.Checksum
 		}
 	}
 
@@ -215,7 +215,10 @@ func NewChecksumString(alg, value string) *Checksum {
 
 // AppendTo will append the checksum to b.
 // ReadCheckSums reads the values back.
-func (c Checksum) AppendTo(b []byte) []byte {
+func (c *Checksum) AppendTo(b []byte) []byte {
+	if c == nil {
+		return nil
+	}
 	var tmp [binary.MaxVarintLen32]byte
 	n := binary.PutUvarint(tmp[:], uint64(c.Type))
 	crc := c.Raw()
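
The nil guard is needed because callers may now pass a nil *Checksum, for example when no checksum was supplied on upload. As a rough standalone sketch of the uvarint-tag-plus-raw-digest idea visible in AppendTo above (the tag value and helper name are invented, and MinIO's full wire format is not reproduced here):

package main

import (
	"encoding/binary"
	"fmt"
)

const crc32cType = 4 // hypothetical algorithm tag value

// appendChecksum writes a uvarint algorithm tag followed by the raw digest,
// mirroring the binary.PutUvarint + Raw() pattern in the hunk above.
func appendChecksum(b []byte, typ uint64, raw []byte) []byte {
	var tmp [binary.MaxVarintLen32]byte
	n := binary.PutUvarint(tmp[:], typ)
	b = append(b, tmp[:n]...)
	return append(b, raw...) // digest length is implied by the algorithm
}

func main() {
	enc := appendChecksum(nil, crc32cType, []byte{0xDE, 0xAD, 0xBE, 0xEF})
	typ, n := binary.Uvarint(enc)
	fmt.Println(typ, enc[n:]) // 4 [222 173 190 239]
}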