with xlv2 format we never had to fill in checksumInfo() (#17963)

- this PR avoids sending a large ChecksumInfo slice
  when it's not needed

- also for a file with XLV2 format there is no reason
  to allocate Checksum slice while reading
This commit is contained in:
Harshavardhana 2023-09-01 13:45:58 -07:00 committed by GitHub
parent 6a8d8f34a5
commit 18b3655c99
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 5 additions and 39 deletions

View File

@ -620,7 +620,7 @@ func TestDisksWithAllParts(t *testing.T) {
diskFailures[15] = "part.1"
for diskIndex, partName := range diskFailures {
for i := range partsMetadata[diskIndex].Erasure.Checksums {
for i := range partsMetadata[diskIndex].Parts {
if fmt.Sprintf("part.%d", i+1) == partName {
filePath := pathJoin(erasureDisks[diskIndex].String(), bucket, object, partsMetadata[diskIndex].DataDir, partName)
f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0)

View File

@ -714,11 +714,6 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
partsMetadata[i].DataDir = dstDataDir
partsMetadata[i].AddObjectPart(partNumber, "", partSize, partActualSize, partModTime, partIdx, partChecksums)
partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
PartNumber: partNumber,
Algorithm: checksumAlgo,
Hash: bitrotWriterSum(writers[i]),
})
if len(inlineBuffers) > 0 && inlineBuffers[i] != nil {
partsMetadata[i].Data = inlineBuffers[i].Bytes()
partsMetadata[i].SetInlineData()

View File

@ -41,17 +41,6 @@ const minIOErasureUpgraded = "x-minio-internal-erasure-upgraded"
const erasureAlgorithm = "rs-vandermonde"
// AddChecksumInfo adds a checksum of a part.
//
// If a checksum for the same part number is already recorded, it is
// overwritten in place; otherwise the new entry is appended.
func (e *ErasureInfo) AddChecksumInfo(ckSumInfo ChecksumInfo) {
	// Replace an existing entry for this part, if one is present.
	for idx := range e.Checksums {
		if e.Checksums[idx].PartNumber == ckSumInfo.PartNumber {
			e.Checksums[idx] = ckSumInfo
			return
		}
	}
	// No entry for this part yet; record a new one.
	e.Checksums = append(e.Checksums, ckSumInfo)
}
// GetChecksumInfo - get checksum of a part.
func (e ErasureInfo) GetChecksumInfo(partNumber int) (ckSum ChecksumInfo) {
for _, sum := range e.Checksums {
@ -60,7 +49,7 @@ func (e ErasureInfo) GetChecksumInfo(partNumber int) (ckSum ChecksumInfo) {
return sum
}
}
return ChecksumInfo{}
return ChecksumInfo{Algorithm: DefaultBitrotAlgorithm}
}
// ShardFileSize - returns final erasure size from original size.

View File

@ -960,11 +960,6 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *
}
partsMetadata[i].Data = inlineBuffers[i].Bytes()
partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, index, nil)
partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
PartNumber: 1,
Algorithm: DefaultBitrotAlgorithm,
Hash: bitrotWriterSum(w),
})
}
// Fill all the necessary metadata.
@ -1296,11 +1291,6 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
}
// No need to add checksum to part. We already have it on the object.
partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, compIndex, nil)
partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
PartNumber: 1,
Algorithm: DefaultBitrotAlgorithm,
Hash: bitrotWriterSum(w),
})
partsMetadata[i].Versioned = opts.Versioned || opts.VersionSuspended
}

View File

@ -612,17 +612,9 @@ func (j xlMetaV2Object) ToFileInfo(volume, path string) (FileInfo, error) {
fi.Parts[i].Index = j.PartIndices[i]
}
}
fi.Erasure.Checksums = make([]ChecksumInfo, len(j.PartSizes))
for i := range fi.Parts {
fi.Erasure.Checksums[i].PartNumber = fi.Parts[i].Number
switch j.BitrotChecksumAlgo {
case HighwayHash:
fi.Erasure.Checksums[i].Algorithm = HighwayHash256S
fi.Erasure.Checksums[i].Hash = []byte{}
default:
return FileInfo{}, fmt.Errorf("unknown BitrotChecksumAlgo: %v", j.BitrotChecksumAlgo)
}
}
// fi.Erasure.Checksums is left empty: we have not stored any
// whole checksums for many years now, so there is no need to allocate.
fi.Metadata = make(map[string]string, len(j.MetaUser)+len(j.MetaSys))
for k, v := range j.MetaUser {
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w