Mirror of https://github.com/minio/minio.git
convert ETag properly for all gateways (#5099)
Previously, the ID/ETag returned by the backend service was used as-is, which causes failures with s3cmd-like tools that use the ETag as a checksum to validate data. This is fixed by appending "-1" to the ETag so clients do not interpret it as an MD5 sum. Refer minio/mint#193 minio/mint#201
Parent: d23ded0d83
Commit: bc8b936d4b
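For orientation, below is a condensed, runnable sketch of the conversion this commit centralizes in cmd/utils.go (see the cmd/utils.go hunk further down). canonicalizeETag is simplified here to quote-trimming only, and the sample backend identifiers are invented. The suffix gives the ETag the shape of an S3 multipart ETag, which checksum-validating clients do not compare against a locally computed MD5.

package main

import (
	"fmt"
	"strings"
)

// Stand-in for minio's canonicalizeETag: here we only trim surrounding quotes.
func canonicalizeETag(etag string) string {
	return strings.Trim(etag, `"`)
}

// toS3ETag mirrors the helper this commit adds to cmd/utils.go: append "-1"
// so clients do not interpret the backend identifier as an MD5 checksum.
func toS3ETag(etag string) string {
	etag = canonicalizeETag(etag)
	if !strings.HasSuffix(etag, "-1") {
		etag += "-1"
	}
	return etag
}

func main() {
	// Invented sample identifiers: an Azure blob ETag, a B2 SHA1,
	// and a GCS CRC32C rendered in decimal. None of them are MD5 sums.
	fmt.Println(toS3ETag(`"0x8D5A1B2C3D4E5F6"`))                      // 0x8D5A1B2C3D4E5F6-1
	fmt.Println(toS3ETag("2346ad27d7568ba9896f1b7da6b5991251debdf2")) // 2346...debdf2-1
	fmt.Println(toS3ETag("1912620726"))                               // 1912620726-1
}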
Azure gateway:

@@ -162,11 +162,6 @@ func azurePropertiesToS3Meta(meta storage.BlobMetadata, props storage.BlobProper
 	return s3Metadata
 }
 
-// Append "-1" to etag so that clients do not interpret it as MD5.
-func azureToS3ETag(etag string) string {
-	return canonicalizeETag(etag) + "-1"
-}
-
 // azureObjects - Implements Object layer for Azure blob storage.
 type azureObjects struct {
 	gatewayUnsupported
@@ -420,7 +415,7 @@ func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, max
 			Name:            object.Name,
 			ModTime:         time.Time(object.Properties.LastModified),
 			Size:            object.Properties.ContentLength,
-			ETag:            azureToS3ETag(object.Properties.Etag),
+			ETag:            toS3ETag(object.Properties.Etag),
 			ContentType:     object.Properties.ContentType,
 			ContentEncoding: object.Properties.ContentEncoding,
 		})
@@ -510,7 +505,7 @@ func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo,
 	objInfo = ObjectInfo{
 		Bucket:      bucket,
 		UserDefined: meta,
-		ETag:        azureToS3ETag(blob.Properties.Etag),
+		ETag:        toS3ETag(blob.Properties.Etag),
 		ModTime:     time.Time(blob.Properties.LastModified),
 		Name:        object,
 		Size:        blob.Properties.ContentLength,
@@ -629,8 +624,7 @@ func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int
 
 	etag := data.MD5HexString()
 	if etag == "" {
-		// Generate random ETag.
-		etag = azureToS3ETag(getMD5Hash([]byte(mustGetUUID())))
+		etag = genETag()
 	}
 
 	subPartSize, subPartNumber := int64(azureBlockSize), 1
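When the client sends no Content-MD5, data.MD5HexString() is empty and the gateways now fall back to genETag() (added below in cmd/utils.go). A minimal stand-alone sketch of that fallback, with crypto/rand standing in for minio's mustGetUUID:

package main

import (
	"crypto/md5"
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// genETag sketch: hash 16 random bytes (a stand-in for minio's mustGetUUID)
// and mark the result as a non-checksum ETag by appending "-1".
func genETag() string {
	buf := make([]byte, 16)
	if _, err := rand.Read(buf); err != nil {
		panic(err)
	}
	sum := md5.Sum(buf)
	return hex.EncodeToString(sum[:]) + "-1"
}

func main() {
	fmt.Println(genETag()) // e.g. 5d57546eeb86b3eba68967292fba0644-1
}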
Azure gateway tests:

@@ -25,23 +25,6 @@ import (
 	"github.com/Azure/azure-sdk-for-go/storage"
 )
 
-// Test azureToS3ETag.
-func TestAzureToS3ETag(t *testing.T) {
-	tests := []struct {
-		etag     string
-		expected string
-	}{
-		{`"etag"`, `etag-1`},
-		{"etag", "etag-1"},
-	}
-	for i, test := range tests {
-		got := azureToS3ETag(test.etag)
-		if got != test.expected {
-			t.Errorf("test %d: got:%s expected:%s", i+1, got, test.expected)
-		}
-	}
-}
-
 // Test canonical metadata.
 func TestS3MetaToAzureProperties(t *testing.T) {
 	headers := map[string]string{
B2 gateway:

@@ -264,7 +264,7 @@ func (l *b2Objects) ListObjects(bucket string, prefix string, marker string, del
 			Name:        file.Name,
 			ModTime:     file.Timestamp,
 			Size:        file.Size,
-			ETag:        file.Info.ID,
+			ETag:        toS3ETag(file.Info.ID),
 			ContentType: file.Info.ContentType,
 			UserDefined: file.Info.Info,
 		})
@@ -299,7 +299,7 @@ func (l *b2Objects) ListObjectsV2(bucket, prefix, continuationToken, delimiter s
 			Name:        file.Name,
 			ModTime:     file.Timestamp,
 			Size:        file.Size,
-			ETag:        file.Info.ID,
+			ETag:        toS3ETag(file.Info.ID),
 			ContentType: file.Info.ContentType,
 			UserDefined: file.Info.Info,
 		})
@@ -346,7 +346,7 @@ func (l *b2Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectI
 	objInfo = ObjectInfo{
 		Bucket:      bucket,
 		Name:        object,
-		ETag:        fi.ID,
+		ETag:        toS3ETag(fi.ID),
 		Size:        fi.Size,
 		ModTime:     fi.Timestamp,
 		ContentType: fi.ContentType,
@@ -452,7 +452,7 @@ func (l *b2Objects) PutObject(bucket string, object string, data *h2.Reader, met
 	return ObjectInfo{
 		Bucket:      bucket,
 		Name:        object,
-		ETag:        fi.ID,
+		ETag:        toS3ETag(fi.ID),
 		Size:        fi.Size,
 		ModTime:     fi.Timestamp,
 		ContentType: fi.ContentType,
@@ -566,7 +566,7 @@ func (l *b2Objects) PutObjectPart(bucket string, object string, uploadID string,
 	return PartInfo{
 		PartNumber:   partID,
 		LastModified: UTCNow(),
-		ETag:         sha1,
+		ETag:         toS3ETag(sha1),
 		Size:         data.Size(),
 	}, nil
 }
@@ -597,7 +597,7 @@ func (l *b2Objects) ListObjectParts(bucket string, object string, uploadID strin
 	for _, part := range partsList {
 		lpi.Parts = append(lpi.Parts, PartInfo{
 			PartNumber: part.Number,
-			ETag:       part.SHA1,
+			ETag:       toS3ETag(part.SHA1),
 			Size:       part.Size,
 		})
 	}
@@ -627,7 +627,9 @@ func (l *b2Objects) CompleteMultipartUpload(bucket string, object string, upload
 		if i+1 != uploadedPart.PartNumber {
 			return oi, b2ToObjectError(traceError(InvalidPart{}), bucket, object, uploadID)
 		}
-		hashes[uploadedPart.PartNumber] = uploadedPart.ETag
+
+		// Trim "-1" suffix in ETag as PutObjectPart() treats B2 returned SHA1 as ETag.
+		hashes[uploadedPart.PartNumber] = strings.TrimSuffix(uploadedPart.ETag, "-1")
 	}
 
 	if _, err = bkt.File(uploadID, object).CompileParts(0, hashes).FinishLargeFile(l.ctx); err != nil {
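The B2 gateway has to undo the suffix before finishing a large file: PutObjectPart hands toS3ETag(sha1) to the client, and CompleteMultipartUpload trims it back to the raw SHA1 that B2 expects. A tiny sketch of that round trip (the SHA1 value is invented, and canonicalization is omitted):

package main

import (
	"fmt"
	"strings"
)

// Simplified toS3ETag: canonicalization omitted for brevity.
func toS3ETag(etag string) string {
	if !strings.HasSuffix(etag, "-1") {
		etag += "-1"
	}
	return etag
}

func main() {
	rawSHA1 := "2346ad27d7568ba9896f1b7da6b5991251debdf2" // what B2 returned for the part
	partETag := toS3ETag(rawSHA1)                          // what the S3 client sees
	recovered := strings.TrimSuffix(partETag, "-1")        // what FinishLargeFile needs back
	fmt.Println(partETag, recovered == rawSHA1)            // ...debdf2-1 true
}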
GCS gateway:

@@ -581,7 +581,7 @@ func (l *gcsGateway) ListObjects(bucket string, prefix string, marker string, de
 			Bucket:          attrs.Bucket,
 			ModTime:         attrs.Updated,
 			Size:            attrs.Size,
-			ETag:            fmt.Sprintf("%d", attrs.CRC32C),
+			ETag:            toS3ETag(fmt.Sprintf("%d", attrs.CRC32C)),
 			UserDefined:     attrs.Metadata,
 			ContentType:     attrs.ContentType,
 			ContentEncoding: attrs.ContentEncoding,
@@ -723,7 +723,7 @@ func fromGCSAttrsToObjectInfo(attrs *storage.ObjectAttrs) ObjectInfo {
 			Bucket:          attrs.Bucket,
 			ModTime:         attrs.Updated,
 			Size:            attrs.Size,
-			ETag:            fmt.Sprintf("%d", attrs.CRC32C),
+			ETag:            toS3ETag(fmt.Sprintf("%d", attrs.CRC32C)),
 			UserDefined:     attrs.Metadata,
 			ContentType:     attrs.ContentType,
 			ContentEncoding: attrs.ContentEncoding,
@@ -858,7 +858,7 @@ func (l *gcsGateway) PutObjectPart(bucket string, key string, uploadID string, p
 	etag := data.MD5HexString()
 	if etag == "" {
 		// Generate random ETag.
-		etag = getMD5Hash([]byte(mustGetUUID()))
+		etag = genETag()
 	}
 	object := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, partNumber, etag))
 	w := object.NewWriter(l.ctx)
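On GCS the source of the ETag is the object's CRC32C, rendered in decimal with fmt.Sprintf("%d", ...) before being passed through toS3ETag. A small sketch of how such a value is produced for a sample payload (payload invented, canonicalization omitted):

package main

import (
	"fmt"
	"hash/crc32"
	"strings"
)

// Simplified toS3ETag: canonicalization omitted for brevity.
func toS3ETag(etag string) string {
	if !strings.HasSuffix(etag, "-1") {
		etag += "-1"
	}
	return etag
}

func main() {
	// GCS reports CRC32C (Castagnoli polynomial) for every object.
	crc := crc32.Checksum([]byte("hello, gateway"), crc32.MakeTable(crc32.Castagnoli))
	fmt.Println(toS3ETag(fmt.Sprintf("%d", crc))) // e.g. 1234567890-1
}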
cmd/utils.go (+18 lines):
@@ -211,3 +211,21 @@ func checkURL(urlStr string) (*url.URL, error) {
 func UTCNow() time.Time {
 	return time.Now().UTC()
 }
+
+// genETag - generate UUID based ETag
+func genETag() string {
+	return toS3ETag(getMD5Hash([]byte(mustGetUUID())))
+}
+
+// toS3ETag - return checksum to ETag
+func toS3ETag(etag string) string {
+	etag = canonicalizeETag(etag)
+
+	if !strings.HasSuffix(etag, "-1") {
+		// Tools like s3cmd uses ETag as checksum of data to validate.
+		// Append "-1" to indicate ETag is not a checksum.
+		etag += "-1"
+	}
+
+	return etag
+}
Utils tests:

@@ -293,3 +293,22 @@ func TestDumpRequest(t *testing.T) {
 		t.Fatalf("Expected %#v, got %#v", expectedHeader, res.Header)
 	}
 }
+
+// Test toS3ETag()
+func TestToS3ETag(t *testing.T) {
+	testCases := []struct {
+		etag         string
+		expectedETag string
+	}{
+		{`"8019e762"`, `8019e762-1`},
+		{"5d57546eeb86b3eba68967292fba0644", "5d57546eeb86b3eba68967292fba0644-1"},
+		{`"8019e762-1"`, `8019e762-1`},
+		{"5d57546eeb86b3eba68967292fba0644-1", "5d57546eeb86b3eba68967292fba0644-1"},
+	}
+	for i, testCase := range testCases {
+		etag := toS3ETag(testCase.etag)
+		if etag != testCase.expectedETag {
+			t.Fatalf("test %v: expected: %v, got: %v", i+1, testCase.expectedETag, etag)
+		}
+	}
+}