fix: convert storage class into azure tiers (#9381)

Harshavardhana 2020-04-19 13:42:56 -07:00 committed by GitHub
parent 69ee28a082
commit 3ff5bf2369
2 changed files with 55 additions and 10 deletions

@@ -264,7 +264,6 @@ func s3MetaToAzureProperties(ctx context.Context, s3Metadata map[string]string)
            // handle it for storage.
            k = strings.Replace(k, "X-Amz-Meta-", "", 1)
            blobMeta[encodeKey(k)] = v
        // All cases below, extract common metadata that is
        // accepted by S3 into BlobProperties for setting on
        // Azure - see
@@ -306,6 +305,28 @@ func newPartMetaV1(uploadID string, partID int) (partMeta *partMetadataV1) {
    return p
}

func s3StorageClassToAzureTier(sc string) azblob.AccessTierType {
    switch sc {
    case "REDUCED_REDUNDANCY":
        return azblob.AccessTierCool
    case "STANDARD":
        return azblob.AccessTierHot
    }
    return azblob.AccessTierHot
}

func azureTierToS3StorageClass(tierType string) string {
    switch azblob.AccessTierType(tierType) {
    case azblob.AccessTierCool:
        return "REDUCED_REDUNDANCY"
    case azblob.AccessTierHot:
        return "STANDARD"
    default:
        return "STANDARD"
    }
}
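
These two converters are intentionally lossy: Azure's Archive tier has no S3 storage class and reads back as STANDARD, while unrecognized S3 classes are written as Hot. A minimal sketch (not part of this commit) of the expected mappings, assuming a _test.go file in this package with "testing" and the same azblob import:

// Sketch: expected behavior of the two converters above.
func TestStorageClassTierMapping(t *testing.T) {
    if s3StorageClassToAzureTier("REDUCED_REDUNDANCY") != azblob.AccessTierCool {
        t.Fatal("REDUCED_REDUNDANCY should map to the Cool tier")
    }
    // Unknown S3 classes (e.g. GLACIER) fall back to Hot.
    if s3StorageClassToAzureTier("GLACIER") != azblob.AccessTierHot {
        t.Fatal("unknown storage classes should fall back to Hot")
    }
    // Azure's Archive tier has no S3 equivalent and reads back as STANDARD.
    if azureTierToS3StorageClass(string(azblob.AccessTierArchive)) != "STANDARD" {
        t.Fatal("Archive should read back as STANDARD")
    }
}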
// azurePropertiesToS3Meta converts Azure metadata/properties to S3
// metadata. It is the reverse of s3MetaToAzureProperties. Azure's
// `.GetMetadata()` lower-cases all header keys, so this is taken into
@@ -408,6 +429,8 @@ func azureCodesToObjectError(err error, serviceCode string, statusCode int, buck
        err = minio.PartTooBig{}
    case "InvalidMetadata":
        err = minio.UnsupportedMetadata{}
    case "BlobAccessTierNotSupportedForAccountType":
        err = minio.NotImplemented{}
    default:
        switch statusCode {
        case http.StatusNotFound:
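
Mapping Azure's BlobAccessTierNotSupportedForAccountType service code to minio.NotImplemented lets callers, and the mint test change further below, detect that the backing storage account cannot tier blobs. A minimal sketch (not part of this commit) of a caller tolerating that case, assuming the azureToObjectError wrapper used elsewhere in this file; setTierBestEffort is a hypothetical helper:

// Sketch: best-effort tiering that ignores accounts without tier support.
func setTierBestEffort(ctx context.Context, blob azblob.BlobURL, tier azblob.AccessTierType, bucket, object string) error {
    _, err := blob.SetTier(ctx, tier, azblob.LeaseAccessConditions{})
    if err == nil {
        return nil
    }
    err = azureToObjectError(err, bucket, object)
    if _, ok := err.(minio.NotImplemented); ok {
        return nil // account type cannot tier blobs; treat as best-effort
    }
    return err
}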
@@ -639,6 +662,7 @@ func (a *azureObjects) ListObjects(ctx context.Context, bucket, prefix, marker,
            ETag:            etag,
            ContentType:     *blob.Properties.ContentType,
            ContentEncoding: *blob.Properties.ContentEncoding,
            UserDefined:     blob.Metadata,
        })
    }
@@ -786,6 +810,8 @@ func (a *azureObjects) GetObjectInfo(ctx context.Context, bucket, object string,
        Size:            blob.ContentLength(),
        ContentType:     blob.ContentType(),
        ContentEncoding: blob.ContentEncoding(),
        StorageClass:    azureTierToS3StorageClass(blob.AccessTier()),
    }, nil
}
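
With blob.AccessTier() surfaced as StorageClass, stat responses now report a class derived from the blob's current tier. A minimal sketch (not part of this commit) of what a caller observes; isCoolTier is a hypothetical helper:

// Sketch: a blob in Azure's Cool tier now stats as REDUCED_REDUNDANCY;
// Hot (and, per the converter above, Archive) stat as STANDARD.
func isCoolTier(ctx context.Context, a *azureObjects, bucket, object string, opts minio.ObjectOptions) (bool, error) {
    oi, err := a.GetObjectInfo(ctx, bucket, object, opts)
    if err != nil {
        return false, err
    }
    return oi.StorageClass == "REDUCED_REDUNDANCY", nil
}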
@@ -855,7 +880,7 @@ func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, des
    // To handle the case where the source object should be copied without its metadata,
    // the metadata must be removed from the dest. object after the copy completes
    if len(azureMeta) == 0 {
        _, err := destBlob.SetMetadata(ctx, azureMeta, azblob.BlobAccessConditions{})
        _, err = destBlob.SetMetadata(ctx, azureMeta, azblob.BlobAccessConditions{})
        if err != nil {
            return objInfo, azureToObjectError(err, srcBucket, srcObject)
        }
@@ -865,6 +890,15 @@ func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, des
    if err != nil {
        return objInfo, azureToObjectError(err, srcBucket, srcObject)
    }
    if _, ok := srcInfo.UserDefined["x-amz-storage-class"]; ok {
        _, err = destBlob.SetTier(ctx, s3StorageClassToAzureTier(srcInfo.UserDefined["x-amz-storage-class"]),
            azblob.LeaseAccessConditions{})
        if err != nil {
            return objInfo, azureToObjectError(err, srcBucket, srcObject)
        }
    }
    return a.GetObjectInfo(ctx, destBucket, destObject, dstOpts)
}
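
Azure's server-side copy does not carry an access tier, so the destination is re-tiered in a second call whenever the copy request supplied x-amz-storage-class. A minimal sketch (not part of this commit) of that step factored out; applyStorageClass is a hypothetical helper built only from calls already present in this diff:

// Sketch: re-tier a destination blob after a copy; sc is the value of
// the request's x-amz-storage-class header, if any.
func applyStorageClass(ctx context.Context, destBlob azblob.BlobURL, sc string) error {
    if sc == "" {
        return nil // no class requested; keep the account's default tier
    }
    _, err := destBlob.SetTier(ctx, s3StorageClassToAzureTier(sc), azblob.LeaseAccessConditions{})
    return err
}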

@@ -650,11 +650,10 @@ function test_copy_object() {
        fi
    fi
    ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
    if [ $rv -eq 0 ]; then
        log_success "$(get_duration "$start_time")" "${test_function}"
    else
        # clean up and log error
        ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
        log_failure "$(get_duration "$start_time")" "${function}" "${out}"
    fi
@@ -686,8 +685,15 @@ function test_copy_object_storage_class() {
    if [ $rv -eq 0 ]; then
        function="${AWS} s3api copy-object --bucket ${bucket_name} --storage-class REDUCED_REDUNDANCY --key datafile-1-kB-copy --copy-source ${bucket_name}/datafile-1-kB"
        test_function=${function}
        out=$($function)
        out=$($function 2>&1)
        rv=$?
        # if this functionality is not implemented return right away.
        if [ $rv -eq 255 ]; then
            if echo "$out" | grep -q "NotImplemented"; then
                ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
                return 0
            fi
        fi
        hash2=$(echo "$out" | jq -r .CopyObjectResult.ETag | sed -e 's/^"//' -e 's/"$//')
        if [ $rv -eq 0 ] && [ "$HASH_1_KB" != "$hash2" ]; then
            # Verification failed
@@ -715,11 +721,10 @@ function test_copy_object_storage_class() {
        fi
    fi
    ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
    if [ $rv -eq 0 ]; then
        log_success "$(get_duration "$start_time")" "${test_function}"
    else
        # clean up and log error
        ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
        log_failure "$(get_duration "$start_time")" "${function}" "${out}"
    fi
@@ -750,8 +755,15 @@ function test_copy_object_storage_class_same() {
    if [ $rv -eq 0 ]; then
        function="${AWS} s3api copy-object --bucket ${bucket_name} --storage-class REDUCED_REDUNDANCY --key datafile-1-kB --copy-source ${bucket_name}/datafile-1-kB"
        test_function=${function}
        out=$($function)
        out=$($function 2>&1)
        rv=$?
        # if this functionality is not implemented return right away.
        if [ $rv -eq 255 ]; then
            if echo "$out" | grep -q "NotImplemented"; then
                ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
                return 0
            fi
        fi
        hash2=$(echo "$out" | jq -r .CopyObjectResult.ETag | sed -e 's/^"//' -e 's/"$//')
        if [ $rv -eq 0 ] && [ "$HASH_1_KB" != "$hash2" ]; then
            # Verification failed
@@ -779,11 +791,10 @@ function test_copy_object_storage_class_same() {
        fi
    fi
    ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
    if [ $rv -eq 0 ]; then
        log_success "$(get_duration "$start_time")" "${test_function}"
    else
        # clean up and log error
        ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
        log_failure "$(get_duration "$start_time")" "${function}" "${out}"
    fi