mirror of
				https://github.com/minio/minio.git
				synced 2025-10-30 00:05:02 -04:00 
			
		
		
		
	fix: convert storage class into azure tiers (#9381)
This commit is contained in:
		
							parent
							
								
									69ee28a082
								
							
						
					
					
						commit
						3ff5bf2369
					
				| @ -264,7 +264,6 @@ func s3MetaToAzureProperties(ctx context.Context, s3Metadata map[string]string) | |||||||
| 			// handle it for storage. | 			// handle it for storage. | ||||||
| 			k = strings.Replace(k, "X-Amz-Meta-", "", 1) | 			k = strings.Replace(k, "X-Amz-Meta-", "", 1) | ||||||
| 			blobMeta[encodeKey(k)] = v | 			blobMeta[encodeKey(k)] = v | ||||||
| 
 |  | ||||||
| 		// All cases below, extract common metadata that is | 		// All cases below, extract common metadata that is | ||||||
| 		// accepted by S3 into BlobProperties for setting on | 		// accepted by S3 into BlobProperties for setting on | ||||||
| 		// Azure - see | 		// Azure - see | ||||||
| @ -306,6 +305,28 @@ func newPartMetaV1(uploadID string, partID int) (partMeta *partMetadataV1) { | |||||||
| 	return p | 	return p | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | func s3StorageClassToAzureTier(sc string) azblob.AccessTierType { | ||||||
|  | 	switch sc { | ||||||
|  | 	case "REDUCED_REDUNDANCY": | ||||||
|  | 		return azblob.AccessTierCool | ||||||
|  | 	case "STANDARD": | ||||||
|  | 		return azblob.AccessTierHot | ||||||
|  | 	} | ||||||
|  | 	return azblob.AccessTierHot | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func azureTierToS3StorageClass(tierType string) string { | ||||||
|  | 	switch azblob.AccessTierType(tierType) { | ||||||
|  | 	case azblob.AccessTierCool: | ||||||
|  | 		return "REDUCED_REDUNDANCY" | ||||||
|  | 	case azblob.AccessTierHot: | ||||||
|  | 		return "STANDARD" | ||||||
|  | 	default: | ||||||
|  | 		return "STANDARD" | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
| // azurePropertiesToS3Meta converts Azure metadata/properties to S3 | // azurePropertiesToS3Meta converts Azure metadata/properties to S3 | ||||||
| // metadata. It is the reverse of s3MetaToAzureProperties. Azure's | // metadata. It is the reverse of s3MetaToAzureProperties. Azure's | ||||||
| // `.GetMetadata()` lower-cases all header keys, so this is taken into | // `.GetMetadata()` lower-cases all header keys, so this is taken into | ||||||
| @ -408,6 +429,8 @@ func azureCodesToObjectError(err error, serviceCode string, statusCode int, buck | |||||||
| 		err = minio.PartTooBig{} | 		err = minio.PartTooBig{} | ||||||
| 	case "InvalidMetadata": | 	case "InvalidMetadata": | ||||||
| 		err = minio.UnsupportedMetadata{} | 		err = minio.UnsupportedMetadata{} | ||||||
|  | 	case "BlobAccessTierNotSupportedForAccountType": | ||||||
|  | 		err = minio.NotImplemented{} | ||||||
| 	default: | 	default: | ||||||
| 		switch statusCode { | 		switch statusCode { | ||||||
| 		case http.StatusNotFound: | 		case http.StatusNotFound: | ||||||
| @ -639,6 +662,7 @@ func (a *azureObjects) ListObjects(ctx context.Context, bucket, prefix, marker, | |||||||
| 				ETag:            etag, | 				ETag:            etag, | ||||||
| 				ContentType:     *blob.Properties.ContentType, | 				ContentType:     *blob.Properties.ContentType, | ||||||
| 				ContentEncoding: *blob.Properties.ContentEncoding, | 				ContentEncoding: *blob.Properties.ContentEncoding, | ||||||
|  | 				UserDefined:     blob.Metadata, | ||||||
| 			}) | 			}) | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| @ -786,6 +810,7 @@ func (a *azureObjects) GetObjectInfo(ctx context.Context, bucket, object string, | |||||||
| 		Size:            blob.ContentLength(), | 		Size:            blob.ContentLength(), | ||||||
| 		ContentType:     blob.ContentType(), | 		ContentType:     blob.ContentType(), | ||||||
| 		ContentEncoding: blob.ContentEncoding(), | 		ContentEncoding: blob.ContentEncoding(), | ||||||
|  | 		StorageClass:    azureTierToS3StorageClass(blob.AccessTier()), | ||||||
| 	}, nil | 	}, nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| @ -855,7 +880,7 @@ func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, des | |||||||
| 	// To handle the case where the source object should be copied without its metadata, | 	// To handle the case where the source object should be copied without its metadata, | ||||||
| 	// the metadata must be removed from the dest. object after the copy completes | 	// the metadata must be removed from the dest. object after the copy completes | ||||||
| 	if len(azureMeta) == 0 { | 	if len(azureMeta) == 0 { | ||||||
| 		_, err := destBlob.SetMetadata(ctx, azureMeta, azblob.BlobAccessConditions{}) | 		_, err = destBlob.SetMetadata(ctx, azureMeta, azblob.BlobAccessConditions{}) | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			return objInfo, azureToObjectError(err, srcBucket, srcObject) | 			return objInfo, azureToObjectError(err, srcBucket, srcObject) | ||||||
| 		} | 		} | ||||||
| @ -865,6 +890,15 @@ func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, des | |||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return objInfo, azureToObjectError(err, srcBucket, srcObject) | 		return objInfo, azureToObjectError(err, srcBucket, srcObject) | ||||||
| 	} | 	} | ||||||
|  | 
 | ||||||
|  | 	if _, ok := srcInfo.UserDefined["x-amz-storage-class"]; ok { | ||||||
|  | 		_, err = destBlob.SetTier(ctx, s3StorageClassToAzureTier(srcInfo.UserDefined["x-amz-storage-class"]), | ||||||
|  | 			azblob.LeaseAccessConditions{}) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return objInfo, azureToObjectError(err, srcBucket, srcObject) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	return a.GetObjectInfo(ctx, destBucket, destObject, dstOpts) | 	return a.GetObjectInfo(ctx, destBucket, destObject, dstOpts) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | |||||||
| @ -650,11 +650,10 @@ function test_copy_object() { | |||||||
|         fi |         fi | ||||||
|     fi |     fi | ||||||
| 
 | 
 | ||||||
|  |     ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1 | ||||||
|     if [ $rv -eq 0 ]; then |     if [ $rv -eq 0 ]; then | ||||||
|         log_success "$(get_duration "$start_time")" "${test_function}" |         log_success "$(get_duration "$start_time")" "${test_function}" | ||||||
|     else |     else | ||||||
|         # clean up and log error |  | ||||||
|         ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1 |  | ||||||
|         log_failure "$(get_duration "$start_time")" "${function}" "${out}" |         log_failure "$(get_duration "$start_time")" "${function}" "${out}" | ||||||
|     fi |     fi | ||||||
| 
 | 
 | ||||||
| @ -686,8 +685,15 @@ function test_copy_object_storage_class() { | |||||||
|     if [ $rv -eq 0 ]; then |     if [ $rv -eq 0 ]; then | ||||||
|         function="${AWS} s3api copy-object --bucket ${bucket_name} --storage-class REDUCED_REDUNDANCY --key datafile-1-kB-copy --copy-source ${bucket_name}/datafile-1-kB" |         function="${AWS} s3api copy-object --bucket ${bucket_name} --storage-class REDUCED_REDUNDANCY --key datafile-1-kB-copy --copy-source ${bucket_name}/datafile-1-kB" | ||||||
|         test_function=${function} |         test_function=${function} | ||||||
|         out=$($function) |         out=$($function 2>&1) | ||||||
|         rv=$? |         rv=$? | ||||||
|  |         # if this functionality is not implemented return right away. | ||||||
|  |         if [ $rv -eq 255 ]; then | ||||||
|  |             if echo "$out" | grep -q "NotImplemented"; then | ||||||
|  |                 ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1 | ||||||
|  |                 return 0 | ||||||
|  |             fi | ||||||
|  |         fi | ||||||
|         hash2=$(echo "$out" | jq -r .CopyObjectResult.ETag | sed -e 's/^"//' -e 's/"$//') |         hash2=$(echo "$out" | jq -r .CopyObjectResult.ETag | sed -e 's/^"//' -e 's/"$//') | ||||||
|         if [ $rv -eq 0 ] && [ "$HASH_1_KB" != "$hash2" ]; then |         if [ $rv -eq 0 ] && [ "$HASH_1_KB" != "$hash2" ]; then | ||||||
|             # Verification failed |             # Verification failed | ||||||
| @ -715,11 +721,10 @@ function test_copy_object_storage_class() { | |||||||
|         fi |         fi | ||||||
|     fi |     fi | ||||||
| 
 | 
 | ||||||
|  |     ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1 | ||||||
|     if [ $rv -eq 0 ]; then |     if [ $rv -eq 0 ]; then | ||||||
|         log_success "$(get_duration "$start_time")" "${test_function}" |         log_success "$(get_duration "$start_time")" "${test_function}" | ||||||
|     else |     else | ||||||
|         # clean up and log error |  | ||||||
|         ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1 |  | ||||||
|         log_failure "$(get_duration "$start_time")" "${function}" "${out}" |         log_failure "$(get_duration "$start_time")" "${function}" "${out}" | ||||||
|     fi |     fi | ||||||
| 
 | 
 | ||||||
| @ -750,8 +755,15 @@ function test_copy_object_storage_class_same() { | |||||||
|     if [ $rv -eq 0 ]; then |     if [ $rv -eq 0 ]; then | ||||||
|         function="${AWS} s3api copy-object --bucket ${bucket_name} --storage-class REDUCED_REDUNDANCY --key datafile-1-kB --copy-source ${bucket_name}/datafile-1-kB" |         function="${AWS} s3api copy-object --bucket ${bucket_name} --storage-class REDUCED_REDUNDANCY --key datafile-1-kB --copy-source ${bucket_name}/datafile-1-kB" | ||||||
|         test_function=${function} |         test_function=${function} | ||||||
|         out=$($function) |         out=$($function 2>&1) | ||||||
|         rv=$? |         rv=$? | ||||||
|  |         # if this functionality is not implemented return right away. | ||||||
|  |         if [ $rv -eq 255 ]; then | ||||||
|  |             if echo "$out" | grep -q "NotImplemented"; then | ||||||
|  |                 ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1 | ||||||
|  |                 return 0 | ||||||
|  |             fi | ||||||
|  |         fi | ||||||
|         hash2=$(echo "$out" | jq -r .CopyObjectResult.ETag | sed -e 's/^"//' -e 's/"$//') |         hash2=$(echo "$out" | jq -r .CopyObjectResult.ETag | sed -e 's/^"//' -e 's/"$//') | ||||||
|         if [ $rv -eq 0 ] && [ "$HASH_1_KB" != "$hash2" ]; then |         if [ $rv -eq 0 ] && [ "$HASH_1_KB" != "$hash2" ]; then | ||||||
|             # Verification failed |             # Verification failed | ||||||
| @ -779,11 +791,10 @@ function test_copy_object_storage_class_same() { | |||||||
|         fi |         fi | ||||||
|     fi |     fi | ||||||
| 
 | 
 | ||||||
|  |     ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1 | ||||||
|     if [ $rv -eq 0 ]; then |     if [ $rv -eq 0 ]; then | ||||||
|         log_success "$(get_duration "$start_time")" "${test_function}" |         log_success "$(get_duration "$start_time")" "${test_function}" | ||||||
|     else |     else | ||||||
|         # clean up and log error |  | ||||||
|         ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1 |  | ||||||
|         log_failure "$(get_duration "$start_time")" "${function}" "${out}" |         log_failure "$(get_duration "$start_time")" "${function}" "${out}" | ||||||
|     fi |     fi | ||||||
| 
 | 
 | ||||||
|  | |||||||
		Loading…
	
	
			
			x
			
			
		
	
		Reference in New Issue
	
	Block a user