Convert gateways into respective packages (#5200)

- Make azure gateway a package
- Make b2 gateway a package
- Make gcs gateway a package
- Make s3 gateway a package
- Make sia gateway a package
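All five gateways listed above now live in their own Go packages that hook into the core minio/cmd command tree at import time. The sketch below illustrates that registration pattern for the azure package; it assumes the exported RegisterGatewayCommand helper in minio/cmd, and the usage string and azureGatewayMain stub are illustrative only, not taken from this diff.

package azure

import (
	"github.com/minio/cli"
	minio "github.com/minio/minio/cmd"
)

func init() {
	// Register the "azure" sub-command so `minio gateway azure` dispatches
	// into this package. (Sketch; names other than the import paths are illustrative.)
	minio.RegisterGatewayCommand(cli.Command{
		Name:   "azure",
		Usage:  "Microsoft Azure Blob Storage.",
		Action: azureGatewayMain,
	})
}

// azureGatewayMain is a placeholder entry point; the real gateway would parse
// its arguments here and start serving against the Azure backend.
func azureGatewayMain(ctx *cli.Context) {
}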
This commit is contained in:
Harshavardhana
2017-12-05 17:58:09 -08:00
committed by Dee Koder
parent 52e382b697
commit eb2894233c
31 changed files with 1586 additions and 1505 deletions


@@ -0,0 +1,322 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package azure
import (
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/storage"
"github.com/minio/minio/pkg/errors"
minio "github.com/minio/minio/cmd"
)
// Copied from github.com/Azure/azure-sdk-for-go/storage/container.go
func azureListBlobsGetParameters(p storage.ListBlobsParameters) url.Values {
out := url.Values{}
if p.Prefix != "" {
out.Set("prefix", p.Prefix)
}
if p.Delimiter != "" {
out.Set("delimiter", p.Delimiter)
}
if p.Marker != "" {
out.Set("marker", p.Marker)
}
if p.Include != nil {
addString := func(datasets []string, include bool, text string) []string {
if include {
datasets = append(datasets, text)
}
return datasets
}
include := []string{}
include = addString(include, p.Include.Snapshots, "snapshots")
include = addString(include, p.Include.Metadata, "metadata")
include = addString(include, p.Include.UncommittedBlobs, "uncommittedblobs")
include = addString(include, p.Include.Copy, "copy")
fullInclude := strings.Join(include, ",")
out.Set("include", fullInclude)
}
if p.MaxResults != 0 {
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
}
if p.Timeout != 0 {
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
}
return out
}
// Make anonymous HTTP request to azure endpoint.
func azureAnonRequest(verb, urlStr string, header http.Header) (*http.Response, error) {
req, err := http.NewRequest(verb, urlStr, nil)
if err != nil {
return nil, err
}
if header != nil {
req.Header = header
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
// 4XX and 5XX are error HTTP codes.
if resp.StatusCode >= 400 && resp.StatusCode <= 511 {
defer resp.Body.Close()
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if len(respBody) == 0 {
// no error in response body, might happen in HEAD requests
return nil, storage.AzureStorageServiceError{
StatusCode: resp.StatusCode,
Code: resp.Status,
Message: "no response body was available for error status code",
}
}
// Response contains Azure storage service error object.
var storageErr storage.AzureStorageServiceError
if err := xml.Unmarshal(respBody, &storageErr); err != nil {
return nil, err
}
storageErr.StatusCode = resp.StatusCode
return nil, storageErr
}
return resp, nil
}
// AnonGetBucketInfo - Get bucket metadata from azure anonymously.
func (a *azureObjects) AnonGetBucketInfo(bucket string) (bucketInfo minio.BucketInfo, err error) {
blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL()
url, err := url.Parse(blobURL)
if err != nil {
return bucketInfo, azureToObjectError(errors.Trace(err))
}
url.RawQuery = "restype=container"
resp, err := azureAnonRequest(http.MethodHead, url.String(), nil)
if err != nil {
return bucketInfo, azureToObjectError(errors.Trace(err), bucket)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return bucketInfo, azureToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket)), bucket)
}
t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
if err != nil {
return bucketInfo, errors.Trace(err)
}
return minio.BucketInfo{
Name: bucket,
Created: t,
}, nil
}
// AnonGetObject - Send GET request without authentication.
// This is needed when clients send GET requests on objects that can be downloaded without auth.
func (a *azureObjects) AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error) {
h := make(http.Header)
if length > 0 && startOffset > 0 {
h.Add("Range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
} else if startOffset > 0 {
h.Add("Range", fmt.Sprintf("bytes=%d-", startOffset))
}
blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL()
resp, err := azureAnonRequest(http.MethodGet, blobURL, h)
if err != nil {
return azureToObjectError(errors.Trace(err), bucket, object)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
return azureToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
}
_, err = io.Copy(writer, resp.Body)
return errors.Trace(err)
}
// AnonGetObjectInfo - Send HEAD request without authentication and convert the
// result to ObjectInfo.
func (a *azureObjects) AnonGetObjectInfo(bucket, object string) (objInfo minio.ObjectInfo, err error) {
blobURL := a.client.GetContainerReference(bucket).GetBlobReference(object).GetURL()
resp, err := azureAnonRequest(http.MethodHead, blobURL, nil)
if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return objInfo, azureToObjectError(errors.Trace(minio.AnonErrToObjectErr(resp.StatusCode, bucket, object)), bucket, object)
}
var contentLength int64
contentLengthStr := resp.Header.Get("Content-Length")
if contentLengthStr != "" {
contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
if err != nil {
return objInfo, azureToObjectError(errors.Trace(fmt.Errorf("Unexpected error")), bucket, object)
}
}
t, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
if err != nil {
return objInfo, errors.Trace(err)
}
objInfo.ModTime = t
objInfo.Bucket = bucket
objInfo.UserDefined = make(map[string]string)
if resp.Header.Get("Content-Encoding") != "" {
objInfo.UserDefined["Content-Encoding"] = resp.Header.Get("Content-Encoding")
}
objInfo.UserDefined["Content-Type"] = resp.Header.Get("Content-Type")
objInfo.ETag = resp.Header.Get("Etag")
objInfo.ModTime = t
objInfo.Name = object
objInfo.Size = contentLength
return
}
// AnonListObjects - Use Azure equivalent ListBlobs.
func (a *azureObjects) AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result minio.ListObjectsInfo, err error) {
params := storage.ListBlobsParameters{
Prefix: prefix,
Marker: marker,
Delimiter: delimiter,
MaxResults: uint(maxKeys),
}
q := azureListBlobsGetParameters(params)
q.Set("restype", "container")
q.Set("comp", "list")
blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL()
url, err := url.Parse(blobURL)
if err != nil {
return result, azureToObjectError(errors.Trace(err))
}
url.RawQuery = q.Encode()
resp, err := azureAnonRequest(http.MethodGet, url.String(), nil)
if err != nil {
return result, azureToObjectError(errors.Trace(err))
}
defer resp.Body.Close()
var listResp storage.BlobListResponse
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, azureToObjectError(errors.Trace(err))
}
err = xml.Unmarshal(data, &listResp)
if err != nil {
return result, azureToObjectError(errors.Trace(err))
}
result.IsTruncated = listResp.NextMarker != ""
result.NextMarker = listResp.NextMarker
for _, object := range listResp.Blobs {
result.Objects = append(result.Objects, minio.ObjectInfo{
Bucket: bucket,
Name: object.Name,
ModTime: time.Time(object.Properties.LastModified),
Size: object.Properties.ContentLength,
ETag: object.Properties.Etag,
ContentType: object.Properties.ContentType,
ContentEncoding: object.Properties.ContentEncoding,
})
}
result.Prefixes = listResp.BlobPrefixes
return result, nil
}
// AnonListObjectsV2 - List objects in V2 mode, anonymously
func (a *azureObjects) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result minio.ListObjectsV2Info, err error) {
params := storage.ListBlobsParameters{
Prefix: prefix,
Marker: continuationToken,
Delimiter: delimiter,
MaxResults: uint(maxKeys),
}
q := azureListBlobsGetParameters(params)
q.Set("restype", "container")
q.Set("comp", "list")
blobURL := a.client.GetContainerReference(bucket).GetBlobReference("").GetURL()
url, err := url.Parse(blobURL)
if err != nil {
return result, azureToObjectError(errors.Trace(err))
}
url.RawQuery = q.Encode()
resp, err := http.Get(url.String())
if err != nil {
return result, azureToObjectError(errors.Trace(err))
}
defer resp.Body.Close()
var listResp storage.BlobListResponse
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, azureToObjectError(errors.Trace(err))
}
err = xml.Unmarshal(data, &listResp)
if err != nil {
return result, azureToObjectError(errors.Trace(err))
}
// If NextMarker is not empty, this means response is truncated and NextContinuationToken should be set
if listResp.NextMarker != "" {
result.IsTruncated = true
result.NextContinuationToken = listResp.NextMarker
}
for _, object := range listResp.Blobs {
result.Objects = append(result.Objects, minio.ObjectInfo{
Bucket: bucket,
Name: object.Name,
ModTime: time.Time(object.Properties.LastModified),
Size: object.Properties.ContentLength,
ETag: minio.CanonicalizeETag(object.Properties.Etag),
ContentType: object.Properties.ContentType,
ContentEncoding: object.Properties.ContentEncoding,
})
}
result.Prefixes = listResp.BlobPrefixes
return result, nil
}

File diff suppressed because it is too large.


@@ -0,0 +1,376 @@
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package azure
import (
"fmt"
"net/http"
"net/url"
"reflect"
"testing"
"github.com/Azure/azure-sdk-for-go/storage"
minio "github.com/minio/minio/cmd"
"github.com/minio/minio/pkg/errors"
)
// Test canonical metadata.
func TestS3MetaToAzureProperties(t *testing.T) {
headers := map[string]string{
"accept-encoding": "gzip",
"content-encoding": "gzip",
"cache-control": "age: 3600",
"content-disposition": "dummy",
"content-length": "10",
"content-type": "application/javascript",
"X-Amz-Meta-Hdr": "value",
"X-Amz-Meta-X_test_key": "value",
"X-Amz-Meta-X__test__key": "value",
"X-Amz-Meta-X-Test__key": "value",
"X-Amz-Meta-X-Amz-Key": "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=",
"X-Amz-Meta-X-Amz-Matdesc": "{}",
"X-Amz-Meta-X-Amz-Iv": "eWmyryl8kq+EVnnsE7jpOg==",
}
// Only X-Amz-Meta- prefixed entries will be returned in
// Metadata (without the prefix!)
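// As the expected values show, keys are also canonicalized for Azure:
// '-' becomes '_' and '_' becomes '__'.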
expectedHeaders := map[string]string{
"Hdr": "value",
"X__test__key": "value",
"X____test____key": "value",
"X_Test____key": "value",
"X_Amz_Key": "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=",
"X_Amz_Matdesc": "{}",
"X_Amz_Iv": "eWmyryl8kq+EVnnsE7jpOg==",
}
meta, _, err := s3MetaToAzureProperties(headers)
if err != nil {
t.Fatalf("Test failed, with %s", err)
}
if !reflect.DeepEqual(map[string]string(meta), expectedHeaders) {
t.Fatalf("Test failed, expected %#v, got %#v", expectedHeaders, meta)
}
headers = map[string]string{
"invalid--meta": "value",
}
_, _, err = s3MetaToAzureProperties(headers)
if err = errors.Cause(err); err != nil {
if _, ok := err.(minio.UnsupportedMetadata); !ok {
t.Fatalf("Test failed with unexpected error %s, expected UnsupportedMetadata", err)
}
}
headers = map[string]string{
"content-md5": "Dce7bmCX61zvxzP5QmfelQ==",
}
_, props, err := s3MetaToAzureProperties(headers)
if err != nil {
t.Fatalf("Test failed, with %s", err)
}
if props.ContentMD5 != headers["content-md5"] {
t.Fatalf("Test failed, expected %s, got %s", headers["content-md5"], props.ContentMD5)
}
}
func TestAzurePropertiesToS3Meta(t *testing.T) {
// Just one test case. Adding more does not add value, since
// azurePropertiesToS3Meta() just adds a prefix.
metadata := map[string]string{
"first_name": "myname",
"x_test_key": "value",
"x_test__key": "value",
"x__test__key": "value",
"x____test____key": "value",
"x_amz_key": "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=",
"x_amz_matdesc": "{}",
"x_amz_iv": "eWmyryl8kq+EVnnsE7jpOg==",
}
expectedMeta := map[string]string{
"X-Amz-Meta-First-Name": "myname",
"X-Amz-Meta-X-Test-Key": "value",
"X-Amz-Meta-X-Test_key": "value",
"X-Amz-Meta-X_test_key": "value",
"X-Amz-Meta-X__test__key": "value",
"X-Amz-Meta-X-Amz-Key": "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=",
"X-Amz-Meta-X-Amz-Matdesc": "{}",
"X-Amz-Meta-X-Amz-Iv": "eWmyryl8kq+EVnnsE7jpOg==",
"Cache-Control": "max-age: 3600",
"Content-Disposition": "dummy",
"Content-Encoding": "gzip",
"Content-Length": "10",
"Content-MD5": "base64-md5",
"Content-Type": "application/javascript",
}
actualMeta := azurePropertiesToS3Meta(metadata, storage.BlobProperties{
CacheControl: "max-age: 3600",
ContentDisposition: "dummy",
ContentEncoding: "gzip",
ContentLength: 10,
ContentMD5: "base64-md5",
ContentType: "application/javascript",
})
if !reflect.DeepEqual(actualMeta, expectedMeta) {
t.Fatalf("Test failed, expected %#v, got %#v", expectedMeta, actualMeta)
}
}
// Test azureToObjectError() conversion of Azure errors to object layer errors.
func TestAzureToObjectError(t *testing.T) {
testCases := []struct {
actualErr error
expectedErr error
bucket, object string
}{
{
nil, nil, "", "",
},
{
errors.Trace(fmt.Errorf("Non azure error")),
fmt.Errorf("Non azure error"), "", "",
},
{
storage.AzureStorageServiceError{
Code: "ContainerAlreadyExists",
}, storage.AzureStorageServiceError{
Code: "ContainerAlreadyExists",
}, "bucket", "",
},
{
errors.Trace(storage.AzureStorageServiceError{
Code: "ContainerAlreadyExists",
}), minio.BucketExists{Bucket: "bucket"}, "bucket", "",
},
{
errors.Trace(storage.AzureStorageServiceError{
Code: "InvalidResourceName",
}), minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
},
{
errors.Trace(storage.AzureStorageServiceError{
Code: "RequestBodyTooLarge",
}), minio.PartTooBig{}, "", "",
},
{
errors.Trace(storage.AzureStorageServiceError{
Code: "InvalidMetadata",
}), minio.UnsupportedMetadata{}, "", "",
},
{
errors.Trace(storage.AzureStorageServiceError{
StatusCode: http.StatusNotFound,
}), minio.ObjectNotFound{
Bucket: "bucket",
Object: "object",
}, "bucket", "object",
},
{
errors.Trace(storage.AzureStorageServiceError{
StatusCode: http.StatusNotFound,
}), minio.BucketNotFound{Bucket: "bucket"}, "bucket", "",
},
{
errors.Trace(storage.AzureStorageServiceError{
StatusCode: http.StatusBadRequest,
}), minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
},
}
for i, testCase := range testCases {
if err := azureToObjectError(testCase.actualErr, testCase.bucket, testCase.object); err != nil {
if err.Error() != testCase.expectedErr.Error() {
t.Errorf("Test %d: Expected error %s, got %s", i+1, testCase.expectedErr, err)
}
}
}
}
// Test azureGetBlockID().
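// Each expected block ID below is the base64 encoding of
// "<5-digit partID>.<2-digit subPartNumber>.<uploadID>.<md5>", e.g. the first
// decodes to "00001.07.f328c35cad938137.d41d8cd98f00b204e9800998ecf8427e".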
func TestAzureGetBlockID(t *testing.T) {
testCases := []struct {
partID int
subPartNumber int
uploadID string
md5 string
blockID string
}{
{1, 7, "f328c35cad938137", "d41d8cd98f00b204e9800998ecf8427e", "MDAwMDEuMDcuZjMyOGMzNWNhZDkzODEzNy5kNDFkOGNkOThmMDBiMjA0ZTk4MDA5OThlY2Y4NDI3ZQ=="},
{2, 19, "abcdc35cad938137", "a7fb6b7b36ee4ed66b5546fac4690273", "MDAwMDIuMTkuYWJjZGMzNWNhZDkzODEzNy5hN2ZiNmI3YjM2ZWU0ZWQ2NmI1NTQ2ZmFjNDY5MDI3Mw=="},
}
for _, test := range testCases {
blockID := azureGetBlockID(test.partID, test.subPartNumber, test.uploadID, test.md5)
if blockID != test.blockID {
t.Fatalf("%s is not equal to %s", blockID, test.blockID)
}
}
}
// Test azureParseBlockID().
func TestAzureParseBlockID(t *testing.T) {
testCases := []struct {
blockID string
partID int
subPartNumber int
uploadID string
md5 string
success bool
}{
// Invalid base64.
{"MDAwMDEuMDcuZjMyOGMzNWNhZDkzODEzNy5kNDFkOGNkOThmMDBiMjA0ZTk4MDA5OThlY2Y4NDI3ZQ=", 0, 0, "", "", false},
// Invalid number of tokens.
{"MDAwMDEuQUEuZjMyOGMzNWNhZDkzODEzNwo=", 0, 0, "", "", false},
// Invalid encoded part ID.
{"MDAwMGEuMDcuZjMyOGMzNWNhZDkzODEzNy5kNDFkOGNkOThmMDBiMjA0ZTk4MDA5OThlY2Y4NDI3ZQo=", 0, 0, "", "", false},
// Invalid sub part ID.
{"MDAwMDEuQUEuZjMyOGMzNWNhZDkzODEzNy5kNDFkOGNkOThmMDBiMjA0ZTk4MDA5OThlY2Y4NDI3ZQo=", 0, 0, "", "", false},
{"MDAwMDEuMDcuZjMyOGMzNWNhZDkzODEzNy5kNDFkOGNkOThmMDBiMjA0ZTk4MDA5OThlY2Y4NDI3ZQ==", 1, 7, "f328c35cad938137", "d41d8cd98f00b204e9800998ecf8427e", true},
{"MDAwMDIuMTkuYWJjZGMzNWNhZDkzODEzNy5hN2ZiNmI3YjM2ZWU0ZWQ2NmI1NTQ2ZmFjNDY5MDI3Mw==", 2, 19, "abcdc35cad938137", "a7fb6b7b36ee4ed66b5546fac4690273", true},
}
for i, test := range testCases {
partID, subPartNumber, uploadID, md5, err := azureParseBlockID(test.blockID)
if err != nil && test.success {
t.Errorf("Test %d: Expected success but failed %s", i+1, err)
}
if err == nil && !test.success {
t.Errorf("Test %d: Expected to fail but succeeeded insteadl", i+1)
}
if err == nil {
if partID != test.partID {
t.Errorf("Test %d: %d not equal to %d", i+1, partID, test.partID)
}
if subPartNumber != test.subPartNumber {
t.Errorf("Test %d: %d not equal to %d", i+1, subPartNumber, test.subPartNumber)
}
if uploadID != test.uploadID {
t.Errorf("Test %d: %s not equal to %s", i+1, uploadID, test.uploadID)
}
if md5 != test.md5 {
t.Errorf("Test %d: %s not equal to %s", i+1, md5, test.md5)
}
}
}
}
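// A sketch consistent with the block ID format noted above TestAzureGetBlockID
// (inferred from the test vectors, not necessarily the exact upstream helper),
// assuming encoding/base64 and fmt are imported:
//
//	func blockID(partID, subPartNumber int, uploadID, md5Hex string) string {
//		return base64.StdEncoding.EncodeToString(
//			[]byte(fmt.Sprintf("%05d.%02d.%s.%s", partID, subPartNumber, uploadID, md5Hex)))
//	}
//
// blockID(1, 7, "f328c35cad938137", "d41d8cd98f00b204e9800998ecf8427e") reproduces
// the first expected value in TestAzureGetBlockID.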
// Test azureListBlobsGetParameters()
func TestAzureListBlobsGetParameters(t *testing.T) {
// Test values set 1
expectedURLValues := url.Values{}
expectedURLValues.Set("prefix", "test")
expectedURLValues.Set("delimiter", "_")
expectedURLValues.Set("marker", "marker")
expectedURLValues.Set("include", "metadata")
expectedURLValues.Set("maxresults", "20")
expectedURLValues.Set("timeout", "10")
setBlobParameters := storage.ListBlobsParameters{
Prefix: "test",
Delimiter: "_",
Marker: "marker",
Include: &storage.IncludeBlobDataset{Metadata: true},
MaxResults: 20,
Timeout: 10,
}
// Test values set 2
expectedURLValues1 := url.Values{}
setBlobParameters1 := storage.ListBlobsParameters{
Prefix: "",
Delimiter: "",
Marker: "",
Include: nil,
MaxResults: 0,
Timeout: 0,
}
testCases := []struct {
name string
args storage.ListBlobsParameters
want url.Values
}{
{"TestIfValuesSet", setBlobParameters, expectedURLValues},
{"TestIfValuesNotSet", setBlobParameters1, expectedURLValues1},
}
for _, test := range testCases {
t.Run(test.name, func(t *testing.T) {
if got := azureListBlobsGetParameters(test.args); !reflect.DeepEqual(got, test.want) {
t.Errorf("azureListBlobsGetParameters() = %v, want %v", got, test.want)
}
})
}
}
func TestAnonErrToObjectErr(t *testing.T) {
testCases := []struct {
name string
statusCode int
params []string
wantErr error
}{
{"ObjectNotFound",
http.StatusNotFound,
[]string{"testBucket", "testObject"},
minio.ObjectNotFound{Bucket: "testBucket", Object: "testObject"},
},
{"BucketNotFound",
http.StatusNotFound,
[]string{"testBucket", ""},
minio.BucketNotFound{Bucket: "testBucket"},
},
{"ObjectNameInvalid",
http.StatusBadRequest,
[]string{"testBucket", "testObject"},
minio.ObjectNameInvalid{Bucket: "testBucket", Object: "testObject"},
},
{"BucketNameInvalid",
http.StatusBadRequest,
[]string{"testBucket", ""},
minio.BucketNameInvalid{Bucket: "testBucket"},
},
}
for _, test := range testCases {
t.Run(test.name, func(t *testing.T) {
if err := minio.AnonErrToObjectErr(test.statusCode, test.params...); !reflect.DeepEqual(err, test.wantErr) {
t.Errorf("anonErrToObjectErr() error = %v, wantErr %v", err, test.wantErr)
}
})
}
}
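// TestCheckAzureUploadID exercises upload ID validation; judging by the cases
// below, a valid Azure gateway upload ID is a 16-character hexadecimal string.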
func TestCheckAzureUploadID(t *testing.T) {
invalidUploadIDs := []string{
"123456789abcdefg",
"hello world",
"0x1234567890",
"1234567890abcdef1234567890abcdef",
}
for _, uploadID := range invalidUploadIDs {
if err := checkAzureUploadID(uploadID); err == nil {
t.Fatalf("%s: expected: <error>, got: <nil>", uploadID)
}
}
validUploadIDs := []string{
"1234567890abcdef",
"1122334455667788",
}
for _, uploadID := range validUploadIDs {
if err := checkAzureUploadID(uploadID); err != nil {
t.Fatalf("%s: expected: <nil>, got: %s", uploadID, err)
}
}
}